s5p-sss.c

// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>

#define _SBF(s, v) ((v) << (s))
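/*
 * For illustration (editor's note): _SBF(4, 0x02) expands to (0x02 << 4),
 * i.e. 0x20, which is how e.g. SSS_AES_KEY_SIZE_256 below selects the
 * 256-bit key size field in the AES control register.
 */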

/* Feed control registers */
#define SSS_REG_FCINTSTAT 0x0000
#define SSS_FCINTSTAT_HPARTINT BIT(7)
#define SSS_FCINTSTAT_HDONEINT BIT(5)
#define SSS_FCINTSTAT_BRDMAINT BIT(3)
#define SSS_FCINTSTAT_BTDMAINT BIT(2)
#define SSS_FCINTSTAT_HRDMAINT BIT(1)
#define SSS_FCINTSTAT_PKDMAINT BIT(0)

#define SSS_REG_FCINTENSET 0x0004
#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)

#define SSS_REG_FCINTENCLR 0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)

#define SSS_REG_FCINTPEND 0x000C
#define SSS_FCINTPEND_HPARTINTP BIT(7)
#define SSS_FCINTPEND_HDONEINTP BIT(5)
#define SSS_FCINTPEND_BRDMAINTP BIT(3)
#define SSS_FCINTPEND_BTDMAINTP BIT(2)
#define SSS_FCINTPEND_HRDMAINTP BIT(1)
#define SSS_FCINTPEND_PKDMAINTP BIT(0)

#define SSS_REG_FCFIFOSTAT 0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)

#define SSS_REG_FCFIFOCTRL 0x0014
#define SSS_FCFIFOCTRL_DESSEL BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
#define SSS_HASHIN_MASK _SBF(0, 0x03)

#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
#define SSS_FCBRDMAC_BYTESWAP BIT(1)
#define SSS_FCBRDMAC_FLUSH BIT(0)

#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
#define SSS_FCBTDMAC_BYTESWAP BIT(1)
#define SSS_FCBTDMAC_FLUSH BIT(0)

#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
#define SSS_FCHRDMAC_BYTESWAP BIT(1)
#define SSS_FCHRDMAC_FLUSH BIT(0)

#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
#define SSS_FCPKDMAC_BYTESWAP BIT(3)
#define SSS_FCPKDMAC_DESCEND BIT(2)
#define SSS_FCPKDMAC_TRANSMIT BIT(1)
#define SSS_FCPKDMAC_FLUSH BIT(0)
#define SSS_REG_FCPKDMAO 0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
#define SSS_AES_BYTESWAP_DI BIT(11)
#define SSS_AES_BYTESWAP_DO BIT(10)
#define SSS_AES_BYTESWAP_IV BIT(9)
#define SSS_AES_BYTESWAP_CNT BIT(8)
#define SSS_AES_BYTESWAP_KEY BIT(7)
#define SSS_AES_KEY_CHANGE_MODE BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT BIT(0)

#define SSS_REG_AES_STATUS 0x04
#define SSS_AES_BUSY BIT(2)
#define SSS_AES_INPUT_READY BIT(1)
#define SSS_AES_OUTPUT_READY BIT(0)

#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
#define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))

#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
						  SSS_AES_REG(dev, reg))
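
/*
 * For illustration (editor's note): SSS_WRITE(dev, FCINTPEND, val) expands
 * to __raw_writel(val, dev->ioaddr + 0x000C), while SSS_AES_WRITE(dev,
 * AES_CONTROL, val) writes relative to the per-variant dev->aes_ioaddr base.
 */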

/* HW engine modes */
#define FLAGS_AES_DECRYPT BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)

#define AES_KEY_LEN 16
#define CRYPTO_QUEUE_LEN 1

/* HASH registers */
#define SSS_REG_HASH_CTRL 0x00
#define SSS_HASH_USER_IV_EN BIT(5)
#define SSS_HASH_INIT_BIT BIT(4)
#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE 0x04
#define SSS_HASH_PAUSE BIT(0)

#define SSS_REG_HASH_CTRL_FIFO 0x08
#define SSS_HASH_FIFO_MODE_DMA BIT(0)
#define SSS_HASH_FIFO_MODE_CPU 0

#define SSS_REG_HASH_CTRL_SWAP 0x0C
#define SSS_HASH_BYTESWAP_DI BIT(3)
#define SSS_HASH_BYTESWAP_DO BIT(2)
#define SSS_HASH_BYTESWAP_IV BIT(1)
#define SSS_HASH_BYTESWAP_KEY BIT(0)

#define SSS_REG_HASH_STATUS 0x10
#define SSS_HASH_STATUS_MSG_DONE BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY BIT(0)

#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE 64
#define HASH_REG_SIZEOF 4
#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
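
/*
 * Editor's note, for illustration: with HASH_REG_SIZEOF == 4 the digest
 * register counts work out to 16 / 4 = 4 (MD5), 20 / 4 = 5 (SHA-1) and
 * 32 / 4 = 8 (SHA-256) 32-bit words.
 */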

/*
 * HASH bit numbers, used by the device: set in dev->hash_flags with
 * set_bit() and clear_bit(), or tested with test_bit() or BIT(), to keep
 * the HASH state BUSY or FREE, or to signal state from the irq handler to
 * the hash tasklet. The SGS bits keep track of memory allocated for the
 * scatterlist.
 */
#define HASH_FLAGS_BUSY 0
#define HASH_FLAGS_FINAL 1
#define HASH_FLAGS_DMA_ACTIVE 2
#define HASH_FLAGS_OUTPUT_READY 3
#define HASH_FLAGS_DMA_READY 4
#define HASH_FLAGS_SGS_COPIED 5
#define HASH_FLAGS_SGS_ALLOCED 6

/* HASH HW constants */
#define BUFLEN HASH_BLOCK_SIZE

#define SSS_HASH_DMA_LEN_ALIGN 8
#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)

#define SSS_HASH_QUEUE_LENGTH 10

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 * @hash_offset: HASH register offset from SSS module's base.
 * @clk_names: names of clocks needed to run SSS IP
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A structure for driver specific platform data is used for future
 * expansion of its usage.
 */
struct samsung_aes_variant {
	unsigned int aes_offset;
	unsigned int hash_offset;
	const char *clk_names[2];
};

struct s5p_aes_reqctx {
	unsigned long mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev *dev;

	u8 aes_key[AES_MAX_KEY_SIZE];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	int keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev: Associated device
 * @clk: Clock for accessing hardware
 * @pclk: APB bus clock necessary to access the hardware
 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
 * @irq_fc: Feed control interrupt line
 * @req: Crypto request currently handled by the device
 * @ctx: Configuration for currently handled crypto request
 * @sg_src: Scatter list with source data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_dst: Scatter list with destination data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_src_cpy: In case of unaligned access, copied scatter list
 *	with source data.
 * @sg_dst_cpy: In case of unaligned access, copied scatter list
 *	with destination data.
 * @tasklet: New request scheduling job
 * @queue: Crypto queue
 * @busy: Indicates whether the device is currently handling some request
 *	thus it uses some of the fields from this state, like:
 *	req, ctx, sg_src/dst (and copies). This essentially
 *	protects against concurrent access to these fields.
 * @lock: Lock for protecting both access to device hardware registers
 *	and fields related to current request (including the busy field).
 * @res: Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
 *	variable.
 * @hash_flags: Flags for current HASH op.
 * @hash_queue: Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req: Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 *
 * @use_hash: true if HASH algs enabled
 */
struct s5p_aes_dev {
	struct device *dev;
	struct clk *clk;
	struct clk *pclk;
	void __iomem *ioaddr;
	void __iomem *aes_ioaddr;
	int irq_fc;

	struct skcipher_request *req;
	struct s5p_aes_ctx *ctx;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;

	struct scatterlist *sg_src_cpy;
	struct scatterlist *sg_dst_cpy;

	struct tasklet_struct tasklet;
	struct crypto_queue queue;
	bool busy;
	spinlock_t lock;

	struct resource *res;
	void __iomem *io_hash_base;

	spinlock_t hash_lock; /* protect hash_ vars */
	unsigned long hash_flags;
	struct crypto_queue hash_queue;
	struct tasklet_struct hash_tasklet;

	u8 xmit_buf[BUFLEN];
	struct ahash_request *hash_req;
	struct scatterlist *hash_sg_iter;
	unsigned int hash_sg_cnt;

	bool use_hash;
};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd: Associated device
 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
 * @digest: Digest message or IV for partial result
 * @nregs: Number of HW registers for digest or IV read/write
 * @engine: Bits for selecting type of HASH in SSS block
 * @sg: sg for DMA transfer
 * @sg_len: Length of sg for DMA transfer
 * @sgl: sg for joining buffer and req->src scatterlist
 * @skip: Skip offset in req->src for current op
 * @total: Total number of bytes for current request
 * @finup: Keep state for finup or final.
 * @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
 * @buffer: For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {
	struct s5p_aes_dev *dd;
	bool op_update;

	u64 digcnt;
	u8 digest[SHA256_DIGEST_SIZE];

	unsigned int nregs; /* digest_size / sizeof(reg) */
	u32 engine;

	struct scatterlist *sg;
	unsigned int sg_len;
	struct scatterlist sgl[2];
	unsigned int skip;
	unsigned int total;
	bool finup;
	bool error;

	u32 bufcnt;
	u8 buffer[];
};

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd: Associated device
 * @flags: Bits for algorithm HASH.
 * @fallback: Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev *dd;
	unsigned long flags;
	struct crypto_shash *fallback;
};

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset = 0x4000,
	.hash_offset = 0x6000,
	.clk_names = { "secss", },
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset = 0x200,
	.hash_offset = 0x400,
	.clk_names = { "secss", },
};

static const struct samsung_aes_variant exynos5433_slim_aes_data = {
	.aes_offset = 0x400,
	.hash_offset = 0x800,
	.clk_names = { "aclk", "pclk", },
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{
		.compatible = "samsung,exynos5433-slim-sss",
		.data = &exynos5433_slim_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
		return of_device_get_match_data(&pdev->dev);

	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	struct skcipher_request *req = dev->req;
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);

	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->cryptlen);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->cryptlen, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
	if (reqctx->mode & FLAGS_AES_CBC)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
	else if (reqctx->mode & FLAGS_AES_CTR)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	dev->sg_dst = sg;

	return 0;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	dev->sg_src = sg;

	return 0;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}

static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev: device
 * @sg: scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev: device
 *
 * Return:
 * 2 if there is no more data and it is UPDATE op
 * 1 if new receiving (input) data is ready and can be written to device
 * 0 if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct skcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	u32 status, st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt. If there is still data (scatterlist did
	 * not reach end), then map next scatterlist entry.
	 * In case of such mapping error, s5p_aes_complete() should be called.
	 *
	 * If there is no more data in tx scatter list, call s5p_aes_complete()
	 * and schedule new tasklet.
	 *
	 * Handle hx interrupt. If there is still data map next entry.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
				SSS_FCINTSTAT_HRDMAINT);
	/* clear DMA bits */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* clear HASH irq bits */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* cannot have both HPART and HDONE */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);

		hx_end = true;
		/* when DONE or PART, do not handle HASH DMA */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev->req, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Note about else if:
	 * when hash_sg_iter reaches end and it's an UPDATE op,
	 * issue SSS_HASH_PAUSE and wait for HPART irq
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req: AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd: device
 * @ctx: request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req: AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req: AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev: secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev: secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}

/**
 * s5p_hash_irq_disable() - disable irq HASH signals
 * @dev: secss device
 * @flags: bitfield with irq's to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev: secss device
 * @flags: bitfield with irq's to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev: secss device
 * @hashflow: HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{
	unsigned long flags;
	u32 flow;

	spin_lock_irqsave(&dev->lock, flags);

	flow = SSS_READ(dev, FCFIFOCTRL);
	flow &= ~SSS_HASHIN_MASK;
	flow |= hashflow;
	SSS_WRITE(dev, FCFIFOCTRL, flow);

	spin_unlock_irqrestore(&dev->lock, flags);
}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev: secss device
 * @hashflow: HASH stream flow with/without AES/DES
 *
 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
 * enable HASH irq's HRDMA, HDONE, HPART
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
 * after previous updates, fill up IV words. For final, calculate and set
 * lengths for HASH so SecSS can finalize hash. For partial, set SSS HASH
 * length as 2^63 so it will never be reached, and set prelow and prehigh
 * to zero.
 *
 * This function does not start DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* number of bytes for last part */
		low = length;
		high = 0;
		/* total number of bits prev hashed */
		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}
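
/*
 * Worked example (editor's note, illustration only): if ctx->digcnt is 128
 * bytes when a final op starts, the pre-message size is programmed as
 * 128 * 8 == 1024 bits (prelow == 1024, prehigh == 0), while MSG_SIZE_LOW
 * carries the byte count of the last chunk passed in @length.
 */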

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);

	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	ctx->digcnt += length;
	ctx->total -= length;

	/* catch last interrupt */
	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */

	return -EINPROGRESS;
}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
 * with allocated buffer.
 *
 * Set bit in dd->hash_flag so we can free it after irq ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new scatterlist table, copy data for HASH into it. If there was
 * xmit_buf filled, prepare it first, then copy page, length and offset from
 * source sg into it, adjusting begin and/or end for skip offset and
 * hash_later value.
 *
 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
 * it after irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		if (new_len <= 0)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_prepare_sgs() - prepare sg for processing
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 * @final: final flag
 *
 * Check two conditions: (1) if buffers in sg have len aligned data, and (2)
 * if the sg table has properly aligned elements (list_ok). If one of these
 * checks fails, then either (1) allocate a new buffer for the data with
 * s5p_hash_copy_sgs(), copy the data into this buffer and prepare the
 * request in sgl, or (2) allocate a new sg table and prepare the sg elements.
 *
 * For digest or finup all conditions can be good, and we may not need any
 * fixes.
 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;
	struct scatterlist *sg_tmp = sg;

	if (!sg || !sg->length || !new_len)
		return 0;

	if (skip || !final)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;
		if (skip >= sg_tmp->length) {
			skip -= sg_tmp->length;
			if (!sg_tmp->length) {
				aligned = false;
				break;
			}
		} else {
			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
				aligned = false;
				break;
			}

			if (nbytes < sg_tmp->length - skip) {
				list_ok = false;
				break;
			}

			nbytes -= sg_tmp->length - skip;
			skip = 0;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);

	/*
	 * Have aligned data from previous operation and/or current
	 * Note: will enter here only if (digest or finup) and aligned
	 */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}

/**
 * s5p_hash_prepare_request() - prepare request for processing
 * @req: AHASH request
 * @update: true if UPDATE op
 *
 * Note 1: we can have update flag _and_ final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
 *	either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
 *	we have final op
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes;
	int ret;

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* bytes left from previous request, so fill up to BUFLEN */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;
	} else {
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);

		hash_later = ctx->total - xmit_len;
		/* copy hash_later bytes from end of req->src */
		/* previous bytes are in xmit_buf, so no overwrite */
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* have buffered data only */
		if (unlikely(!ctx->bufcnt)) {
			/* first update didn't fill up buffer */
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);
		}

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);

		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	if (!final)
		ctx->total = xmit_len;

	return 0;
}

/**
 * s5p_hash_update_dma_stop() - unmap DMA
 * @dd: secss device
 *
 * Unmap scatterlist ctx->sg.
 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
}

/**
 * s5p_hash_finish() - copy calculated digest to crypto layer
 * @req: AHASH request
 */
static void s5p_hash_finish(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;

	if (ctx->digcnt)
		s5p_hash_copy_result(req);

	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
}

/**
 * s5p_hash_finish_req() - finish request
 * @req: AHASH request
 * @err: error
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	unsigned long flags;

	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
			    BIT(HASH_FLAGS_SGS_COPIED));

	if (!err && !ctx->error) {
		s5p_hash_read_msg(req);
		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
			s5p_hash_finish(req);
	} else {
		ctx->error = true;
	}

	spin_lock_irqsave(&dd->hash_lock, flags);
	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
			    BIT(HASH_FLAGS_DMA_READY) |
			    BIT(HASH_FLAGS_OUTPUT_READY));
	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

/**
 * s5p_hash_handle_queue() - handle hash queue
 * @dd: device s5p_aes_dev
 * @req: AHASH request
 *
 * If req!=NULL enqueue it on dd->hash_queue, then, if FLAGS_BUSY is not set
 * on the device, process the first request from dd->hash_queue.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct s5p_hash_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->hash_lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->hash_queue, req);

	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		spin_unlock_irqrestore(&dd->hash_lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->hash_queue);
	async_req = crypto_dequeue_request(&dd->hash_queue);
	if (async_req)
		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);

	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->hash_req = req;
	ctx = ahash_request_ctx(req);

	err = s5p_hash_prepare_request(req, ctx->op_update);
	if (err || !ctx->total)
		goto out;

	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
		ctx->op_update, req->nbytes);

	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
	if (ctx->digcnt)
		s5p_hash_write_iv(req); /* restore hash IV */

	if (ctx->op_update) { /* HASH_OP_UPDATE */
		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
			/* no final() after finup() */
			err = s5p_hash_xmit_dma(dd, ctx->total, true);
	} else { /* HASH_OP_FINAL */
		err = s5p_hash_xmit_dma(dd, ctx->total, true);
	}
out:
	if (err != -EINPROGRESS) {
		/* hash_tasklet_cb will not finish it, so do it here */
		s5p_hash_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}

/**
 * s5p_hash_tasklet_cb() - hash tasklet
 * @data: ptr to s5p_aes_dev
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;

	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		s5p_hash_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
				       &dd->hash_flags)) {
			s5p_hash_update_dma_stop(dd);
		}

		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
				       &dd->hash_flags)) {
			/* hash or semi-hash ready */
			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
			goto finish;
		}
	}

	return;

finish:
	/* finish current request */
	s5p_hash_finish_req(dd->hash_req, 0);

	/* If we are not busy, process next req */
	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
		s5p_hash_handle_queue(dd, NULL);
}
  1242. /**
  1243. * s5p_hash_enqueue() - enqueue request
  1244. * @req: AHASH request
  1245. * @op: operation UPDATE (true) or FINAL (false)
  1246. *
  1247. * Returns: see s5p_hash_final below.
  1248. */
  1249. static int s5p_hash_enqueue(struct ahash_request *req, bool op)
  1250. {
  1251. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1252. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1253. ctx->op_update = op;
  1254. return s5p_hash_handle_queue(tctx->dd, req);
  1255. }
  1256. /**
  1257. * s5p_hash_update() - process the hash input data
  1258. * @req: AHASH request
  1259. *
  1260. * If request will fit in buffer, copy it and return immediately
  1261. * else enqueue it with OP_UPDATE.
  1262. *
  1263. * Returns: see s5p_hash_final below.
  1264. */
  1265. static int s5p_hash_update(struct ahash_request *req)
  1266. {
  1267. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1268. if (!req->nbytes)
  1269. return 0;
  1270. if (ctx->bufcnt + req->nbytes <= BUFLEN) {
  1271. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1272. 0, req->nbytes, 0);
  1273. ctx->bufcnt += req->nbytes;
  1274. return 0;
  1275. }
  1276. return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
  1277. }
  1278. /**
  1279. * s5p_hash_final() - close up hash and calculate digest
  1280. * @req: AHASH request
  1281. *
  1282. * Note: in final req->src do not have any data, and req->nbytes can be
  1283. * non-zero.
  1284. *
  1285. * If there were no input data processed yet and the buffered hash data is
  1286. * less than BUFLEN (64) then calculate the final hash immediately by using
  1287. * SW algorithm fallback.
  1288. *
  1289. * Otherwise enqueues the current AHASH request with OP_FINAL operation op
  1290. * and finalize hash message in HW. Note that if digcnt!=0 then there were
  1291. * previous update op, so there are always some buffered bytes in ctx->buffer,
  1292. * which means that ctx->bufcnt!=0
  1293. *
  1294. * Returns:
  1295. * 0 if the request has been processed immediately,
  1296. * -EINPROGRESS if the operation has been queued for later execution or is set
  1297. * to processing by HW,
  1298. * -EBUSY if queue is full and request should be resubmitted later,
  1299. * other negative values denotes an error.
  1300. */
  1301. static int s5p_hash_final(struct ahash_request *req)
  1302. {
  1303. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1304. ctx->finup = true;
  1305. if (ctx->error)
  1306. return -EINVAL; /* uncompleted hash is not needed */
  1307. if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
  1308. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1309. return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
  1310. ctx->bufcnt, req->result);
  1311. }
  1312. return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
  1313. }
/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed (except on EINPROGRESS), or to calculate the
	 * digest for small amounts of buffered data
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}

/**
 * s5p_hash_digest - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

/**
 * s5p_hash_cra_init_alg - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}

/**
 * s5p_hash_cra_init - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit - exit crypto tfm
 * @tfm: crypto transformation
 *
 * Free the allocated fallback.
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

/**
 * s5p_hash_export - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}

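/*
 * Illustrative sketch, not part of this driver: saving and restoring a
 * partial hash state through the export/import hooks above. The exported
 * blob must be at least crypto_ahash_statesize() bytes, which for this
 * driver is the request context plus BUFLEN of buffered data. Assumes
 * <crypto/hash.h> and <linux/slab.h>; the "example_" name and req2 (a
 * second request backed by the same algorithm) are hypothetical.
 */
static int __maybe_unused example_ahash_save_restore(struct ahash_request *req,
						     struct ahash_request *req2)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int err;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(req, state);	/* snapshot partial state */
	if (!err)
		err = crypto_ahash_import(req2, state);	/* resume elsewhere */

	kfree_sensitive(state);
	return err;
}
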
static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "exynos-sha1",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "md5",
		.cra_driver_name = "exynos-md5",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "exynos-sha256",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
}
};

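/*
 * Illustrative sketch, not part of this driver: a kernel consumer computing
 * a one-shot digest. A request for "sha256" may be served by the
 * "exynos-sha256" implementation registered above when it is the highest
 * priority provider. Assumes <crypto/hash.h> and <linux/scatterlist.h>; the
 * "example_" name is hypothetical, data must be DMA-able (e.g. kmalloc'ed)
 * and out must hold at least crypto_ahash_digestsize() bytes.
 */
static int __maybe_unused example_sha256_digest(const u8 *data,
						unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* digest() is init + finup in one call; wait for the async result */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
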
static void s5p_set_aes(struct s5p_aes_dev *dev,
			const u8 *key, const u8 *iv, const u8 *ctr,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
			    AES_BLOCK_SIZE);

	if (ctr)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
			    AES_BLOCK_SIZE);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct skcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct skcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct skcipher_request *req = dev->req;
	u32 aes_control;
	unsigned long flags;
	int err;
	u8 *iv, *ctr;

	/* This sets bit [13:12] to 00, which selects 128-bit counter */
	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->iv;
		ctr = NULL;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = NULL;
		ctr = req->iv;
	} else {
		iv = NULL; /* AES_ECB */
		ctr = NULL;
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}

static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = skcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = skcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct skcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return err;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

	return err;
}

static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!req->cryptlen)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
	    ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
		dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CTR);
}

static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dev = s5p_dev;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));

	return 0;
}

static struct skcipher_alg algs[] = {
	{
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ecb_encrypt,
		.decrypt = s5p_aes_ecb_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_cbc_encrypt,
		.decrypt = s5p_aes_cbc_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "ctr(aes)",
		.base.cra_driver_name = "ctr-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ctr_crypt,
		.decrypt = s5p_aes_ctr_crypt,
		.init = s5p_aes_init_tfm,
	},
};

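/*
 * Illustrative sketch, not part of this driver: a kernel consumer encrypting
 * a buffer in place with "cbc(aes)", which this driver exposes as
 * "cbc-aes-s5p". As enforced in s5p_aes_crypt() above, len must be a
 * multiple of AES_BLOCK_SIZE for ECB/CBC (only CTR accepts partial blocks).
 * Assumes <crypto/skcipher.h> and <linux/scatterlist.h>; the "example_" name
 * is hypothetical, buf must be DMA-able and iv must be AES_BLOCK_SIZE bytes.
 */
static int __maybe_unused example_cbc_aes_encrypt(const u8 *key,
						  unsigned int keylen,
						  u8 *iv, u8 *buf,
						  unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Wait synchronously for the asynchronous HW completion */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
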
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/*
	 * Note: HASH and PRNG use the same registers in secss, avoid
	 * overwriting each other. This will drop HASH when CONFIG_EXYNOS_RNG
	 * is enabled in the config. We need a larger resource size for the
	 * HASH registers in secss; the current one describes only AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
	if (IS_ERR(pdata->clk))
		return dev_err_probe(dev, PTR_ERR(pdata->clk),
				     "failed to find secss clock %s\n",
				     variant->clk_names[0]);

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling clock %s failed, err %d\n",
			variant->clk_names[0], err);
		return err;
	}

	if (variant->clk_names[1]) {
		pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
		if (IS_ERR(pdata->pclk)) {
			err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
					    "failed to find clock %s\n",
					    variant->clk_names[1]);
			goto err_clk;
		}

		err = clk_prepare_enable(pdata->pclk);
		if (err < 0) {
			dev_err(dev, "Enabling clock %s failed, err %d\n",
				variant->clk_names[0], err);
			goto err_clk;
		}
	} else {
		pdata->pclk = NULL;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_skcipher(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->pclk);

err_clk:
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_skcipher(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->pclk);
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <[email protected]>");
MODULE_AUTHOR("Kamil Konieczny <[email protected]>");