// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include <asm/byteorder.h>
#include <asm/errno.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>

#include "ocs-aes.h"

#define AES_COMMAND_OFFSET 0x0000
#define AES_KEY_0_OFFSET 0x0004
#define AES_KEY_1_OFFSET 0x0008
#define AES_KEY_2_OFFSET 0x000C
#define AES_KEY_3_OFFSET 0x0010
#define AES_KEY_4_OFFSET 0x0014
#define AES_KEY_5_OFFSET 0x0018
#define AES_KEY_6_OFFSET 0x001C
#define AES_KEY_7_OFFSET 0x0020
#define AES_IV_0_OFFSET 0x0024
#define AES_IV_1_OFFSET 0x0028
#define AES_IV_2_OFFSET 0x002C
#define AES_IV_3_OFFSET 0x0030
#define AES_ACTIVE_OFFSET 0x0034
#define AES_STATUS_OFFSET 0x0038
#define AES_KEY_SIZE_OFFSET 0x0044
#define AES_IER_OFFSET 0x0048
#define AES_ISR_OFFSET 0x005C
#define AES_MULTIPURPOSE1_0_OFFSET 0x0200
#define AES_MULTIPURPOSE1_1_OFFSET 0x0204
#define AES_MULTIPURPOSE1_2_OFFSET 0x0208
#define AES_MULTIPURPOSE1_3_OFFSET 0x020C
#define AES_MULTIPURPOSE2_0_OFFSET 0x0220
#define AES_MULTIPURPOSE2_1_OFFSET 0x0224
#define AES_MULTIPURPOSE2_2_OFFSET 0x0228
#define AES_MULTIPURPOSE2_3_OFFSET 0x022C
#define AES_BYTE_ORDER_CFG_OFFSET 0x02C0
#define AES_TLEN_OFFSET 0x0300
#define AES_T_MAC_0_OFFSET 0x0304
#define AES_T_MAC_1_OFFSET 0x0308
#define AES_T_MAC_2_OFFSET 0x030C
#define AES_T_MAC_3_OFFSET 0x0310
#define AES_PLEN_OFFSET 0x0314
#define AES_A_DMA_SRC_ADDR_OFFSET 0x0400
#define AES_A_DMA_DST_ADDR_OFFSET 0x0404
#define AES_A_DMA_SRC_SIZE_OFFSET 0x0408
#define AES_A_DMA_DST_SIZE_OFFSET 0x040C
#define AES_A_DMA_DMA_MODE_OFFSET 0x0410
#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET 0x0418
#define AES_A_DMA_NEXT_DST_DESCR_OFFSET 0x041C
#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET 0x0420
#define AES_A_DMA_LOG_OFFSET 0x0424
#define AES_A_DMA_STATUS_OFFSET 0x0428
#define AES_A_DMA_PERF_CNTR_OFFSET 0x042C
#define AES_A_DMA_MSI_ISR_OFFSET 0x0480
#define AES_A_DMA_MSI_IER_OFFSET 0x0484
#define AES_A_DMA_MSI_MASK_OFFSET 0x0488
#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET 0x0600
#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET 0x0700

/*
 * AES_A_DMA_DMA_MODE register.
 * Default: 0x00000000.
 * bit[31]	ACTIVE
 *		This bit activates the DMA. When the DMA finishes, it resets
 *		this bit to zero.
 * bit[30:26]	Unused by this driver.
 * bit[25]	SRC_LINK_LIST_EN
 *		Source link list enable bit. When the linked list is terminated
 *		this bit is reset by the DMA.
 * bit[24]	DST_LINK_LIST_EN
 *		Destination link list enable bit. When the linked list is
 *		terminated this bit is reset by the DMA.
 * bit[23:0]	Unused by this driver.
 */
#define AES_A_DMA_DMA_MODE_ACTIVE BIT(31)
#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN BIT(25)
#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN BIT(24)

/*
 * AES_ACTIVE register
 * default 0x00000000
 * bit[31:10]	Reserved
 * bit[9]	LAST_ADATA
 * bit[8]	LAST_GCX
 * bit[7:2]	Reserved
 * bit[1]	TERMINATION
 * bit[0]	TRIGGER
 */
#define AES_ACTIVE_LAST_ADATA BIT(9)
#define AES_ACTIVE_LAST_CCM_GCM BIT(8)
#define AES_ACTIVE_TERMINATION BIT(1)
#define AES_ACTIVE_TRIGGER BIT(0)

#define AES_DISABLE_INT 0x00000000
#define AES_DMA_CPD_ERR_INT BIT(8)
#define AES_DMA_OUTBUF_RD_ERR_INT BIT(7)
#define AES_DMA_OUTBUF_WR_ERR_INT BIT(6)
#define AES_DMA_INBUF_RD_ERR_INT BIT(5)
#define AES_DMA_INBUF_WR_ERR_INT BIT(4)
#define AES_DMA_BAD_COMP_INT BIT(3)
#define AES_DMA_SAI_INT BIT(2)
#define AES_DMA_SRC_DONE_INT BIT(0)
#define AES_COMPLETE_INT BIT(1)

#define AES_DMA_MSI_MASK_CLEAR BIT(0)

#define AES_128_BIT_KEY 0x00000000
#define AES_256_BIT_KEY BIT(0)

#define AES_DEACTIVATE_PERF_CNTR 0x00000000
#define AES_ACTIVATE_PERF_CNTR BIT(0)

#define AES_MAX_TAG_SIZE_U32 4

#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)

/*
 * There is an inconsistency in the documentation. This is documented as an
 * 11-bit value, but it is actually 10 bits.
 */
#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK 0x3FF

/*
 * During CCM decrypt, the OCS block needs to finish processing the ciphertext
 * before the tag is written. For 128-bit mode this required delay is 28 OCS
 * clock cycles. For 256-bit mode it is 36 OCS clock cycles; the driver always
 * waits for the larger value, which covers both key sizes.
 */
#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT 36UL

/*
 * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
 * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
 * bit in the same register (as stated in the OCS databook).
 */
#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT 42UL

/* See RFC3610 section 2.2 */
#define L_PRIME_MIN (1)
#define L_PRIME_MAX (7)

/*
 * CCM IV format from RFC 3610 section 2.3
 *
 *   Octet Number   Contents
 *   ------------   ---------
 *   0              Flags
 *   1 ... 15-L     Nonce N
 *   16-L ... 15    Counter i
 *
 * Flags = L' = L - 1
 */
#define L_PRIME_IDX 0
#define COUNTER_START(lprime) (16 - ((lprime) + 1))
#define COUNTER_LEN(lprime) ((lprime) + 1)
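
/*
 * Worked example (values chosen for illustration): if iv[0] == L' == 2,
 * then L == 3, so the nonce N occupies iv[1..12] and the counter occupies
 * the last COUNTER_LEN(2) == 3 octets, starting at octet
 * COUNTER_START(2) == 16 - 3 == 13.
 */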

enum aes_counter_mode {
	AES_CTR_M_NO_INC = 0,
	AES_CTR_M_32_INC = 1,
	AES_CTR_M_64_INC = 2,
	AES_CTR_M_128_INC = 3,
};

/**
 * struct ocs_dma_linked_list - OCS DMA linked list entry.
 * @src_addr: Source address of the data.
 * @src_len: Length of data to be fetched.
 * @next: Next dma_list to fetch.
 * @ll_flags: Flags (freeze / terminate) for the DMA engine.
 */
struct ocs_dma_linked_list {
	u32 src_addr;
	u32 src_len;
	u32 next;
	u32 ll_flags;
} __packed;
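
/*
 * A minimal sketch (illustrative only, not driver code) of how two entries
 * chain together, similar to what ocs_create_linked_list_from_sg() builds.
 * first_buf_dma, second_buf_dma, and list_dma are assumed, pre-mapped DMA
 * addresses:
 *
 *	struct ocs_dma_linked_list ll[2];
 *
 *	ll[0].src_addr = first_buf_dma;
 *	ll[0].src_len  = first_buf_len;
 *	ll[0].next     = list_dma + sizeof(ll[0]); // DMA address of ll[1].
 *	ll[0].ll_flags = 0;
 *	ll[1].src_addr = second_buf_dma;
 *	ll[1].src_len  = second_buf_len;
 *	ll[1].next     = 0;                        // End of the list.
 *	ll[1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
 */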

/*
 * Set endianness of inputs and outputs
 * AES_BYTE_ORDER_CFG
 * default 0x00000000
 * bit [10] - KEY_HI_LO_SWAP
 * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
 * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
 * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
 * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
 * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
 * bit [4] - IV_SWAP_BYTES_IN_DWORD
 * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
 * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
 * bit [1] - DOUT_SWAP_DWORDS_IN_OCTWORD
 * bit [0] - DOUT_SWAP_BYTES_IN_DWORD
 */
static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
}

/* Trigger AES process start. */
static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Indicate last bulk of data. */
static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_TERMINATION,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/*
 * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
 *
 * Called when DMA is programmed to fetch the last batch of data.
 * - For AES-CCM it is called for the last batch of Payload data and
 *   Ciphertext data.
 * - For AES-GCM, it is called for the last batch of Plaintext data and
 *   Ciphertext data.
 */
static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_LAST_CCM_GCM,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Wait for LAST_CCM_GCM bit to be unset. */
static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
{
	u32 aes_active_reg;

	do {
		aes_active_reg = ioread32(aes_dev->base_reg +
					  AES_ACTIVE_OFFSET);
	} while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
}

/* Wait for the input buffer occupancy (a 10-bit field) to drop to zero. */
static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
{
	u32 reg;

	do {
		reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
	} while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
}

/*
 * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
 * other bits).
 *
 * Called when DMA is programmed to fetch the last batch of Associated Data
 * (CCM case) or Additional Authenticated Data (GCM case).
 */
static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Set DMA src and dst transfer size to 0 */
static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
}

/* Activate DMA for zero-byte transfer case. */
static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable src linked list */
static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable dst linked list */
static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable src and dst linked lists */
static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
		  AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Reset PERF_CNTR to 0 and activate it */
static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
	iowrite32(AES_ACTIVATE_PERF_CNTR,
		  aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}

/* Wait until PERF_CNTR reaches at least 'delay' cycles, then deactivate it */
static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
							   int delay)
{
	while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
		;
	iowrite32(AES_DEACTIVATE_PERF_CNTR,
		  aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}

/* Disable AES and DMA IRQ. */
static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
{
	u32 isr_val = 0;

	/* Disable interrupts */
	iowrite32(AES_DISABLE_INT,
		  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);

	/* Clear any pending interrupt */
	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
}

/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
{
	if (irq == AES_COMPLETE_INT) {
		/* Ensure DMA error interrupts are enabled */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
		/*
		 * AES_IER
		 * default 0x00000000
		 * bits [31:3] - reserved
		 * bit [2] - EN_SKS_ERR
		 * bit [1] - EN_AES_COMPLETE
		 * bit [0] - reserved
		 */
		iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		return;
	}
	if (irq == AES_DMA_SRC_DONE_INT) {
		/* Ensure AES interrupts are disabled */
		iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		/*
		 * DMA_MSI_IER
		 * default 0x00000000
		 * bits [31:9] - reserved
		 * bit [8] - CPD_ERR_INT_EN
		 * bit [7] - OUTBUF_RD_ERR_INT_EN
		 * bit [6] - OUTBUF_WR_ERR_INT_EN
		 * bit [5] - INBUF_RD_ERR_INT_EN
		 * bit [4] - INBUF_WR_ERR_INT_EN
		 * bit [3] - BAD_COMP_INT_EN
		 * bit [2] - SAI_INT_EN
		 * bit [1] - DST_DONE_INT_EN
		 * bit [0] - SRC_DONE_INT_EN
		 */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT |
			  AES_DMA_SRC_DONE_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	}
}

/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
{
	int rc;

	reinit_completion(&aes_dev->irq_completion);
	aes_irq_enable(aes_dev, irq);
	rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
	if (rc)
		return rc;

	return aes_dev->dma_err_mask ? -EIO : 0;
}

/* Configure DMA to OCS, linked list mode */
static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				     dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
}

/* Configure DMA from OCS, linked list mode */
static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				       dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
}

irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
{
	struct ocs_aes_dev *aes_dev = dev_id;
	u32 aes_dma_isr;

	/* Read DMA ISR status. */
	aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	/* Disable and clear interrupts. */
	aes_irq_disable(aes_dev);

	/* Save DMA error status. */
	aes_dev->dma_err_mask = aes_dma_isr &
				(AES_DMA_CPD_ERR_INT |
				 AES_DMA_OUTBUF_RD_ERR_INT |
				 AES_DMA_OUTBUF_WR_ERR_INT |
				 AES_DMA_INBUF_RD_ERR_INT |
				 AES_DMA_INBUF_WR_ERR_INT |
				 AES_DMA_BAD_COMP_INT |
				 AES_DMA_SAI_INT);

	/* Signal IRQ completion. */
	complete(&aes_dev->irq_completion);

	return IRQ_HANDLED;
}

/**
 * ocs_aes_set_key() - Write key into OCS AES hardware.
 * @aes_dev: The OCS AES device to write the key to.
 * @key_size: The size of the key (in bytes).
 * @key: The key to write.
 * @cipher: The cipher the key is for.
 *
 * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
		    enum ocs_cipher cipher)
{
	const u32 *key_u32;
	u32 val;
	int i;

	/* OCS AES supports 128-bit and 256-bit keys only. */
	if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported by AES cipher\n",
			key_size * 8);
		return -EINVAL;
	}
	/* OCS SM4 supports 128-bit keys only. */
	if (cipher == OCS_SM4 && key_size != 16) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported for SM4 cipher\n",
			key_size * 8);
		return -EINVAL;
	}

	if (!key)
		return -EINVAL;

	key_u32 = (const u32 *)key;

	/* Write key to AES_KEY[0-7] registers */
	for (i = 0; i < (key_size / sizeof(u32)); i++) {
		iowrite32(key_u32[i],
			  aes_dev->base_reg + AES_KEY_0_OFFSET +
			  (i * sizeof(u32)));
	}
	/*
	 * Write key size
	 * bits [31:1] - reserved
	 * bit [0] - AES_KEY_SIZE
	 *           0 - 128 bit key
	 *           1 - 256 bit key
	 */
	val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
	iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);

	return 0;
}
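
/*
 * Illustrative call (a sketch, not part of the driver): assuming 'aes_dev'
 * is a probed OCS AES device and 'key' holds 32 key bytes, loading an
 * AES-256 key looks like:
 *
 *	rc = ocs_aes_set_key(aes_dev, 32, key, OCS_AES);
 *	if (rc)
 *		return rc;
 */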

/* Write AES_COMMAND */
static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
				       enum ocs_cipher cipher,
				       enum ocs_mode mode,
				       enum ocs_instruction instruction)
{
	u32 val;

	/* AES_COMMAND
	 * default 0x000000CC
	 * bit [14] - CIPHER_SELECT
	 *            0 - AES
	 *            1 - SM4
	 * bits [11:8] - OCS_AES_MODE
	 *               0000 - ECB
	 *               0001 - CBC
	 *               0010 - CTR
	 *               0110 - CCM
	 *               0111 - GCM
	 *               1001 - CTS
	 * bits [7:6] - AES_INSTRUCTION
	 *              00 - ENCRYPT
	 *              01 - DECRYPT
	 *              10 - EXPAND
	 *              11 - BYPASS
	 * bits [3:2] - CTR_M_BITS
	 *              00 - No increment
	 *              01 - Least significant 32 bits are incremented
	 *              10 - Least significant 64 bits are incremented
	 *              11 - Full 128 bits are incremented
	 */
	val = (cipher << 14) | (mode << 8) | (instruction << 6) |
	      (AES_CTR_M_128_INC << 2);
	iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
}

static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
			 enum ocs_mode mode,
			 enum ocs_cipher cipher,
			 enum ocs_instruction instruction)
{
	/* Ensure interrupts are disabled and pending interrupts cleared. */
	aes_irq_disable(aes_dev);

	/* Set endianness recommended by data-sheet. */
	aes_a_set_endianness(aes_dev);

	/* Set AES_COMMAND register. */
	set_ocs_aes_command(aes_dev, cipher, mode, instruction);
}

/*
 * Write the byte length of the last AES/SM4 block of Payload data (without
 * zero padding and without the length of the MAC) in register AES_PLEN.
 */
static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
						   u32 size)
{
	u32 val;

	if (size == 0) {
		val = 0;
		goto exit;
	}

	val = size % AES_BLOCK_SIZE;
	if (val == 0)
		val = AES_BLOCK_SIZE;

exit:
	iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
}

/*
 * Validate inputs according to mode.
 * If OK return 0; else return -EINVAL.
 */
static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
				   const u8 *iv, u32 iv_size,
				   dma_addr_t aad_dma_list, u32 aad_size,
				   const u8 *tag, u32 tag_size,
				   enum ocs_cipher cipher, enum ocs_mode mode,
				   enum ocs_instruction instruction,
				   dma_addr_t dst_dma_list)
{
	/* Ensure cipher, mode and instruction are valid. */
	if (!(cipher == OCS_AES || cipher == OCS_SM4))
		return -EINVAL;

	if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
	    mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
	    mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
		return -EINVAL;

	if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
	    instruction != OCS_EXPAND && instruction != OCS_BYPASS)
		return -EINVAL;

	/*
	 * When instruction is OCS_BYPASS, OCS simply copies data from source
	 * to destination using DMA.
	 *
	 * AES mode is irrelevant, but both source and destination DMA
	 * linked lists must be defined.
	 */
	if (instruction == OCS_BYPASS) {
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;
	}

	/*
	 * For performance reasons, switch on mode so that each mode performs
	 * only the checks it actually needs.
	 */
	switch (mode) {
	case OCS_MODE_ECB:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	case OCS_MODE_CBC:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTR:
		/* Ensure input length of 1 byte or greater */
		if (src_size == 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTS:
		/* Ensure input length >= block size */
		if (src_size < AES_BLOCK_SIZE)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_GCM:
		/* Ensure IV is present and GCM_AES_IV_SIZE in length */
		if (!iv || iv_size != GCM_AES_IV_SIZE)
			return -EINVAL;

		/*
		 * If input data present ensure source and destination linked
		 * lists are created
		 */
		if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
				 dst_dma_list == DMA_MAPPING_ERROR))
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure tag destination is set */
		if (!tag)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		return 0;

	case OCS_MODE_CCM:
		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		/* 2 <= L <= 8, so 1 <= L' <= 7 */
		if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
		    iv[L_PRIME_IDX] > L_PRIME_MAX)
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		if (instruction == OCS_DECRYPT) {
			/*
			 * If input data present ensure source and destination
			 * linked lists are created
			 */
			if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
					 dst_dma_list == DMA_MAPPING_ERROR))
				return -EINVAL;

			/* Ensure input tag is present */
			if (!tag)
				return -EINVAL;

			return 0;
		}

		/* Instruction == OCS_ENCRYPT */

		/*
		 * Destination linked list always required (for tag even if no
		 * input data)
		 */
		if (dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* If input data present ensure src linked list is created */
		if (src_size && src_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * ocs_aes_op() - Perform AES/SM4 operation.
 * @aes_dev: The OCS AES device to use.
 * @mode: The mode to use (ECB, CBC, CTR, or CTS).
 * @cipher: The cipher to use (AES or SM4).
 * @instruction: The instruction to perform (encrypt or decrypt).
 * @dst_dma_list: The OCS DMA list mapping output memory.
 * @src_dma_list: The OCS DMA list mapping input payload data.
 * @src_size: The amount of data mapped by @src_dma_list.
 * @iv: The IV vector.
 * @iv_size: The size (in bytes) of @iv.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_op(struct ocs_aes_dev *aes_dev,
	       enum ocs_mode mode,
	       enum ocs_cipher cipher,
	       enum ocs_instruction instruction,
	       dma_addr_t dst_dma_list,
	       dma_addr_t src_dma_list,
	       u32 src_size,
	       u8 *iv,
	       u32 iv_size)
{
	u32 *iv32;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
				     NULL, 0, cipher, mode, instruction,
				     dst_dma_list);
	if (rc)
		return rc;
	/*
	 * ocs_aes_validate_inputs() is a generic check; now ensure mode is
	 * not GCM or CCM.
	 */
	if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
		return -EINVAL;

	/* Cast IV to u32 array. */
	iv32 = (u32 *)iv;

	ocs_aes_init(aes_dev, mode, cipher, instruction);

	if (mode == OCS_MODE_CTS) {
		/* Write the byte length of the last data block to engine. */
		ocs_aes_write_last_data_blk_len(aes_dev, src_size);
	}

	/* ECB is the only mode that doesn't use IV. */
	if (mode != OCS_MODE_ECB) {
		iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
		iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
		iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
		iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Configure and activate input / output DMA. */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);

	if (mode == OCS_MODE_CTS) {
		/*
		 * For CTS mode, instruct engine to activate ciphertext
		 * stealing if last block of data is incomplete.
		 */
		aes_a_set_last_gcx(aes_dev);
	} else {
		/* For all other modes, just write the 'termination' bit. */
		aes_a_op_termination(aes_dev);
	}

	/* Wait for engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	if (mode == OCS_MODE_CTR) {
		/* Read back IV for streaming mode */
		iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
		iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
		iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
		iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	return 0;
}
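
/*
 * Illustrative usage (a sketch under assumed names, not part of the driver):
 * a CBC encryption of 'src_size' bytes, where 'src_ll' and 'dst_ll' are OCS
 * DMA lists built with ocs_create_linked_list_from_sg() and 'iv' is a
 * 16-byte buffer:
 *
 *	rc = ocs_aes_op(aes_dev, OCS_MODE_CBC, OCS_AES, OCS_ENCRYPT,
 *			dst_ll.dma_addr, src_ll.dma_addr, src_size,
 *			iv, AES_BLOCK_SIZE);
 *
 * For CTR mode, 'iv' is also updated in place on return, so it can be
 * carried over to a subsequent call.
 */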

/* Compute and write J0 to engine registers. */
static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
				 const u8 *iv)
{
	const u32 *j0 = (u32 *)iv;

	/*
	 * IV must be 12 bytes; other sizes are not supported, as the Linux
	 * crypto API only expects/allows a 12-byte IV for GCM.
	 */
	iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
	iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
	iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
	iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
}

/* Read GCM tag from engine registers. */
static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
					u8 *tag, u32 tag_size)
{
	u32 tag_u32[AES_MAX_TAG_SIZE_U32];

	/*
	 * The Authentication Tag T is stored in Little Endian order in the
	 * registers with the most significant bytes stored from AES_T_MAC[3]
	 * downward.
	 */
	tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
	tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
	tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
	tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));

	memcpy(tag, tag_u32, tag_size);
}

/**
 * ocs_aes_gcm_op() - Perform GCM operation.
 * @aes_dev: The OCS AES device to use.
 * @cipher: The Cipher to use (AES or SM4).
 * @instruction: The instruction to perform (encrypt or decrypt).
 * @dst_dma_list: The OCS DMA list mapping output memory.
 * @src_dma_list: The OCS DMA list mapping input payload data.
 * @src_size: The amount of data mapped by @src_dma_list.
 * @iv: The input IV vector.
 * @aad_dma_list: The OCS DMA list mapping input AAD data.
 * @aad_size: The amount of data mapped by @aad_dma_list.
 * @out_tag: Where to store computed tag.
 * @tag_size: The size (in bytes) of @out_tag.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   const u8 *iv,
		   dma_addr_t aad_dma_list,
		   u32 aad_size,
		   u8 *out_tag,
		   u32 tag_size)
{
	u64 bit_len;
	u32 val;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     GCM_AES_IV_SIZE, aad_dma_list,
				     aad_size, out_tag, tag_size, cipher,
				     OCS_MODE_GCM, instruction,
				     dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);

	/* Compute and write J0 to OCS HW. */
	ocs_aes_gcm_write_j0(aes_dev, iv);

	/* Write out_tag byte length */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);

	/* Write the byte length of the last plaintext / ciphertext block. */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Write ciphertext bit length */
	bit_len = (u64)src_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);

	/* Write aad bit length */
	bit_len = (u64)aad_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Process AAD. */
	if (aad_size) {
		/* If aad present, configure DMA to feed it to the engine. */
		dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
		aes_a_dma_active_src_ll_en(aes_dev);

		/* Instructs engine to pad last block of aad, if needed. */
		aes_a_set_last_gcx_and_adata(aes_dev);

		/* Wait for DMA transfer to complete. */
		rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
		if (rc)
			return rc;
	} else {
		aes_a_set_last_gcx_and_adata(aes_dev);
	}

	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/* Now process payload. */
	if (src_size) {
		/* Configure and activate DMA for both input and output data. */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
	}

	/* Instruct the AES/SM4 engine that payload processing is over. */
	aes_a_set_last_gcx(aes_dev);

	/* Wait for OCS AES engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);

	return 0;
}
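
/*
 * Illustrative usage (a sketch under assumed names): a GCM encryption with
 * AAD, where 'src_ll', 'dst_ll' and 'aad_ll' are pre-built OCS DMA lists,
 * 'iv' is GCM_AES_IV_SIZE (12) bytes long and 'tag' receives a 16-byte tag:
 *
 *	rc = ocs_aes_gcm_op(aes_dev, OCS_AES, OCS_ENCRYPT,
 *			    dst_ll.dma_addr, src_ll.dma_addr, src_size,
 *			    iv, aad_ll.dma_addr, aad_size, tag, 16);
 */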

/* Write encrypted tag to AES/SM4 engine. */
static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
					    const u8 *in_tag, u32 tag_size)
{
	int i;

	/* Ensure DMA input buffer is empty */
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/*
	 * During CCM decrypt, the OCS block needs to finish processing the
	 * ciphertext before the tag is written, so a delay is needed after
	 * the DMA has completed writing the ciphertext.
	 */
	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_TAG_CLK_COUNT);

	/* Write encrypted tag to AES/SM4 engine. */
	for (i = 0; i < tag_size; i++) {
		iowrite8(in_tag[i], aes_dev->base_reg +
				    AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
	}
}

/*
 * Write B0 CCM block to OCS AES HW.
 *
 * Note: B0 format is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.1)
 */
static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
				const u8 *iv, u32 adata_size, u32 tag_size,
				u32 cryptlen)
{
	u8 b0[16]; /* CCM B0 block is 16 bytes long. */
	int i, q;

	/* Initialize B0 to 0. */
	memset(b0, 0, sizeof(b0));

	/*
	 * B0[0] is the 'Flags Octet' and has the following structure:
	 *   bit 7: Reserved
	 *   bit 6: Adata flag
	 *   bit 5-3: t value encoded as (t-2)/2
	 *   bit 2-0: q value encoded as q - 1
	 */
	/* If there is AAD data, set the Adata flag. */
	if (adata_size)
		b0[0] |= BIT(6);
	/*
	 * t denotes the octet length of T.
	 * t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
	 * encoded as (t - 2) / 2
	 */
	b0[0] |= (((tag_size - 2) / 2) & 0x7) << 3;
	/*
	 * q is the octet length of Q.
	 * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
	 * q - 1 == iv[0] & 0x7;
	 */
	b0[0] |= iv[0] & 0x7;
	/*
	 * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
	 * and must be copied to b0[1]..b0[15-q].
	 * q == (iv[0] & 0x7) + 1
	 */
	q = (iv[0] & 0x7) + 1;
	for (i = 1; i <= 15 - q; i++)
		b0[i] = iv[i];
	/*
	 * The rest of B0 must contain Q, i.e., the message length.
	 * Q is encoded in q octets, in big-endian order, so to write it, we
	 * start from the end of B0 and we move backward.
	 */
	i = sizeof(b0) - 1;
	while (q) {
		b0[i] = cryptlen & 0xff;
		cryptlen >>= 8;
		i--;
		q--;
	}
	/*
	 * If cryptlen is not zero at this point, it means that its original
	 * value was too big.
	 */
	if (cryptlen)
		return -EOVERFLOW;
	/* Now write B0 to OCS AES input buffer. */
	for (i = 0; i < sizeof(b0); i++)
		iowrite8(b0[i], aes_dev->base_reg +
				AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
	return 0;
}
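
/*
 * Worked example (values assumed for illustration): for an encrypt with
 * adata present, tag_size == 16 and iv[0] == 3 (i.e. q == 4), the Flags
 * octet works out to:
 *
 *	b0[0] = BIT(6) | (((16 - 2) / 2) << 3) | 3
 *	      = 0x40   | 0x38                  | 0x03 = 0x7B
 *
 * The nonce then fills b0[1..11] and the 4-octet big-endian message length
 * fills b0[12..15].
 */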

/*
 * Write adata length to OCS AES HW.
 *
 * Note: adata len encoding is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.2)
 */
static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
					u64 adata_len)
{
	u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
	int i, len;

	/*
	 * adata_len ('a') is encoded as follows:
	 * If 0 < a < 2^16 - 2^8     ==> 'a' encoded as [a]16, i.e., two
	 *                               octets (big endian).
	 * If 2^16 - 2^8 <= a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
	 *                               i.e., six octets (big endian).
	 * If 2^32 <= a < 2^64       ==> 'a' encoded as 0xff || 0xff || [a]64,
	 *                               i.e., ten octets (big endian).
	 */
	if (adata_len < 65280) {
		len = 2;
		*(__be16 *)enc_a = cpu_to_be16(adata_len);
	} else if (adata_len <= 0xFFFFFFFF) {
		len = 6;
		*(__be16 *)enc_a = cpu_to_be16(0xfffe);
		*(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
	} else { /* adata_len >= 2^32 */
		len = 10;
		*(__be16 *)enc_a = cpu_to_be16(0xffff);
		*(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
	}
	for (i = 0; i < len; i++)
		iowrite8(enc_a[i],
			 aes_dev->base_reg +
			 AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}
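
/*
 * Two illustrative encodings (values chosen for the example): adata_len ==
 * 0x012C (300) is below 65280, so it is encoded as the two octets
 * { 0x01, 0x2C }; adata_len == 0x00010000 (65536) falls in the second range,
 * so it is encoded as the six octets { 0xFF, 0xFE, 0x00, 0x01, 0x00, 0x00 }.
 */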

static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
				dma_addr_t adata_dma_list, u32 adata_size)
{
	int rc;

	if (!adata_size) {
		/* Since no aad the LAST_GCX bit can be set now */
		aes_a_set_last_gcx_and_adata(aes_dev);
		goto exit;
	}

	/* Adata case. */

	/*
	 * Form the encoding of the Associated data length and write it
	 * to the AES/SM4 input buffer.
	 */
	ocs_aes_ccm_write_adata_len(aes_dev, adata_size);

	/* Configure the AES/SM4 DMA to fetch the Associated Data */
	dma_to_ocs_aes_ll(aes_dev, adata_dma_list);

	/* Activate DMA to fetch Associated data. */
	aes_a_dma_active_src_ll_en(aes_dev);

	/* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
	aes_a_set_last_gcx_and_adata(aes_dev);

	/* Wait for DMA transfer to complete. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
	if (rc)
		return rc;

exit:
	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	return 0;
}

static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (src_size) {
		/*
		 * Configure and activate DMA for both input and output
		 * data.
		 */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		/* Configure and activate DMA for output data only. */
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_dst_ll_en(aes_dev);
	}

	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register to instruct
	 * AES/SM4 engine to pad the last block of data.
	 */
	aes_a_set_last_gcx(aes_dev);

	/* We are done, wait for IRQ and return. */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
}

static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (!src_size) {
		/* Let engine process 0-length input. */
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
		aes_a_set_last_gcx(aes_dev);

		return 0;
	}

	/*
	 * Configure and activate DMA for both input and output
	 * data.
	 */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);
	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
	 * AES/SM4 engine to differentiate between encrypted data and
	 * encrypted MAC.
	 */
	aes_a_set_last_gcx(aes_dev);
	/*
	 * Enable DMA DONE interrupt; once DMA transfer is over,
	 * interrupt handler will process the MAC/tag.
	 */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
}

/*
 * Compare Tag to Yr.
 *
 * Only used at the end of CCM decrypt. If tag == yr, message authentication
 * has succeeded.
 */
static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
					u8 tag_size_bytes)
{
	u32 tag[AES_MAX_TAG_SIZE_U32];
	u32 yr[AES_MAX_TAG_SIZE_U32];
	u8 i;

	/* Read Tag and Yr from AES registers. */
	for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
		tag[i] = ioread32(aes_dev->base_reg +
				  AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
		yr[i] = ioread32(aes_dev->base_reg +
				 AES_MULTIPURPOSE2_0_OFFSET +
				 (i * sizeof(u32)));
	}

	return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
}

/**
 * ocs_aes_ccm_op() - Perform CCM operation.
 * @aes_dev: The OCS AES device to use.
 * @cipher: The Cipher to use (AES or SM4).
 * @instruction: The instruction to perform (encrypt or decrypt).
 * @dst_dma_list: The OCS DMA list mapping output memory.
 * @src_dma_list: The OCS DMA list mapping input payload data.
 * @src_size: The amount of data mapped by @src_dma_list.
 * @iv: The input IV vector.
 * @adata_dma_list: The OCS DMA list mapping input A-data.
 * @adata_size: The amount of data mapped by @adata_dma_list.
 * @in_tag: Input tag.
 * @tag_size: The size (in bytes) of @in_tag.
 *
 * Note: for encrypt the tag is appended to the ciphertext (in the memory
 * mapped by @dst_dma_list).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   u8 *iv,
		   dma_addr_t adata_dma_list,
		   u32 adata_size,
		   u8 *in_tag,
		   u32 tag_size)
{
	u32 *iv_32;
	u8 lprime;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     AES_BLOCK_SIZE, adata_dma_list, adata_size,
				     in_tag, tag_size, cipher, OCS_MODE_CCM,
				     instruction, dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);

	/*
	 * Note: RFC 3610 and NIST 800-38C require a counter value of zero to
	 * encrypt the auth tag, so ensure that is the case here.
	 */
	lprime = iv[L_PRIME_IDX];
	memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));

	/*
	 * Nonce is already converted to ctr0 before being passed into this
	 * function as iv.
	 */
	iv_32 = (u32 *)iv;
	iowrite32(__swab32(iv_32[0]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
	iowrite32(__swab32(iv_32[1]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
	iowrite32(__swab32(iv_32[2]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
	iowrite32(__swab32(iv_32[3]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);

	/* Write MAC/tag length in register AES_TLEN */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
	/*
	 * Write the byte length of the last AES/SM4 block of Payload data
	 * (without zero padding and without the length of the MAC) in register
	 * AES_PLEN.
	 */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);

	/* Form block B0 and write it to the AES/SM4 input buffer. */
	rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
	if (rc)
		return rc;
	/*
	 * Ensure there have been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
	 * clock cycles since the TRIGGER bit was set.
	 */
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);

	/* Process Adata. */
	rc = ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
	if (rc)
		return rc;

	/* For the Encrypt case we just process the payload and return. */
	if (instruction == OCS_ENCRYPT) {
		return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
						      src_dma_list, src_size);
	}

	/* For Decrypt we need to process the payload and then the tag. */
	rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
					    src_dma_list, src_size);
	if (rc)
		return rc;

	/* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
	ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	return ccm_compare_tag_to_yr(aes_dev, tag_size);
}
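
/*
 * Illustrative usage (a sketch under assumed names): a CCM decryption where
 * 'src_ll' maps the ciphertext, 'adata_ll' maps the associated data, 'iv'
 * already holds the CTR0 block, and 'tag' holds the received 16-byte tag:
 *
 *	rc = ocs_aes_ccm_op(aes_dev, OCS_AES, OCS_DECRYPT,
 *			    dst_ll.dma_addr, src_ll.dma_addr, src_size,
 *			    iv, adata_ll.dma_addr, adata_size, tag, 16);
 *
 * A return value of -EBADMSG means the computed tag did not match 'tag'.
 */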

/**
 * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
 * @aes_dev: The OCS AES device the list will be created for.
 * @sg: The SG list OCS DMA linked list will be created from. When
 *      passed to this function, @sg must have been already mapped
 *      with dma_map_sg().
 * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
 *                value returned by dma_map_sg() when @sg was mapped.
 * @dll_desc: The OCS DMA dma_list to use to store information about the
 *            created linked list.
 * @data_size: The size of the data (from the SG list) to be mapped into the
 *             OCS DMA linked list.
 * @data_offset: The offset (within the SG list) of the data to be mapped.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
				   struct scatterlist *sg,
				   int sg_dma_count,
				   struct ocs_dll_desc *dll_desc,
				   size_t data_size, size_t data_offset)
{
	struct ocs_dma_linked_list *ll = NULL;
	struct scatterlist *sg_tmp;
	unsigned int tmp;
	int dma_nents;
	int i;

	if (!dll_desc || !sg || !aes_dev)
		return -EINVAL;

	/* Default values for when no dll_desc is created. */
	dll_desc->vaddr = NULL;
	dll_desc->dma_addr = DMA_MAPPING_ERROR;
	dll_desc->size = 0;

	if (data_size == 0)
		return 0;

	/* Loop over sg_list until we reach entry at specified offset. */
	while (data_offset >= sg_dma_len(sg)) {
		data_offset -= sg_dma_len(sg);
		sg_dma_count--;
		sg = sg_next(sg);
		/* If we reach the end of the list, offset was invalid. */
		if (!sg || sg_dma_count == 0)
			return -EINVAL;
	}

	/* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
	dma_nents = 0;
	tmp = 0;
	sg_tmp = sg;
	while (tmp < data_offset + data_size) {
		/* If we reach the end of the list, data_size was invalid. */
		if (!sg_tmp)
			return -EINVAL;
		tmp += sg_dma_len(sg_tmp);
		dma_nents++;
		sg_tmp = sg_next(sg_tmp);
	}
	if (dma_nents > sg_dma_count)
		return -EINVAL;

	/* Allocate the DMA list, one entry for each SG entry. */
	dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
	dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
					     &dll_desc->dma_addr, GFP_KERNEL);
	if (!dll_desc->vaddr)
		return -ENOMEM;

	/* Populate DMA linked list entries. */
	ll = dll_desc->vaddr;
	for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
		ll[i].src_addr = sg_dma_address(sg) + data_offset;
		ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
				(sg_dma_len(sg) - data_offset) : data_size;
		data_offset = 0;
		data_size -= ll[i].src_len;
		/* Current element points to the DMA address of the next one. */
		ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
		ll[i].ll_flags = 0;
	}
	/* Terminate last element. */
	ll[i - 1].next = 0;
	ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;

	return 0;
}
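
/*
 * Illustrative usage (a sketch under assumed names): building an OCS DMA
 * list for a scatterlist 'sg' that has been mapped with dma_map_sg(), and
 * releasing the list memory afterwards:
 *
 *	struct ocs_dll_desc dll;
 *	int nents = dma_map_sg(aes_dev->dev, sg, sg_count, DMA_TO_DEVICE);
 *
 *	rc = ocs_create_linked_list_from_sg(aes_dev, sg, nents, &dll,
 *					    data_size, 0);
 *	...
 *	dma_free_coherent(aes_dev->dev, dll.size, dll.vaddr, dll.dma_addr);
 */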