  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI hardware key manager driver.
  4. *
  5. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  6. */
  7. #include <linux/types.h>
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/mod_devicetable.h>
  11. #include <linux/device.h>
  12. #include <linux/clk.h>
  13. #include <linux/err.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/io.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/delay.h>
  19. #include <linux/crypto.h>
  20. #include <linux/bitops.h>
  21. #include <linux/iommu.h>
  22. #include <linux/hwkm.h>
  23. #include "hwkmregs.h"
  24. #include "hwkm_serialize.h"
  25. #define BYTES_TO_WORDS(bytes) (((bytes) + 3) / 4)
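/* Append @len bytes from @src to the KDF command packet and advance @cmd_ptr. */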
  26. #define WRITE_TO_KDF_PACKET(cmd_ptr, src, len) \
  27. do { \
  28. memcpy(cmd_ptr, src, len); \
  29. cmd_ptr += len; \
  30. } while (0)
  31. #define ASYNC_CMD_HANDLING false
  32. /* Maximum number of times to poll */
  33. #define MAX_RETRIES 20000
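/*
 * Shared poll counter used by the WAIT_UNTIL() macro below; this assumes
 * that HWKM transactions are issued one at a time.
 */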
static int retries;
  35. #define WAIT_UNTIL(cond) \
  36. for (retries = 0; !(cond) && (retries < MAX_RETRIES); retries++)
  37. #define EXPECTED_UNWRAP_KEY_SIZE 68
  38. #define ICEMEM_SLAVE_TPKEY_VAL 0x192
  39. #define KM_MASTER_TPKEY_SLOT 10
  40. #define BYTE_ORDER_VAL 8
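/*
 * Register accessors that select either the KM master register space or the
 * ICE slave HWKM register space based on @dest.
 */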
  41. #define qti_hwkm_readl(hwkm, reg, dest) \
  42. (((dest) == KM_MASTER) ? \
  43. (readl_relaxed((void __iomem *)((hwkm)->km_base + (reg)))) : \
  44. (readl_relaxed((void __iomem *)((hwkm)->ice_hwkm_mmio + (reg)))))
  45. #define qti_hwkm_writel(hwkm, val, reg, dest) \
  46. (((dest) == KM_MASTER) ? \
  47. (writel_relaxed((val), (void __iomem *)((hwkm)->km_base + (reg)))) :\
  48. (writel_relaxed((val), (void __iomem *)((hwkm)->ice_hwkm_mmio + (reg)))))
#define qti_hwkm_setb(hwkm, reg, nr, dest) do { \
	u32 val = qti_hwkm_readl(hwkm, reg, dest); \
	val |= (0x1 << (nr)); \
	qti_hwkm_writel(hwkm, val, reg, dest); \
} while (0)
#define qti_hwkm_clearb(hwkm, reg, nr, dest) do { \
	u32 val = qti_hwkm_readl(hwkm, reg, dest); \
	val &= ~(0x1 << (nr)); \
	qti_hwkm_writel(hwkm, val, reg, dest); \
} while (0)
  59. struct hwkm_clk_info {
  60. struct list_head list;
  61. struct clk *clk;
  62. const char *name;
  63. u32 max_freq;
  64. u32 min_freq;
  65. u32 curr_freq;
  66. bool enabled;
  67. };
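/*
 * MMIO and clock context captured at probe/init time; used by the exported
 * qti_hwkm_* entry points below.
 */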
  68. static struct ice_mmio_data *mmio_data_ref;
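/* Read @reg from the selected instance and extract the field given by @mask and @offset. */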
  69. static unsigned int qti_hwkm_get_reg_data(struct ice_mmio_data *mmio_data,
  70. u32 reg, u32 offset, u32 mask,
  71. enum hwkm_destination dest)
  72. {
  73. u32 val;
  74. val = qti_hwkm_readl(mmio_data, reg, dest);
  75. return ((val & mask) >> offset);
  76. }
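/* Return true if bit @nr of @reg is set in the selected HWKM instance. */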
  77. static bool qti_hwkm_testb(struct ice_mmio_data *mmio_data, u32 reg, u8 nr,
  78. enum hwkm_destination dest)
  79. {
  80. u32 val = qti_hwkm_readl(mmio_data, reg, dest);
  81. val = (val >> nr) & 0x1;
  82. if (val == 0)
  83. return false;
  84. return true;
  85. }
  86. /**
  87. * qti_hwkm_master_transaction() - Send a command packet to the HWKM Master instance.
  88. *
  89. * @mmio_data: Structure holding ICE address registers.
  90. * @cmd_packet: Pointer to Master transaction command.
  91. * @cmd_words: Master transaction command size.
  92. * @rsp_packet: Pointer to Master transaction response.
  93. * @rsp_words: Master transaction response size.
  94. *
* Sends the command as described in section 3.2.5.1 of the Key Manager HPG:
  96. * - Clear CMD FIFO
  97. * - Clear Error Status Register
  98. * - Write CMD_ENABLE = 1
  99. * - for word in cmd_packet:
  100. * - poll until CMD_FIFO_AVAILABLE_SPACE > 0.
* Timeout error after MAX_RETRIES retries.
  102. * - write word to CMD register
  103. * - for word in rsp_packet:
  104. * - poll until RSP_FIFO_AVAILABLE_DATA > 0.
* Timeout error after MAX_RETRIES retries.
  106. * - read word from RSP register
  107. * - Verify CMD_DONE == 1
  108. * - Clear CMD_DONE
  109. *
  110. * Context: Any context.
  111. *
* Return: 0 on success, -EINVAL on failure.
  113. */
  114. static int qti_hwkm_master_transaction(struct ice_mmio_data *mmio_data,
  115. const uint32_t *cmd_packet,
  116. size_t cmd_words,
  117. uint32_t *rsp_packet,
  118. size_t rsp_words)
  119. {
  120. int i;
  121. u32 val;
  122. uint32_t rsp_discard;
  123. /* Clear CMD FIFO */
  124. qti_hwkm_setb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
  125. CMD_FIFO_CLEAR_BIT, KM_MASTER);
  126. /* Write memory barrier */
  127. wmb();
  128. qti_hwkm_clearb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
  129. CMD_FIFO_CLEAR_BIT, KM_MASTER);
  130. /* Write memory barrier */
  131. wmb();
  132. /* Clear previous CMD errors, write 1 to err bits */
  133. val = qti_hwkm_readl(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR,
  134. KM_MASTER);
  135. qti_hwkm_writel(mmio_data_ref, val,
  136. QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR,
  137. KM_MASTER);
  138. /* Write memory barrier */
  139. wmb();
  140. /* Enable command */
  141. qti_hwkm_setb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL, CMD_ENABLE_BIT,
  142. KM_MASTER);
  143. /* Write memory barrier */
  144. wmb();
  145. if (qti_hwkm_testb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
  146. CMD_FIFO_CLEAR_BIT, KM_MASTER)) {
  147. pr_err("%s: CMD_FIFO_CLEAR_BIT not set\n", __func__);
return -EINVAL;
  149. }
  150. if (qti_hwkm_testb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
  151. RSP_FIFO_NOT_EMPTY, KM_MASTER)) {
  152. while (qti_hwkm_get_reg_data(mmio_data_ref,
  153. QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
  154. RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
  155. KM_MASTER) > 0) {
  156. rsp_discard = qti_hwkm_readl(mmio_data_ref,
  157. QTI_HWKM_MASTER_RG_BANK2_RSP_0, KM_MASTER);
  158. }
  159. /* Clear RSP_FIFO_NOT_EMPTY status bit */
  160. qti_hwkm_setb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
  161. RSP_FIFO_NOT_EMPTY, KM_MASTER);
  162. /* Write memory barrier */
  163. wmb();
  164. }
  165. for (i = 0; i < cmd_words; i++) {
  166. WAIT_UNTIL(qti_hwkm_get_reg_data(mmio_data_ref,
  167. QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
  168. CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
  169. KM_MASTER) > 0);
  170. if (qti_hwkm_get_reg_data(mmio_data_ref,
  171. QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
  172. CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
  173. KM_MASTER) == 0) {
  174. pr_err("%s: cmd fifo space not available\n", __func__);
return -EINVAL;
  176. }
  177. qti_hwkm_writel(mmio_data_ref, cmd_packet[i],
  178. QTI_HWKM_MASTER_RG_BANK2_CMD_0, KM_MASTER);
  179. /* Write memory barrier */
  180. wmb();
  181. }
  182. for (i = 0; i < rsp_words; i++) {
  183. WAIT_UNTIL(qti_hwkm_get_reg_data(mmio_data_ref,
  184. QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
  185. RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
  186. KM_MASTER) > 0);
  187. if (qti_hwkm_get_reg_data(mmio_data_ref,
  188. QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
  189. RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
  190. KM_MASTER) == 0) {
  191. pr_err("%s: rsp fifo data not available\n", __func__);
  192. return -EINVAL;
  193. }
  194. rsp_packet[i] = qti_hwkm_readl(mmio_data_ref,
  195. QTI_HWKM_MASTER_RG_BANK2_RSP_0, KM_MASTER);
  196. }
  197. if (!qti_hwkm_testb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
  198. CMD_DONE_BIT, KM_MASTER)) {
  199. pr_err("%s: CMD_DONE_BIT not set\n", __func__);
return -EINVAL;
  201. }
  202. /* Clear CMD_DONE status bit */
  203. qti_hwkm_setb(mmio_data_ref, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
  204. CMD_DONE_BIT, KM_MASTER);
  205. /* Write memory barrier */
  206. wmb();
  207. return 0;
  208. }
  209. /**
* qti_hwkm_ice_transaction() - Send a command packet to the HWKM ICE slave instance.
*
* @mmio_data: Structure holding ICE address registers.
* @cmd_packet: Pointer to ICE slave transaction command.
* @cmd_words: ICE slave transaction command size.
* @rsp_packet: Pointer to ICE slave transaction response.
* @rsp_words: ICE slave transaction response size.
  217. *
  218. * Send a command packet to the HWKM ICE slave instance as described in
  219. * section 3.2.5.1 of Key Manager HPG
  220. * - Clear CMD FIFO
  221. * - Clear Error Status Register
  222. * - Write CMD_ENABLE = 1
  223. * - for word in cmd_packet:
  224. * - poll until CMD_FIFO_AVAILABLE_SPACE > 0.
* Timeout error after MAX_RETRIES retries.
  226. * - write word to CMD register
  227. * - for word in rsp_packet:
  228. * - poll until RSP_FIFO_AVAILABLE_DATA > 0.
* Timeout error after MAX_RETRIES retries.
  230. * - read word from RSP register
  231. * - Verify CMD_DONE == 1
  232. * - Clear CMD_DONE
  233. *
  234. * Context: Any context.
  235. *
* Return: 0 on success, -EINVAL on failure.
  237. */
  238. static int qti_hwkm_ice_transaction(struct ice_mmio_data *mmio_data,
  239. const uint32_t *cmd_packet,
  240. size_t cmd_words,
  241. uint32_t *rsp_packet,
  242. size_t rsp_words)
  243. {
  244. int i;
  245. u32 val;
  246. uint32_t rsp_discard;
  247. /* Clear CMD FIFO */
  248. qti_hwkm_setb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
  249. CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE);
  250. /* Write memory barrier */
  251. wmb();
  252. qti_hwkm_clearb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
  253. CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE);
  254. /* Write memory barrier */
  255. wmb();
  256. /* Clear previous CMD errors, write 1 to err bits */
  257. val = qti_hwkm_readl(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_ESR,
  258. ICEMEM_SLAVE);
  259. qti_hwkm_writel(mmio_data, val,
  260. QTI_HWKM_ICE_RG_BANK0_BANKN_ESR,
  261. ICEMEM_SLAVE);
  262. /* Write memory barrier */
  263. wmb();
  264. /* Enable command */
  265. qti_hwkm_setb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL, CMD_ENABLE_BIT,
  266. ICEMEM_SLAVE);
  267. /* Write memory barrier */
  268. wmb();
  269. if (qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
  270. CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE)) {
  271. pr_err("%s: CMD_FIFO_CLEAR_BIT not set\n", __func__);
return -EINVAL;
  273. }
  274. if (qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
  275. RSP_FIFO_NOT_EMPTY, ICEMEM_SLAVE)) {
  276. while (qti_hwkm_get_reg_data(mmio_data,
  277. QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
  278. RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
  279. ICEMEM_SLAVE) > 0) {
  280. rsp_discard = qti_hwkm_readl(mmio_data,
  281. QTI_HWKM_ICE_RG_BANK0_RSP_0, ICEMEM_SLAVE);
  282. }
pr_debug("%s: drained stale responses from RSP FIFO\n", __func__);
  284. /* Clear RSP_FIFO_NOT_EMPTY status bit */
  285. qti_hwkm_setb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
  286. RSP_FIFO_NOT_EMPTY, ICEMEM_SLAVE);
  287. /* Write memory barrier */
  288. wmb();
  289. }
  290. for (i = 0; i < cmd_words; i++) {
  291. WAIT_UNTIL(qti_hwkm_get_reg_data(mmio_data,
  292. QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
  293. CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
  294. ICEMEM_SLAVE) > 0);
  295. if (qti_hwkm_get_reg_data(mmio_data,
  296. QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
  297. CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
  298. ICEMEM_SLAVE) == 0) {
  299. pr_err("%s: cmd fifo space not available\n", __func__);
return -EINVAL;
  301. }
  302. qti_hwkm_writel(mmio_data, cmd_packet[i],
  303. QTI_HWKM_ICE_RG_BANK0_CMD_0, ICEMEM_SLAVE);
  304. /* Write memory barrier */
  305. wmb();
  306. }
  307. for (i = 0; i < rsp_words; i++) {
  308. WAIT_UNTIL(qti_hwkm_get_reg_data(mmio_data,
  309. QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
  310. RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
  311. ICEMEM_SLAVE) > 0);
  312. if (qti_hwkm_get_reg_data(mmio_data,
  313. QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
  314. RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
  315. ICEMEM_SLAVE) == 0) {
  316. pr_err("%s: rsp fifo data not available\n", __func__);
return -EINVAL;
  318. }
  319. rsp_packet[i] = qti_hwkm_readl(mmio_data,
  320. QTI_HWKM_ICE_RG_BANK0_RSP_0, ICEMEM_SLAVE);
  321. }
  322. if (!qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
  323. CMD_DONE_BIT, ICEMEM_SLAVE)) {
  324. pr_err("%s: CMD_DONE_BIT not set\n", __func__);
return -EINVAL;
  326. }
  327. /* Clear CMD_DONE status bit */
  328. qti_hwkm_setb(mmio_data, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
  329. CMD_DONE_BIT, ICEMEM_SLAVE);
  330. /* Write memory barrier */
  331. wmb();
  332. return 0;
  333. }
  334. /*
  335. * qti_hwkm_run_transaction() - Send a command packet to the selected KM instance and read
  336. * the response
  337. *
  338. * @mmio_data [in] Structure holding ICE address registers.
  339. * @dest [in] Destination KM instance.
  340. * @cmd_packet [in] pointer to start of command packet.
  341. * @cmd_words [in] words in the command packet.
  342. * @rsp_packet [out] pointer to start of response packet.
  343. * @rsp_words [in] words in the response buffer.
  344. *
  345. * Context: Any context.
  346. *
* Return: 0 on success, -EINVAL on failure (including an unknown destination).
  348. */
  349. static int qti_hwkm_run_transaction(struct ice_mmio_data *mmio_data,
  350. enum hwkm_destination dest,
  351. const uint32_t *cmd_packet,
  352. size_t cmd_words,
  353. uint32_t *rsp_packet,
  354. size_t rsp_words)
  355. {
  356. int status;
if (cmd_packet == NULL || rsp_packet == NULL)
	return -EINVAL;
  361. switch (dest) {
  362. case KM_MASTER:
  363. status = qti_hwkm_master_transaction(mmio_data,
  364. cmd_packet, cmd_words,
  365. rsp_packet, rsp_words);
  366. break;
  367. case ICEMEM_SLAVE:
  368. status = qti_hwkm_ice_transaction(mmio_data,
  369. cmd_packet, cmd_words,
  370. rsp_packet, rsp_words);
  371. break;
  372. default:
status = -EINVAL;
  374. break;
  375. }
  376. return status;
  377. }
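/* Pack the in-memory key policy into the serialized form carried in command packets. */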
  378. static void serialize_policy(struct hwkm_serialized_policy *out,
  379. const struct hwkm_key_policy *policy)
  380. {
  381. memset(out, 0, sizeof(struct hwkm_serialized_policy));
  382. out->wrap_with_tpkey = policy->wrap_with_tpk_allowed;
  383. out->hw_destination = policy->hw_destination;
  384. out->security_level = policy->security_lvl;
  385. out->swap_export_allowed = policy->swap_export_allowed;
  386. out->wrap_export_allowed = policy->wrap_export_allowed;
  387. out->key_type = policy->key_type;
  388. out->kdf_depth = policy->kdf_depth;
  389. out->encrypt_allowed = policy->enc_allowed;
  390. out->decrypt_allowed = policy->dec_allowed;
  391. out->alg_allowed = policy->alg_allowed;
  392. out->key_management_by_tz_secure_allowed = policy->km_by_tz_allowed;
  393. out->key_management_by_nonsecure_allowed = policy->km_by_nsec_allowed;
  394. out->key_management_by_modem_allowed = policy->km_by_modem_allowed;
  395. out->key_management_by_spu_allowed = policy->km_by_spu_allowed;
  396. }
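/* Pack the KDF binding state vector (BSVE) and the mks value into the serialized KDF format. */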
  397. static void serialize_kdf_bsve(struct hwkm_kdf_bsve *out,
  398. const struct hwkm_bsve *bsve, u8 mks)
  399. {
  400. memset(out, 0, sizeof(struct hwkm_kdf_bsve));
  401. out->mks = mks;
  402. out->key_policy_version_en = bsve->km_key_policy_ver_en;
  403. out->apps_secure_en = bsve->km_apps_secure_en;
  404. out->msa_secure_en = bsve->km_msa_secure_en;
  405. out->lcm_fuse_row_en = bsve->km_lcm_fuse_en;
  406. out->boot_stage_otp_en = bsve->km_boot_stage_otp_en;
  407. out->swc_en = bsve->km_swc_en;
  408. out->fuse_region_sha_digest_en = bsve->km_fuse_region_sha_digest_en;
  409. out->child_key_policy_en = bsve->km_child_key_policy_en;
  410. out->mks_en = bsve->km_mks_en;
  411. }
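/* Inverse of serialize_policy(): expand a serialized policy read back from hardware. */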
  412. static void deserialize_policy(struct hwkm_key_policy *out,
  413. const struct hwkm_serialized_policy *policy)
  414. {
  415. memset(out, 0, sizeof(struct hwkm_key_policy));
  416. out->wrap_with_tpk_allowed = policy->wrap_with_tpkey;
  417. out->hw_destination = policy->hw_destination;
  418. out->security_lvl = policy->security_level;
  419. out->swap_export_allowed = policy->swap_export_allowed;
  420. out->wrap_export_allowed = policy->wrap_export_allowed;
  421. out->key_type = policy->key_type;
  422. out->kdf_depth = policy->kdf_depth;
  423. out->enc_allowed = policy->encrypt_allowed;
  424. out->dec_allowed = policy->decrypt_allowed;
  425. out->alg_allowed = policy->alg_allowed;
  426. out->km_by_tz_allowed = policy->key_management_by_tz_secure_allowed;
  427. out->km_by_nsec_allowed = policy->key_management_by_nonsecure_allowed;
  428. out->km_by_modem_allowed = policy->key_management_by_modem_allowed;
  429. out->km_by_spu_allowed = policy->key_management_by_spu_allowed;
  430. }
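/* Reverse @bytes in place; uses XOR swaps, so no temporary storage is needed. */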
  431. static void reverse_bytes(u8 *bytes, size_t len)
  432. {
  433. size_t left;
  434. size_t right;
  435. for (left = 0, right = len - 1; left < right; left++, right--) {
  436. bytes[left] ^= bytes[right];
  437. bytes[right] ^= bytes[left];
  438. bytes[left] ^= bytes[right];
  439. }
  440. }
  441. static void reorder_ctx(u8 *ctx, size_t ctxlen)
  442. {
  443. int i;
  444. int len;
  445. len = ctxlen / BYTE_ORDER_VAL;
  446. /* Reverse ctx at 8 byte boundary */
  447. for (i = 0; i < len; i++)
  448. reverse_bytes(ctx + i*BYTE_ORDER_VAL, BYTE_ORDER_VAL);
  449. /*
  450. * If context is not a multiple of 8 bytes, reverse the last bytes
  451. * only. This simulates prepending the last 8 bytes with zeroes,
  452. * and then reversing the 8 bytes.
  453. */
  454. if (ctxlen % BYTE_ORDER_VAL != 0) {
  455. reverse_bytes(ctx + len*BYTE_ORDER_VAL,
  456. ctxlen % BYTE_ORDER_VAL);
  457. }
  458. }
  459. /*
* qti_handle_key_unwrap_import() - Process the Unwrap Import command.
*
* @mmio_data: Structure holding ICE address registers.
* @cmd_in: Pointer to input command packet.
  464. * @rsp_in: Pointer to output response packet.
  465. *
  466. * Command packet format (word indices):
  467. * CMD[0] = Operation info (OP, IRQ_EN, DKS, LEN)
  468. * CMD[1:17] = Wrapped Key Blob
  469. * CMD[18] = CRC (disabled)
  470. *
  471. * Response packet format (word indices):
  472. * RSP[0] = Operation info (OP, IRQ_EN, LEN)
  473. * RSP[1] = Error status
  474. *
  475. * Context: Any context.
  476. *
* Return: 0 on success, -EINVAL on failure.
  478. */
  479. static int qti_handle_key_unwrap_import(struct ice_mmio_data *mmio_data,
  480. const struct hwkm_cmd *cmd_in,
  481. struct hwkm_rsp *rsp_in)
  482. {
  483. int status;
  484. u32 cmd[UNWRAP_IMPORT_CMD_WORDS] = {0};
  485. u32 rsp[UNWRAP_IMPORT_RSP_WORDS] = {0};
  486. struct hwkm_operation_info operation = {
  487. .op = KEY_UNWRAP_IMPORT,
  488. .irq_en = ASYNC_CMD_HANDLING,
  489. .slot1_desc = cmd_in->unwrap.dks,
  490. .slot2_desc = cmd_in->unwrap.kwk,
  491. .len = UNWRAP_IMPORT_CMD_WORDS
  492. };
  493. pr_debug("%s: KEY_UNWRAP_IMPORT start\n", __func__);
  494. if (cmd_in->unwrap.sz != EXPECTED_UNWRAP_KEY_SIZE) {
  495. pr_err("%s: Invalid key size - %d\n", __func__,
  496. cmd_in->unwrap.sz);
  497. return -EINVAL;
  498. }
  499. /*
  500. * Unwrap in HWKM does not do an integrity check for the last byte
  501. * (68th byte) as it is a noop. However, we need to make sure no
  502. * part of the keyblob provided was tampered with, even though it
  503. * is a noop. Adding an explicit check for the last byte before
  504. * providing to unwrap command.
  505. */
  506. if ((cmd_in->unwrap.wkb[EXPECTED_UNWRAP_KEY_SIZE - 1]) != 0x00) {
  507. pr_err("%s: Last byte corrupted, expecting zero value\n",
  508. __func__);
  509. return -EINVAL;
  510. }
  511. memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
  512. memcpy(cmd + COMMAND_WRAPPED_KEY_IDX, cmd_in->unwrap.wkb,
  513. cmd_in->unwrap.sz);
  514. status = qti_hwkm_run_transaction(mmio_data, ICEMEM_SLAVE, cmd,
  515. UNWRAP_IMPORT_CMD_WORDS, rsp, UNWRAP_IMPORT_RSP_WORDS);
  516. if (status) {
  517. pr_err("%s: Error running transaction %d\n", __func__, status);
  518. return status;
  519. }
  520. rsp_in->status = rsp[RESPONSE_ERR_IDX];
  521. if (rsp_in->status) {
  522. pr_err("%s: KEY_UNWRAP_IMPORT error status 0x%x\n", __func__,
  523. rsp_in->status);
  524. return rsp_in->status;
  525. }
  526. return status;
  527. }
  528. /*
  529. * qti_handle_keyslot_clear() - Clear ICE slave keyslot.
  530. *
  531. * @mmio_data: Structure holding ICE address registers.
* @cmd_in: Pointer to input command packet.
  533. * @rsp_in: Pointer to output response packet.
  534. *
  535. * Command packet format (word indices):
  536. * CMD[0] = Operation info (OP, IRQ_EN, DKS, DK, LEN)
  537. * CMD[1] = CRC (disabled)
  538. *
  539. * Response packet format (word indices):
  540. * RSP[0] = Operation info (OP, IRQ_EN, LEN)
  541. * RSP[1] = Error status
  542. *
  543. * Context: Any context.
  544. *
* Return: 0 on success, -EINVAL on failure.
  546. */
  547. static int qti_handle_keyslot_clear(struct ice_mmio_data *mmio_data, const struct hwkm_cmd *cmd_in,
  548. struct hwkm_rsp *rsp_in)
  549. {
  550. int status;
  551. u32 cmd[KEYSLOT_CLEAR_CMD_WORDS] = {0};
  552. u32 rsp[KEYSLOT_CLEAR_RSP_WORDS] = {0};
  553. struct hwkm_operation_info operation = {
  554. .op = KEY_SLOT_CLEAR,
  555. .irq_en = ASYNC_CMD_HANDLING,
  556. .slot1_desc = cmd_in->clear.dks,
  557. .op_flag = cmd_in->clear.is_double_key,
  558. .len = KEYSLOT_CLEAR_CMD_WORDS
  559. };
  560. pr_debug("%s: KEY_SLOT_CLEAR start\n", __func__);
  561. memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
  562. status = qti_hwkm_run_transaction(mmio_data, ICEMEM_SLAVE, cmd,
  563. KEYSLOT_CLEAR_CMD_WORDS, rsp,
  564. KEYSLOT_CLEAR_RSP_WORDS);
  565. if (status) {
  566. pr_err("%s: Error running transaction %d\n", __func__, status);
  567. return status;
  568. }
  569. rsp_in->status = rsp[RESPONSE_ERR_IDX];
  570. if (rsp_in->status)
  571. return rsp_in->status;
  572. return status;
  573. }
  574. /*
  575. * qti_handle_system_kdf(): Process key derivation function.
  576. *
  577. * @mmio_data: Structure holding ICE address registers.
* @cmd_in: Pointer to input command packet.
  579. * @rsp_in: Pointer to output response packet.
  580. *
  581. * NOTE: The command packet can vary in length. If BE = 0, the last 2 indices
  582. * for the BSVE are skipped. Similarly, if Software Context Length (SCL) < 16,
  583. * only SCL words are written to the packet. The CRC word is after the last
  584. * word of the SWC. The LEN field of this command does not include the SCL
  585. * (unlike other commands where the LEN field is the length of the entire
  586. * packet). The HW will expect SCL + LEN words to be sent.
  587. *
  588. * Command packet format (word indices):
  589. * CMD[0] = Operation info (OP, IRQ_EN, DKS, KDK, BE, SCL, LEN)
  590. * CMD[1:2] = Policy
  591. * CMD[3] = BSVE[0] if BE = 1, 0 if BE = 0
  592. * CMD[4:5] = BSVE[1:2] if BE = 1, skipped if BE = 0
  593. * CMD[6:21] = Software Context, only writing the number of words in SCL
  594. * CMD[22] = CRC
  595. *
  596. * Response packet format (word indices):
  597. * RSP[0] = Operation info (OP, IRQ_EN, LEN)
  598. * RSP[1] = Error status
  599. *
  600. * Context: Any context.
  601. *
* Return: 0 on success, -EINVAL on failure.
  603. */
  604. static int qti_handle_system_kdf(struct ice_mmio_data *mmio_data, const struct hwkm_cmd *cmd_in,
  605. struct hwkm_rsp *rsp_in)
  606. {
  607. int status;
  608. u32 cmd[SYSTEM_KDF_CMD_MAX_WORDS] = {0};
  609. u32 rsp[SYSTEM_KDF_RSP_WORDS] = {0};
  610. u8 *cmd_ptr = (u8 *) cmd;
  611. struct hwkm_serialized_policy policy;
  612. struct hwkm_operation_info operation = {
  613. .op = SYSTEM_KDF,
  614. .irq_en = ASYNC_CMD_HANDLING,
  615. .slot1_desc = cmd_in->kdf.dks,
  616. .slot2_desc = cmd_in->kdf.kdk,
  617. .op_flag = cmd_in->kdf.bsve.enabled,
  618. .context_len = BYTES_TO_WORDS(cmd_in->kdf.sz),
  619. .len = SYSTEM_KDF_CMD_MIN_WORDS +
  620. (cmd_in->kdf.bsve.enabled ? BSVE_WORDS : 1)
  621. };
  622. pr_debug("%s: SYSTEM_KDF start\n", __func__);
  623. serialize_policy(&policy, &cmd_in->kdf.policy);
  624. /*
  625. * If context is not a multiple of 8 bytes, but a multiple
  626. * of 4 bytes, add a zero word at the end, to have a context multiple
  627. * of 8 bytes. This is to facilitate the context reordering that will
  628. * happen later
  629. */
  630. if ((cmd_in->kdf.sz) % BYTE_ORDER_VAL == (BYTE_ORDER_VAL/2))
  631. operation.context_len += 1;
  632. WRITE_TO_KDF_PACKET(cmd_ptr, &operation, OPERATION_INFO_LENGTH);
  633. WRITE_TO_KDF_PACKET(cmd_ptr, &policy, KEY_POLICY_LENGTH);
  634. if (cmd_in->kdf.bsve.enabled) {
  635. struct hwkm_kdf_bsve bsve;
  636. serialize_kdf_bsve(&bsve, &cmd_in->kdf.bsve, cmd_in->kdf.mks);
  637. WRITE_TO_KDF_PACKET(cmd_ptr, &bsve, MAX_BSVE_LENGTH);
  638. } else {
  639. /* Skip 4 bytes to align to start of context. */
  640. cmd_ptr += 4 * (sizeof(u8));
  641. }
  642. /*
  643. * Reorder context to reverse context bytes at the 8 byte
  644. * boundary. This is because crypto lib reads at this
  645. * boundary when populating the AD.
  646. */
  647. reorder_ctx((u8 *) cmd_in->kdf.ctx, cmd_in->kdf.sz);
  648. WRITE_TO_KDF_PACKET(cmd_ptr, cmd_in->kdf.ctx, cmd_in->kdf.sz);
  649. status = qti_hwkm_run_transaction(mmio_data, ICEMEM_SLAVE, cmd,
  650. operation.len + operation.context_len,
  651. rsp, SYSTEM_KDF_RSP_WORDS);
  652. if (status) {
  653. pr_err("%s: Error running transaction %d\n", __func__, status);
  654. return status;
  655. }
  656. rsp_in->status = rsp[RESPONSE_ERR_IDX];
  657. if (rsp_in->status) {
  658. pr_err("%s: SYSTEM_KDF error status 0x%x\n", __func__,
  659. rsp_in->status);
  660. return rsp_in->status;
  661. }
  662. return status;
  663. }
  664. /*
  665. * qti_handle_set_tpkey() - Send TP Key to ICE slave.
  666. *
  667. * @mmio_data: Structure holding ICE address registers.
* @cmd_in: Pointer to input command packet.
  669. * @rsp_in: Pointer to output response packet.
  670. *
  671. * Command packet format (word indices):
  672. * CMD[0] = Operation info (OP, IRQ_EN, SKS, LEN)
  673. * CMD[1] = CRC (disabled)
  674. *
  675. * Response packet format (word indices):
  676. * RSP[0] = Operation info (OP, IRQ_EN, LEN)
  677. * RSP[1] = Error status
  678. *
  679. * Context: Any context.
  680. *
* Return: 0 on success, -EINVAL on failure.
  682. */
  683. static int qti_handle_set_tpkey(struct ice_mmio_data *mmio_data, const struct hwkm_cmd *cmd_in,
  684. struct hwkm_rsp *rsp_in)
  685. {
  686. int status;
  687. u32 cmd[SET_TPKEY_CMD_WORDS] = {0};
  688. u32 rsp[SET_TPKEY_RSP_WORDS] = {0};
  689. struct hwkm_operation_info operation = {
  690. .op = SET_TPKEY,
  691. .irq_en = ASYNC_CMD_HANDLING,
  692. .slot1_desc = cmd_in->set_tpkey.sks,
  693. .len = SET_TPKEY_CMD_WORDS
  694. };
  695. pr_debug("%s: SET_TPKEY start\n", __func__);
  696. memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
  697. status = qti_hwkm_run_transaction(mmio_data, KM_MASTER, cmd,
  698. SET_TPKEY_CMD_WORDS, rsp, SET_TPKEY_RSP_WORDS);
  699. if (status) {
  700. pr_err("%s: Error running transaction %d\n", __func__, status);
  701. return status;
  702. }
  703. rsp_in->status = rsp[RESPONSE_ERR_IDX];
  704. if (rsp_in->status) {
  705. pr_err("%s: SET_TPKEY error status 0x%x\n", __func__,
  706. rsp_in->status);
  707. return rsp_in->status;
  708. }
  709. return status;
  710. }
  711. /**
  712. * qti_handle_keyslot_rdwr() - Process read-write command.
  713. *
  714. * @mmio_data: Structure holding ICE address registers.
* @cmd_in: Pointer to input command packet.
* @rsp_in: Pointer to output response packet.
*
* NOTE: To anyone maintaining or porting this code wondering why the key
  719. * is reversed in the command packet: the plaintext key value is expected by
  720. * the HW in reverse byte order.
  721. * See section 1.8.2.2 of the HWKM CPAS for more details
  722. * Mapping of key to CE key read order:
  723. * Key[255:224] -> CRYPTO0_CRYPTO_ENCR_KEY0
  724. * Key[223:192] -> CRYPTO0_CRYPTO_ENCR_KEY1
  725. * ...
  726. * Key[63:32] -> CRYPTO0_CRYPTO_ENCR_KEY6
  727. * Key[31:0] -> CRYPTO0_CRYPTO_ENCR_KEY7
  728. * In this notation Key[31:0] is the least significant word of the key
  729. * If the key length is less than 256 bits, the key is filled in from
  730. * higher index to lower
  731. * For example, for a 128 bit key, Key[255:128] would have the key,
  732. * Key[127:0] would be all 0
  733. * This means that CMD[3:6] is all 0, CMD[7:10] has the key value.
  734. *
  735. * Command packet format (word indices):
  736. * CMD[0] = Operation info (OP, IRQ_EN, DKS/SKS, WE, LEN)
  737. * CMD[1:2] = Policy (0 if we == 0)
  738. * CMD[3:10] = Write key value (0 if we == 0)
  739. * CMD[11] = CRC (disabled)
  740. *
  741. * Response packet format (word indices):
  742. * RSP[0] = Operation info (OP, IRQ_EN, LEN)
  743. * RSP[1] = Error status
  744. * RSP[2:3] = Policy (0 if we == 1)
  745. * RSP[4:11] = Read key value (0 if we == 1)
  746. *
  747. * Context: Any context.
  748. *
* Return: 0 on success, -EINVAL on failure.
*
*/
  752. static int qti_handle_keyslot_rdwr(struct ice_mmio_data *mmio_data, const struct hwkm_cmd *cmd_in,
  753. struct hwkm_rsp *rsp_in)
  754. {
  755. int status;
  756. u32 cmd[KEYSLOT_RDWR_CMD_WORDS] = {0};
  757. u32 rsp[KEYSLOT_RDWR_RSP_WORDS] = {0};
  758. struct hwkm_serialized_policy policy;
  759. struct hwkm_operation_info operation = {
  760. .op = KEY_SLOT_RDWR,
  761. .irq_en = ASYNC_CMD_HANDLING,
  762. .slot1_desc = cmd_in->rdwr.slot,
  763. .op_flag = cmd_in->rdwr.is_write,
  764. .len = KEYSLOT_RDWR_CMD_WORDS
  765. };
  766. pr_debug("%s: KEY_SLOT_RDWR start\n", __func__);
  767. memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
  768. if (cmd_in->rdwr.is_write) {
  769. serialize_policy(&policy, &cmd_in->rdwr.policy);
  770. memcpy(cmd + COMMAND_KEY_POLICY_IDX, &policy,
  771. KEY_POLICY_LENGTH);
  772. memcpy(cmd + COMMAND_KEY_VALUE_IDX, cmd_in->rdwr.key,
  773. cmd_in->rdwr.sz);
  774. /* Need to reverse the key because the HW expects it in reverse byte order */
  775. reverse_bytes((u8 *) (cmd + COMMAND_KEY_VALUE_IDX),
  776. HWKM_MAX_KEY_SIZE);
  777. }
  778. status = qti_hwkm_run_transaction(mmio_data, ICEMEM_SLAVE, cmd,
  779. KEYSLOT_RDWR_CMD_WORDS, rsp, KEYSLOT_RDWR_RSP_WORDS);
  780. if (status) {
  781. pr_err("%s: Error running transaction %d\n", __func__, status);
  782. return status;
  783. }
  784. rsp_in->status = rsp[RESPONSE_ERR_IDX];
  785. if (rsp_in->status) {
  786. pr_err("%s: KEY_SLOT_RDWR error status 0x%x\n",
  787. __func__, rsp_in->status);
  788. return rsp_in->status;
  789. }
  790. if (!cmd_in->rdwr.is_write &&
  791. (rsp_in->status == 0)) {
  792. memcpy(&policy, rsp + RESPONSE_KEY_POLICY_IDX,
  793. KEY_POLICY_LENGTH);
  794. memcpy(rsp_in->rdwr.key,
  795. rsp + RESPONSE_KEY_VALUE_IDX, RESPONSE_KEY_LENGTH);
  796. /* Need to reverse the key because the HW returns it in reverse byte order */
  797. reverse_bytes(rsp_in->rdwr.key, HWKM_MAX_KEY_SIZE);
  798. rsp_in->rdwr.sz = RESPONSE_KEY_LENGTH;
  799. deserialize_policy(&rsp_in->rdwr.policy, &policy);
  800. }
/* Clear cmd and rsp buffers, since they may contain plaintext keys */
memzero_explicit(cmd, sizeof(cmd));
memzero_explicit(rsp, sizeof(rsp));
  804. return status;
  805. }
  806. static int qti_hwkm_parse_clock_info(struct platform_device *pdev,
  807. struct ice_mmio_data *hwkm_dev)
  808. {
  809. int ret = -EINVAL, cnt, i, len;
  810. struct device *dev = &pdev->dev;
  811. struct device_node *np = dev->of_node;
  812. char *name;
  813. struct hwkm_clk_info *clki;
  814. u32 *clkfreq = NULL;
  815. if (!np)
  816. goto out;
  817. cnt = of_property_count_strings(np, "clock-names");
  818. if (cnt <= 0) {
  819. dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
  820. __func__);
  821. ret = cnt;
  822. goto out;
  823. }
  824. if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
  825. dev_info(dev, "qcom,op-freq-hz property not specified\n");
  826. goto out;
  827. }
  828. len = len/sizeof(*clkfreq);
  829. if (len != cnt)
  830. goto out;
  831. clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
  832. if (!clkfreq) {
  833. ret = -ENOMEM;
  834. goto out;
  835. }
ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
if (ret)
	goto out;
INIT_LIST_HEAD(&hwkm_dev->clk_list_head);
  838. for (i = 0; i < cnt; i++) {
  839. ret = of_property_read_string_index(np,
  840. "clock-names", i, (const char **)&name);
  841. if (ret)
  842. goto out;
  843. clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
  844. if (!clki) {
  845. ret = -ENOMEM;
  846. goto out;
  847. }
  848. clki->max_freq = clkfreq[i];
  849. clki->name = kstrdup(name, GFP_KERNEL);
  850. list_add_tail(&clki->list, &hwkm_dev->clk_list_head);
  851. }
  852. out:
  853. return ret;
  854. }
  855. static int qti_hwkm_init_clocks(struct ice_mmio_data *hwkm_dev)
  856. {
  857. int ret = -EINVAL;
  858. struct hwkm_clk_info *clki = NULL;
  859. struct device *dev = hwkm_dev->dev;
  860. struct list_head *head = &hwkm_dev->clk_list_head;
  861. if (!hwkm_dev->is_hwkm_clk_available)
  862. return 0;
  863. if (!head || list_empty(head)) {
  864. dev_err(dev, "%s: HWKM clock list null/empty\n", __func__);
  865. goto out;
  866. }
  867. list_for_each_entry(clki, head, list) {
  868. if (!clki->name)
  869. continue;
  870. clki->clk = devm_clk_get(dev, clki->name);
  871. if (IS_ERR(clki->clk)) {
  872. ret = PTR_ERR(clki->clk);
  873. dev_err(dev, "%s: %s clk get failed, %d\n",
  874. __func__, clki->name, ret);
  875. goto out;
  876. }
  877. ret = 0;
  878. if (clki->max_freq) {
  879. ret = clk_set_rate(clki->clk, clki->max_freq);
  880. if (ret) {
  881. dev_err(dev,
  882. "%s: %s clk set rate(%dHz) failed, %d\n",
  883. __func__, clki->name, clki->max_freq, ret);
  884. goto out;
  885. }
  886. clki->curr_freq = clki->max_freq;
  887. dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
  888. clki->name, clk_get_rate(clki->clk));
  889. }
  890. }
  891. out:
  892. return ret;
  893. }
  894. static int qti_hwkm_enable_disable_clocks(struct ice_mmio_data *hwkm_dev,
  895. bool enable)
  896. {
int ret = 0;
  898. struct hwkm_clk_info *clki = NULL;
  899. struct device *dev = hwkm_dev->dev;
  900. struct list_head *head = &hwkm_dev->clk_list_head;
  901. if (!head || list_empty(head)) {
  902. dev_err(dev, "%s: HWKM clock list null/empty\n", __func__);
  903. ret = -EINVAL;
  904. goto out;
  905. }
  906. if (!hwkm_dev->is_hwkm_clk_available) {
  907. dev_err(dev, "%s: HWKM clock not available\n", __func__);
  908. ret = -EINVAL;
  909. goto out;
  910. }
  911. list_for_each_entry(clki, head, list) {
  912. if (!clki->name)
  913. continue;
if (enable) {
	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_err(dev, "Unable to enable HWKM clock\n");
		goto out;
	}
} else {
	clk_disable_unprepare(clki->clk);
}
  923. }
  924. out:
  925. return ret;
  926. }
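/**
 * qti_hwkm_clocks() - Enable or disable the HWKM clocks parsed at probe time.
 * @on: true to enable the clocks, false to disable them.
 *
 * Return: 0 on success, negative errno on failure.
 */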
  927. int qti_hwkm_clocks(bool on)
  928. {
int ret;

if (!mmio_data_ref)
	return -ENODEV;

ret = qti_hwkm_enable_disable_clocks(mmio_data_ref, on);
  931. if (ret) {
  932. pr_err("%s:%pK Could not enable/disable clocks\n",
  933. __func__, mmio_data_ref);
  934. }
  935. return ret;
  936. }
  937. EXPORT_SYMBOL(qti_hwkm_clocks);
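/*
 * Map the KM master register space and, when "qcom,enable-hwkm-clk" is set
 * in the device tree node, parse the HWKM clock information.
 */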
  938. static int qti_hwkm_get_device_tree_data(struct platform_device *pdev,
  939. struct ice_mmio_data *hwkm_dev)
  940. {
  941. struct device *dev = &pdev->dev;
  942. int ret = 0;
  943. hwkm_dev->km_res = platform_get_resource_byname(pdev,
  944. IORESOURCE_MEM, "km_master");
  945. if (!hwkm_dev->km_res) {
  946. pr_err("%s: No memory available for IORESOURCE\n", __func__);
  947. return -ENOMEM;
  948. }
  949. hwkm_dev->km_base = devm_ioremap_resource(dev, hwkm_dev->km_res);
  950. if (IS_ERR(hwkm_dev->km_base)) {
  951. ret = PTR_ERR(hwkm_dev->km_base);
  952. pr_err("%s: Error = %d mapping HWKM memory\n", __func__, ret);
  953. goto out;
  954. }
  955. hwkm_dev->is_hwkm_clk_available = of_property_read_bool(
  956. dev->of_node, "qcom,enable-hwkm-clk");
  957. if (hwkm_dev->is_hwkm_clk_available) {
  958. ret = qti_hwkm_parse_clock_info(pdev, hwkm_dev);
  959. if (ret) {
  960. pr_err("%s: qti_hwkm_parse_clock_info failed (%d)\n",
  961. __func__, ret);
  962. goto out;
  963. }
  964. }
  965. out:
  966. return ret;
  967. }
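/**
 * qti_hwkm_handle_cmd() - Dispatch a HWKM command to its handler.
 * @cmd: Filled-in command descriptor; the operation is selected via @cmd->op.
 * @rsp: Response descriptor populated by the handler.
 *
 * Only SET_TPKEY, KEY_UNWRAP_IMPORT, KEY_SLOT_CLEAR, KEY_SLOT_RDWR and
 * SYSTEM_KDF are handled here; all other operations return -EINVAL.
 *
 * Return: 0 on success, a negative errno or a HWKM error status on failure.
 */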
  968. int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp)
{
	if (!cmd || !rsp || !mmio_data_ref)
		return -EINVAL;

	switch (cmd->op) {
  971. case SET_TPKEY:
  972. return qti_handle_set_tpkey(mmio_data_ref, cmd, rsp);
  973. case KEY_UNWRAP_IMPORT:
  974. return qti_handle_key_unwrap_import(mmio_data_ref, cmd, rsp);
  975. case KEY_SLOT_CLEAR:
  976. return qti_handle_keyslot_clear(mmio_data_ref, cmd, rsp);
  977. case KEY_SLOT_RDWR:
  978. return qti_handle_keyslot_rdwr(mmio_data_ref, cmd, rsp);
  979. case SYSTEM_KDF:
  980. return qti_handle_system_kdf(mmio_data_ref, cmd, rsp);
  981. case NIST_KEYGEN:
  982. case KEY_WRAP_EXPORT:
case QFPROM_KEY_RDWR: /* used by HW initialization only */
  984. default:
  985. return -EINVAL;
  986. }
  987. return 0;
  988. }
  989. EXPORT_SYMBOL(qti_hwkm_handle_cmd);
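/*
 * Illustrative example (not taken from this codebase) of how a caller might
 * clear an ICE key slot through the exported API; the slot number used here
 * is hypothetical:
 *
 *	struct hwkm_cmd cmd = {0};
 *	struct hwkm_rsp rsp = {0};
 *	int err;
 *
 *	cmd.op = KEY_SLOT_CLEAR;
 *	cmd.clear.dks = 10;		// hypothetical destination key slot
 *	cmd.clear.is_double_key = false;
 *	err = qti_hwkm_handle_cmd(&cmd, &rsp);
 */

/* Open all key-slot bank access-control registers (BBAC_0..BBAC_4) so HLOS can use the slots. */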
  990. static void qti_hwkm_configure_slot_access(struct ice_mmio_data *mmio_data)
  991. {
  992. qti_hwkm_writel(mmio_data, 0xffffffff,
  993. QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0, ICEMEM_SLAVE);
  994. qti_hwkm_writel(mmio_data, 0xffffffff,
  995. QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1, ICEMEM_SLAVE);
  996. qti_hwkm_writel(mmio_data, 0xffffffff,
  997. QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2, ICEMEM_SLAVE);
  998. qti_hwkm_writel(mmio_data, 0xffffffff,
  999. QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3, ICEMEM_SLAVE);
  1000. qti_hwkm_writel(mmio_data, 0xffffffff,
  1001. QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4, ICEMEM_SLAVE);
  1002. }
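/* Confirm the ICE slave has finished BIST, boot command lists and key table clear before use. */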
  1003. static int qti_hwkm_check_bist_status(struct ice_mmio_data *mmio_data)
  1004. {
  1005. if (!qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
  1006. BIST_DONE, ICEMEM_SLAVE)) {
  1007. pr_err("%s: Error with BIST_DONE\n", __func__);
  1008. return -EINVAL;
  1009. }
  1010. if (!qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
  1011. CRYPTO_LIB_BIST_DONE, ICEMEM_SLAVE)) {
  1012. pr_err("%s: Error with CRYPTO_LIB_BIST_DONE\n", __func__);
  1013. return -EINVAL;
  1014. }
  1015. if (!qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
  1016. BOOT_CMD_LIST1_DONE, ICEMEM_SLAVE)) {
  1017. pr_err("%s: Error with BOOT_CMD_LIST1_DONE\n", __func__);
  1018. return -EINVAL;
  1019. }
  1020. if (!qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
  1021. BOOT_CMD_LIST0_DONE, ICEMEM_SLAVE)) {
  1022. pr_err("%s: Error with BOOT_CMD_LIST0_DONE\n", __func__);
  1023. return -EINVAL;
  1024. }
  1025. if (!qti_hwkm_testb(mmio_data, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
  1026. KT_CLEAR_DONE, ICEMEM_SLAVE)) {
pr_err("%s: Error with KT_CLEAR_DONE\n", __func__);
  1028. return -EINVAL;
  1029. }
  1030. return 0;
  1031. }
  1032. static int qti_hwkm_ice_init_sequence(struct ice_mmio_data *mmio_data)
  1033. {
  1034. int ret;
  1035. /* Put ICE in standard mode */
  1036. qti_hwkm_writel(mmio_data, 0x7, QTI_HWKM_ICE_RG_TZ_KM_CTL, ICEMEM_SLAVE);
  1037. /* Write memory barrier */
  1038. wmb();
  1039. ret = qti_hwkm_check_bist_status(mmio_data);
  1040. if (ret) {
  1041. pr_err("%s: Error in BIST initialization %d\n", __func__, ret);
  1042. return ret;
  1043. }
  1044. /* Disable CRC checks */
  1045. qti_hwkm_clearb(mmio_data, QTI_HWKM_ICE_RG_TZ_KM_CTL,
  1046. CRC_CHECK_EN, ICEMEM_SLAVE);
  1047. /* Write memory barrier */
  1048. wmb();
  1049. /* Configure key slots to be accessed by HLOS */
  1050. qti_hwkm_configure_slot_access(mmio_data);
  1051. /* Write memory barrier */
  1052. wmb();
  1053. /* Clear RSP_FIFO_FULL bit */
  1054. qti_hwkm_setb(mmio_data,
  1055. QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
  1056. RSP_FIFO_FULL, ICEMEM_SLAVE);
  1057. /* Write memory barrier */
  1058. wmb();
  1059. return ret;
  1060. }
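/* Program the ICE slave TPKEY receive control so it can accept the TP key sent by the KM master. */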
  1061. static void qti_hwkm_enable_slave_receive_mode(
  1062. const struct ice_mmio_data *mmio_data)
  1063. {
  1064. qti_hwkm_clearb(mmio_data,
  1065. QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, TPKEY_EN, ICEMEM_SLAVE);
  1066. /* Write memory barrier */
  1067. wmb();
  1068. qti_hwkm_writel(mmio_data, ICEMEM_SLAVE_TPKEY_VAL,
  1069. QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, ICEMEM_SLAVE);
  1070. /* Write memory barrier */
  1071. wmb();
  1072. }
  1073. static void qti_hwkm_disable_slave_receive_mode(
  1074. struct ice_mmio_data *mmio_data)
  1075. {
  1076. qti_hwkm_clearb(mmio_data,
  1077. QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, TPKEY_EN, ICEMEM_SLAVE);
  1078. /* Write memory barrier */
  1079. wmb();
  1080. }
  1081. static void qti_hwkm_check_tpkey_status(struct ice_mmio_data *mmio_data)
  1082. {
  1083. int val;
  1084. val = qti_hwkm_readl(mmio_data,
  1085. QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS, ICEMEM_SLAVE);
  1086. pr_debug("%s: Tpkey receive status 0x%x\n", __func__, val);
  1087. }
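/*
 * Push the transport key (TP key) from KM master slot KM_MASTER_TPKEY_SLOT to
 * the ICE slave: enable slave receive mode, issue SET_TPKEY, then check the
 * receive status and disable receive mode again.
 */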
  1088. static int qti_hwkm_set_tpkey(struct ice_mmio_data *mmio_data)
  1089. {
  1090. int err;
  1091. struct hwkm_cmd cmd_settpkey = {0};
  1092. struct hwkm_rsp rsp_settpkey = {0};
  1093. cmd_settpkey.op = SET_TPKEY;
  1094. cmd_settpkey.set_tpkey.sks = KM_MASTER_TPKEY_SLOT;
  1095. qti_hwkm_enable_slave_receive_mode(mmio_data);
  1096. err = qti_hwkm_handle_cmd(&cmd_settpkey, &rsp_settpkey);
  1097. if (err) {
  1098. pr_err("%s: Error with Set TP key in master %d\n", __func__,
  1099. err);
  1100. return -EINVAL;
  1101. }
  1102. qti_hwkm_check_tpkey_status(mmio_data);
  1103. qti_hwkm_disable_slave_receive_mode(mmio_data);
  1104. return 0;
  1105. }
  1106. int qti_hwkm_init(const struct ice_mmio_data *mmio_data)
  1107. {
  1108. int ret;
  1109. pr_debug("%s %d: HWKM init starts\n", __func__, __LINE__);
if (!mmio_data || !mmio_data->ice_hwkm_mmio || !mmio_data->ice_base_mmio) {
	pr_err("%s: HWKM ICE slave mmio invalid\n", __func__);
	return -EINVAL;
}
if (!mmio_data_ref) {
	pr_err("%s: HWKM device not probed\n", __func__);
	return -ENODEV;
}
  1114. mmio_data_ref->ice_hwkm_mmio = mmio_data->ice_hwkm_mmio;
  1115. mmio_data_ref->ice_base_mmio = mmio_data->ice_base_mmio;
  1116. ret = qti_hwkm_ice_init_sequence(mmio_data_ref);
  1117. if (ret) {
  1118. pr_err("%s: Error in ICE init sequence %d\n", __func__, ret);
  1119. return ret;
  1120. }
  1121. ret = qti_hwkm_set_tpkey(mmio_data_ref);
  1122. if (ret) {
  1123. pr_err("%s: Error setting ICE to receive %d\n", __func__, ret);
  1124. return ret;
  1125. }
  1126. /* Write memory barrier */
  1127. wmb();
  1128. pr_debug("%s %d: HWKM init ends\n", __func__, __LINE__);
  1129. return ret;
  1130. }
  1131. EXPORT_SYMBOL(qti_hwkm_init);
  1132. static int qti_hwkm_probe(struct platform_device *pdev)
  1133. {
  1134. struct ice_mmio_data *hwkm_dev;
  1135. int ret;
  1136. pr_debug("%s %d: HWKM probe start\n", __func__, __LINE__);
  1137. if (!pdev) {
  1138. pr_err("%s: Invalid platform_device passed\n", __func__);
  1139. return -EINVAL;
  1140. }
  1141. hwkm_dev = kzalloc(sizeof(struct ice_mmio_data), GFP_KERNEL);
  1142. if (!hwkm_dev) {
  1143. ret = -ENOMEM;
  1144. pr_err("%s: Error %d allocating memory for HWKM device\n",
  1145. __func__, ret);
  1146. goto err_hwkm_dev;
  1147. }
hwkm_dev->dev = &pdev->dev;
  1155. if (pdev->dev.of_node)
  1156. ret = qti_hwkm_get_device_tree_data(pdev, hwkm_dev);
  1157. else {
  1158. ret = -EINVAL;
  1159. pr_err("%s: HWKM device node not found\n", __func__);
  1160. }
  1161. if (ret)
  1162. goto err_hwkm_dev;
  1163. ret = qti_hwkm_init_clocks(hwkm_dev);
  1164. if (ret) {
  1165. pr_err("%s: Error initializing clocks %d\n", __func__, ret);
  1166. goto err_hwkm_dev;
  1167. }
  1168. hwkm_dev->is_hwkm_enabled = true;
  1169. mmio_data_ref = hwkm_dev;
  1170. platform_set_drvdata(pdev, hwkm_dev);
pr_debug("%s %d: HWKM probe ends\n", __func__, __LINE__);
  1172. return ret;
  1173. err_hwkm_dev:
  1174. mmio_data_ref = NULL;
  1175. kfree(hwkm_dev);
  1176. return ret;
  1177. }
static int qti_hwkm_remove(struct platform_device *pdev)
{
	kfree(platform_get_drvdata(pdev));
	mmio_data_ref = NULL;
	return 0;
}
  1183. static const struct of_device_id qti_hwkm_match[] = {
  1184. { .compatible = "qcom,hwkm"},
  1185. {},
  1186. };
  1187. MODULE_DEVICE_TABLE(of, qti_hwkm_match);
  1188. static struct platform_driver qti_hwkm_driver = {
  1189. .probe = qti_hwkm_probe,
  1190. .remove = qti_hwkm_remove,
  1191. .driver = {
  1192. .name = "qti_hwkm",
  1193. .of_match_table = qti_hwkm_match,
  1194. },
  1195. };
  1196. module_platform_driver(qti_hwkm_driver);
  1197. MODULE_LICENSE("GPL");
  1198. MODULE_DESCRIPTION("QTI Hardware Key Manager library");