// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

/**
 * hl_get_pb_block - return the relevant block within the block array
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int i;
	u32 start_addr, end_addr;

	for (i = 0 ; i < array_size ; i++) {
		start_addr = pb_blocks[i];
		end_addr = start_addr + HL_BLOCK_SIZE;

		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
			return i;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}
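
/*
 * Worked example (illustrative values only): assuming HL_BLOCK_SIZE is
 * 0x1000, with pb_blocks[] = { 0x7FF0000, 0x7FF1000 } a lookup of
 * address 0x7FF1004 falls inside [0x7FF1000, 0x7FF2000) and returns
 * index 1, while 0x7FF2000 itself lies outside both blocks and yields
 * -EDOM.
 */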

/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset, which will be converted to a bit offset in the
 *              pb block
 * @sgs_entry: pb array
 *
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
		struct hl_block_glbl_sec *sgs_entry)
{
	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			(reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}
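
/*
 * Offset-to-bit mapping, as an example (illustrative only): each
 * protection bit guards one 4-byte register, so reg_offset 0x8 clears
 * bit (0x8 >> 2) == 2 and reg_offset 0x104 clears bit 0x41, while an
 * unaligned offset such as 0x6 is rejected with -EINVAL.
 */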

/**
 * hl_unsecure_register - locate the relevant block for this register and
 *                        remove the corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int block_num;

	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];

	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}

/**
 * hl_unsecure_register_range - locate the relevant block for this register
 *                              range and remove the corresponding protection
 *                              bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range: register address range to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 */
static int hl_unsecure_register_range(struct hl_device *hdev,
		struct range mm_reg_range, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int i, block_num, rc = 0;

	block_num = hl_get_pb_block(hdev,
			mm_reg_range.start + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
		reg_offset = (i + offset) - pb_blocks[block_num];
		rc |= hl_unset_pb_in_block(hdev, reg_offset,
					&sgs_array[block_num]);
	}

	return rc;
}

/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 *                         remove the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);
		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 *                               ranges and remove the corresponding protection
 *                               bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
				offset, pb_blocks, sgs_array,
				blocks_array_size);
		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_ack_pb_security_violations - ack security violations
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}

/**
 * hl_config_glbl_sec - set pb in HW according to given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
				sgs_array[i].sec_array[j]);
	}
}
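
/*
 * Register layout sketch (illustrative): each block exposes
 * HL_BLOCK_GLBL_SEC_LEN 32-bit security words starting at
 * block base + HL_BLOCK_GLBL_SEC_OFFS; word j of sec_array is written
 * to that base + j * sizeof(u32), so every bit cleared by
 * hl_unsecure_register() re-opens exactly one 4-byte register.
 */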

/**
 * hl_secure_block - locally memsets a block to 0
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 *
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

	for (i = 0 ; i < array_size ; i++)
		memset((char *)(sgs_array[i].sec_array), 0,
			HL_BLOCK_GLBL_SEC_SIZE);
}

/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 *                        to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only
 *              once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size, u64 mask)
{
	int i, j;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, pb_blocks,
			glbl_sec, blocks_array_size);

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

	kfree(glbl_sec);

	return 0;
}
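
/*
 * Usage sketch (hypothetical arrays and addresses, not taken from any
 * real ASIC file): an ASIC-specific init could open a small register
 * whitelist on the first two instances of a shared block like so:
 *
 *	static const u32 my_pb_blocks[] = { 0x7FF0000, 0x7FF1000 };
 *	static const u32 my_user_regs[] = { 0x7FF0008, 0x7FF100C };
 *
 *	rc = hl_init_pb_with_mask(hdev, HL_PB_SHARED, 0, 4, 0x2000,
 *			my_pb_blocks, ARRAY_SIZE(my_pb_blocks),
 *			my_user_regs, ARRAY_SIZE(my_user_regs), 0x3ull);
 *
 * With HL_PB_SHARED as num_dcores the dcore loop runs once, and mask
 * 0x3 selects only instance sequence numbers 0 and 1.
 */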

/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only
 *              once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_array, regs_array_size,
			ULLONG_MAX);
}

/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
 *                               given configuration, unsecuring register
 *                               ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only
 *              once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size,
		u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges - set pb in HW according to given configuration,
 *                     unsecuring register ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only
 *              once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_range_array,
			regs_range_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 *                           according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration, unsecuring register
 *                                  ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	int i;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

	kfree(glbl_sec);

	return 0;
}

/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given
 *                       configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only
 *              once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int i, j;

	/* ack all blocks */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}
}
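
/*
 * Mask example (illustrative): with num_dcores = 2 and num_instances = 2,
 * the sequence number is i * num_instances + j, so mask 0x9 (bits 0 and
 * 3 set) acks only dcore0/instance0 and dcore1/instance1.
 */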

/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only
 *              once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}

/**
 * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
 *                          according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	int i;

	/* ack all blocks */
	for (i = 0 ; i < num_instances ; i++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + i * instance_offset,
				blocks_array_size);
}