hw_fence_drv_utils.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <soc/qcom/secure_buffer.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

static void _lock(uint64_t *wait)
{
	/* WFE Wait */
#if defined(__aarch64__)
	__asm__("SEVL\n\t"
		"PRFM PSTL1KEEP, [%x[i_lock]]\n\t"
		"1:\n\t"
		"WFE\n\t"
		"LDAXR W5, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n\t"
		"STXR W5, W0, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n"
		:
		: [i_lock] "r" (wait)
		: "memory");
#endif
}

static void _unlock(uint64_t *lock)
{
	/* Signal Client */
#if defined(__aarch64__)
	__asm__("STLR WZR, [%x[i_out]]\n\t"
		"SEV\n"
		:
		: [i_out] "r" (lock)
		: "memory");
#endif
}

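/*
 * global_atomic_store() - take (val == true) or release (val == false) the
 * shared lock word at 'lock', using the WFE-based _lock()/_unlock() helpers
 * above.
 */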
void global_atomic_store(uint64_t *lock, bool val)
{
	if (val)
		_lock(lock);
	else
		_unlock(lock);
}

/*
 * Each bit in this mask represents each of the loopback clients supported in
 * the enum hw_fence_loopback_id
 */
#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f

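/*
 * Kick the DPU ctl_start register that corresponds to the given loopback
 * client; the loopback client id doubles as the DPU CTL path id.
 */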
static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */
	void *ctl_start_reg;
	u32 val;

	if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) {
		HWFNC_ERR("invalid ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	ctl_start_reg = drv_data->ctl_start_ptr[ctl_id];
	if (!ctl_start_reg) {
		HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id);
	val = 0x1; /* ctl_start trigger */
#ifdef CTL_START_SIM
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_id,
		ctl_start_reg, val);
	writel_relaxed(val, ctl_start_reg);
#else
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id,
		ctl_start_reg, val);
#endif

	return 0;
}

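/*
 * Drain the GFX (CTX0) RX queue, logging every payload that is read back.
 */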
static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
	struct msm_hw_fence_queue_payload payload;
	int read = 1;

	HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id);
	while (read) {
		/*
		 * 'client_id' is the loopback-client-id, not the hw-fence client_id,
		 * so use GFX hw-fence client id, to get the client data
		 */
		read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload,
			queue_type);
		if (read < 0) {
			HWFNC_ERR("unable to read gfx rxq\n");
			break;
		}
		HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n",
			payload.hash, payload.ctxt_id, payload.seqno, payload.flags,
			payload.error);
	}

	return read;
}

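/*
 * Dispatch one signaled loopback client to its DPU, GFX, or (when debugfs is
 * enabled) validation handler.
 */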
static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id)
{
	int ret;

	HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id);
	switch (client_id) {
	case HW_FENCE_LOOPBACK_DPU_CTL_0:
	case HW_FENCE_LOOPBACK_DPU_CTL_1:
	case HW_FENCE_LOOPBACK_DPU_CTL_2:
	case HW_FENCE_LOOPBACK_DPU_CTL_3:
	case HW_FENCE_LOOPBACK_DPU_CTL_4:
	case HW_FENCE_LOOPBACK_DPU_CTL_5:
		ret = _process_dpu_client_loopback(drv_data, client_id);
		break;
	case HW_FENCE_LOOPBACK_GFX_CTX_0:
		ret = _process_gfx_client_loopback(drv_data, client_id);
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	case HW_FENCE_LOOPBACK_VAL_0:
	case HW_FENCE_LOOPBACK_VAL_1:
	case HW_FENCE_LOOPBACK_VAL_2:
	case HW_FENCE_LOOPBACK_VAL_3:
	case HW_FENCE_LOOPBACK_VAL_4:
	case HW_FENCE_LOOPBACK_VAL_5:
	case HW_FENCE_LOOPBACK_VAL_6:
		ret = process_validation_client_loopback(drv_data, client_id);
		break;
#endif /* CONFIG_DEBUG_FS */
	default:
		HWFNC_ERR("unknown client:%d\n", client_id);
		ret = -EINVAL;
	}

	return ret;
}

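/*
 * Walk the doorbell flag mask and process every loopback client whose bit is
 * set, stopping once no flags remain pending.
 */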
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
{
	int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0;
	u64 mask;

	for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) {
		mask = 1 << client_id;
		if (mask & db_flags) {
			HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags);

			/* process client */
			if (_process_doorbell_client(drv_data, client_id))
				HWFNC_ERR("Failed to process client:%d\n", client_id);

			/* clear mask for this client and if nothing else pending finish */
			db_flags = db_flags & ~(mask);
			HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n",
				client_id, db_flags, mask, ~(mask));
			if (!db_flags)
				break;
		}
	}
}

/* doorbell callback */
static void _hw_fence_cb(int irq, void *data)
{
	struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data;
	gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK;
	int ret;

	if (!drv_data)
		return;

	ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0);
	if (ret) {
		HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret);
		return;
	}

	HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label,
		irq, clear_flags, hw_fence_get_qtime(drv_data));

	hw_fence_utils_process_doorbell_mask(drv_data, clear_flags);
}

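/*
 * Register the Gunyah RX doorbell described by the "qcom,msm-hw-fence-db"
 * device-tree node; _hw_fence_cb() runs each time the doorbell is rung.
 */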
int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-db";
	int ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label);
	drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data);
	if (IS_ERR_OR_NULL(drv_data->rx_dbl)) {
		ret = PTR_ERR(drv_data->rx_dbl);
		HWFNC_ERR("Failed to register doorbell\n");
		return ret;
	}

	return 0;
}

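/*
 * Assign the carved-out memory to both the local and peer VMs with
 * hyp_assign_phys() and then share it through the Gunyah resource manager.
 * On a share failure the region is handed back to the local VM and
 * -EPROBE_DEFER is returned so the probe can be retried.
 */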
static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
		gh_vmid_t self, gh_vmid_t peer)
{
	u32 src_vmlist[1] = {self};
	int src_perms[2] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int dst_vmlist[2] = {self, peer};
	int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret;

	ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res),
		src_vmlist, 1, dst_vmlist, dst_perms, 2);
	if (ret) {
		HWFNC_ERR("%s: hyp_assign_phys failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}

	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
	sgl->sgl_entries[0].size = resource_size(&drv_data->res);

	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
			acl, sgl, NULL, &drv_data->memparcel);
	if (ret) {
		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		/* Attempt to give resource back to HLOS */
		hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res),
				dst_vmlist, 2,
				src_vmlist, src_perms, 1);
		ret = -EPROBE_DEFER;
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}

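/*
 * Gunyah resource-manager notifier: once the peer VM reports
 * GH_RM_VM_STATUS_READY, share the carved-out memory with it and mark the
 * driver as vm_ready.
 */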
static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *vm_status_payload;
	struct hw_fence_driver_data *drv_data;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;

	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);

	HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd);
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		goto end;

	vm_status_payload = data;
	HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status);
	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
	    vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
		goto end;

	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
		goto end;
	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		goto end;
	if (peer_vmid != vm_status_payload->vmid)
		goto end;

	switch (vm_status_payload->vm_status) {
	case GH_RM_VM_STATUS_READY:
		HWFNC_DBG_INIT("init mem\n");
		if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
			HWFNC_ERR("failed to share memory\n");
		else
			drv_data->vm_ready = true;
		break;
	case GH_RM_VM_STATUS_RESET:
		HWFNC_DBG_INIT("reset\n");
		break;
	}

end:
	return NOTIFY_DONE;
}

/* Allocates carved-out mapped memory */
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-mem";
	struct device *dev = drv_data->dev;
	struct device_node *np;
	int notifier_ret, ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	np = of_parse_phandle(node_compat, "shared-buffer", 0);
	if (!np) {
		HWFNC_ERR("failed to read shared-buffer info\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(np, 0, &drv_data->res);
	of_node_put(np);
	if (ret) {
		HWFNC_ERR("of_address_to_resource failed %d\n", ret);
		return -EINVAL;
	}

	drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start,
		resource_size(&drv_data->res));
	if (!drv_data->io_mem_base) {
		HWFNC_ERR("ioremap failed!\n");
		return -ENXIO;
	}
	drv_data->size = resource_size(&drv_data->res);

	HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n",
		drv_data->io_mem_base, drv_data->res.start,
		drv_data->res.end, drv_data->size, drv_data->res.name);

	memset_io(drv_data->io_mem_base, 0x0, drv_data->size);

	/* Register memory with HYP */
	ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name);
	if (ret)
		drv_data->peer_name = GH_SELF_VM;

	drv_data->rm_nb.notifier_call = hw_fence_rm_cb;
	drv_data->rm_nb.priority = INT_MAX;
	notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb);
	HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret,
		drv_data->peer_name, notifier_ret);
	if (notifier_ret) {
		HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret);
		return -EPROBE_DEFER;
	}

	return 0;
}

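/* Return a printable name for a hw_fence_mem_reserve type, for debug logs. */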
char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
{
	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE";
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		return "HW_FENCE_MEM_RESERVE_LOCKS_REGION";
	case HW_FENCE_MEM_RESERVE_TABLE:
		return "HW_FENCE_MEM_RESERVE_TABLE";
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";
	}

	return "Unknown";
}

/* Calculates the memory range for each of the elements in the carved-out memory */
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id)
{
	int ret = 0;
	u32 start_offset = 0;

	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		start_offset = 0;
		*size = drv_data->hw_fence_mem_ctrl_queues_size;
		break;
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		/* Locks region starts at the end of the ctrl queues */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
		*size = HW_FENCE_MEM_LOCKS_SIZE;
		break;
	case HW_FENCE_MEM_RESERVE_TABLE:
		/* HW Fence table starts at the end of the Locks region */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE;
		*size = drv_data->hw_fence_mem_fences_table_size;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		if (client_id >= HW_FENCE_CLIENT_MAX) {
			HWFNC_ERR("unexpected client_id:%d\n", client_id);
			ret = -EINVAL;
			goto exit;
		}

		start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
			HW_FENCE_MEM_LOCKS_SIZE +
			drv_data->hw_fence_mem_fences_table_size) +
			((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size);
		*size = drv_data->hw_fence_mem_clients_queues_size;
		break;
	default:
		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
		ret = -EINVAL;
		break;
	}

	if (start_offset + *size > drv_data->size) {
		HWFNC_ERR("reservation request:%lu exceeds total size:%d\n",
			start_offset + *size, drv_data->size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n",
		_get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start,
		start_offset, *size);

	*phys = drv_data->res.start + (phys_addr_t)start_offset;
	*pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */
	HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa);

exit:
	return ret;
}

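/*
 * Read the fence table and queue entry counts from device-tree and derive
 * the sizes of the fence table, control queues, and client queues, rejecting
 * values that would overflow their u32 size fields.
 */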
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 val = 0;

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val);
		return ret;
	}
	drv_data->hw_fence_table_entries = val;

	if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) {
		HWFNC_ERR("table entries:%lu will overflow table size\n",
			drv_data->hw_fence_table_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) *
		drv_data->hw_fence_table_entries);

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val);
		return ret;
	}
	drv_data->hw_fence_queue_entries = val;

	/* ctrl queues init */
	if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) {
		HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n",
			drv_data->hw_fence_queue_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD *
		drv_data->hw_fence_queue_entries;

	if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) /
			HW_FENCE_CTRL_QUEUES) {
		HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n",
			drv_data->hw_fence_ctrl_queue_size);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE +
		(HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size);

	/* clients queues init */
	if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
		HWFNC_ERR("queue entries:%lu will overflow client queue size\n",
			drv_data->hw_fence_queue_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
		drv_data->hw_fence_queue_entries;

	if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) -
			HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / HW_FENCE_CLIENT_QUEUES) {
		HWFNC_ERR("queue size:%lu will overflow client queue mem size\n",
			drv_data->hw_fence_client_queue_size);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE +
		(HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size));

	HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\n",
		drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
		drv_data->hw_fence_queue_entries);
	HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu clients queues: size=%lu mem_size=%lu\n",
		drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size,
		drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size);

	return 0;
}

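/*
 * Map the IPCC register space described by the "qcom,ipcc-reg" device-tree
 * property and enable hw-fence signaling through it.
 */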
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 reg_config[2];
	void __iomem *ptr;

	/* Get ipcc memory range */
	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read ipcc reg: %d\n", ret);
		return ret;
	}
	drv_data->ipcc_reg_base = reg_config[0];
	drv_data->ipcc_size = reg_config[1];

	/* Mmap ipcc registers */
	ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap ipcc regs\n");
		return -ENOMEM;
	}
	drv_data->ipcc_io_mem = ptr;

	HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n",
		drv_data->ipcc_reg_base, drv_data->ipcc_size,
		drv_data->ipcc_io_mem);

	hw_fence_ipcc_enable_signaling(drv_data);

	return ret;
}

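/* Map the QTimer register space described by the "qcom,qtime-reg" property. */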
int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
{
	int ret = 0;
	unsigned int reg_config[2];
	void __iomem *ptr;

	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read qtimer reg: %d\n", ret);
		return ret;
	}

	drv_data->qtime_reg_base = reg_config[0];
	drv_data->qtime_size = reg_config[1];

	ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap qtime regs\n");
		return -ENOMEM;
	}

	drv_data->qtime_io_mem = ptr;

	return ret;
}

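/*
 * Map the optional DPU ctl_start register for one ctl_id, when the matching
 * "qcom,dpu-ctl-start-<N>-reg" device-tree property is present.
 */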
static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id,
	void **iomem_ptr, uint32_t *iomem_size)
{
	u32 reg_config[2];
	void __iomem *ptr;
	char name[30] = {0};
	int ret;

	snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id);
	ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2);
	if (ret)
		return 0; /* this is an optional property */

	/* Mmap registers */
	ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap %s reg\n", name);
		return -ENOMEM;
	}

	*iomem_ptr = ptr;
	*iomem_size = reg_config[1];

	HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n",
		ctl_id, name, reg_config[0], reg_config[1], ptr);

	return 0;
}

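/* Map the ctl_start registers for all supported DPU CTL paths. */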
int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data)
{
	u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0;

	for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) {
		if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id],
			&drv_data->ctl_start_size[ctl_id])) {
			HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id);
		} else {
			if (drv_data->ctl_start_ptr[ctl_id])
				HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n",
					ctl_id, drv_data->ctl_start_ptr[ctl_id],
					drv_data->ctl_start_size[ctl_id]);
		}
	}

	return 0;
}