hw_fence_drv_utils.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <linux/qcom_scm.h>
#include <linux/version.h>
#include <linux/gh_cpusys_vm_mem_access.h>
#include <soc/qcom/secure_buffer.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/**
 * MAX_CLIENT_QUEUE_MEM_SIZE:
 * Maximum memory size for the client queues of a hw fence client.
 */
#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000

/**
 * HW_FENCE_MAX_CLIENT_TYPE:
 * Total number of client types, with and without a configurable number of sub-clients.
 */
#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \
	HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)

/**
 * HW_FENCE_MIN_RXQ_CLIENTS:
 * Minimum number of static hw fence clients with an rx queue.
 */
#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6

/**
 * HW_FENCE_MIN_RXQ_CLIENT_TYPE:
 * Minimum number of static hw fence client types with an rx queue (GFX, DPU, VAL).
 */
#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3

/* Maximum number of clients for each client type */
#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1
#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6
#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7
#define HW_FENCE_CLIENT_TYPE_MAX_IPE 32
#define HW_FENCE_CLIENT_TYPE_MAX_VPU 32
#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32

/*
 * Each bit in this mask represents one of the loopback clients supported in
 * the enum hw_fence_loopback_id.
 */
#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff

/**
 * struct hw_fence_client_types - Table describing all supported client types, used to parse
 * device-tree properties related to client queue size.
 *
 * The fields name, init_id, and max_clients_num are constants. Default values for clients_num,
 * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num,
 * queue_entries, and skip_txq_wr_idx can be read from device-tree.
 *
 * If a value for queue entries is not parsed for the client type, then the default number of
 * client queue entries (parsed from device-tree) is used.
 *
 * Notes:
 * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'.
 * 2. Each HW Fence client ID must be described by one of the client types in this table.
 * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and
 *    skip_txq_wr_idx.
 * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must
 *    be incremented as appropriate for new client types.
 */
struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
	{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
	{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
	{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
	{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES,
		0, 0, 0, 0, 0, 0, false},
	{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES,
		0, 0, 0, 0, 0, 0, false},
	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
};
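
/**
 * _lock() - Acquire the inter-VM hw-fence lock for the primary VM.
 * @wait: pointer to the lock word shared with the secondary VM.
 *
 * Spin until the whole lock word reads zero, then set BIT0 (the PVM acquire bit) with an
 * exclusive store; the load-acquire/store-exclusive pair retries on any intervening writer.
 * (Summary inferred from the inline assembly below, not from external documentation.)
 */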
static void _lock(uint64_t *wait)
{
#if defined(__aarch64__)
	__asm__(
		// Sequence to wait for lock to be free (i.e. zero)
		"PRFM PSTL1KEEP, [%x[i_lock]]\n\t"
		"1:\n\t"
		"LDAXR W5, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n\t"
		// Sequence to set PVM BIT0
		"LDR W7, =0x1\n\t"              // Load BIT0 (0x1) into W7
		"STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1)
		"CBNZ W5, 1b\n\t"               // If the exclusive store fails, goto 1
		:
		: [i_lock] "r" (wait)
		: "memory");
#endif
}
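
/**
 * _unlock() - Release the inter-VM hw-fence lock held by the primary VM.
 * @drv_data: hw fence driver data, used to trigger the wake-up IPC signal.
 * @lock: pointer to the lock word shared with the secondary VM.
 *
 * Clear BIT0 (the PVM acquire bit) with an exclusive store, then re-read the word: if BIT1
 * (the SVM acquire bit) is still set, the secondary VM is waiting in WFI and is woken through
 * an IPCC signal. (Summary inferred from the assembly and the signal trigger below.)
 */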
static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock)
{
	uint64_t lock_val;

#if defined(__aarch64__)
	__asm__(
		// Sequence to clear PVM BIT0
		"2:\n\t"
		"LDAXR W5, [%x[i_out]]\n\t"           // Atomic Fetch Lock
		"AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t" // AND to clear BIT0 (lock &= ~0x1)
		"STXR W5, W6, [%x[i_out]]\n\t"        // Store exclusive result
		"CBNZ W5, 2b\n\t"                     // If the exclusive store fails, goto 2
		:
		: [i_out] "r" (lock)
		: "memory");
#endif
	mb(); /* Make sure the memory is updated */

	lock_val = *lock; /* Read the lock value */
	HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val);
	if (lock_val & 0x2) { /* check if SVM BIT1 is set */
		/*
		 * SVM is in WFI state, since the SVM acquire bit is set.
		 * Trigger IRQ to wake up the SVM client.
		 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
		drv_data->debugfs_data.lock_wake_cnt++;
		HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:0x%llx cnt:%llu\n",
			lock_val, drv_data->debugfs_data.lock_wake_cnt);
#endif
		hw_fence_ipcc_trigger_signal(drv_data,
			drv_data->ipcc_client_pid,
			drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */
	}
}
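
/**
 * global_atomic_store() - Acquire or release the inter-VM lock for a shared hw-fence word.
 * @drv_data: hw fence driver data.
 * @lock: pointer to the shared lock word.
 * @val: true to acquire the lock, false to release it.
 *
 * Preemption stays disabled for the whole acquire/release window, so callers must keep the
 * critical section short. An illustrative usage pattern (the &hw_fence->lock member is a
 * hypothetical example, not a reference to this file):
 *
 *	global_atomic_store(drv_data, &hw_fence->lock, true);
 *	... update the fence entry shared with the SVM ...
 *	global_atomic_store(drv_data, &hw_fence->lock, false);
 */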
void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val)
{
	if (val) {
		preempt_disable();
		_lock(lock);
	} else {
		_unlock(drv_data, lock);
		preempt_enable();
	}
}
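
/**
 * _process_dpu_client_loopback() - Handle a DPU loopback doorbell by kicking CTL_START.
 * @drv_data: hw fence driver data.
 * @client_id: loopback client id, which doubles as the dpu ctl path id.
 *
 * Writes the ctl_start trigger bit to the mapped CTL_START register for the given ctl path;
 * the actual register write is compiled in only when CTL_START_SIM is defined.
 */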
static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int ctl_id = client_id; /* dpu ctl path id is mapped to the client id used for loopback */
	void *ctl_start_reg;
	u32 val;

	if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) {
		HWFNC_ERR("invalid ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}
	ctl_start_reg = drv_data->ctl_start_ptr[ctl_id];
	if (!ctl_start_reg) {
		HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id);
	val = 0x1; /* ctl_start trigger */
#ifdef CTL_START_SIM
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_id, ctl_start_reg, val);
	writel_relaxed(val, ctl_start_reg);
#else
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id,
		ctl_start_reg, val);
#endif

	return 0;
}
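
/**
 * _process_gfx_client_loopback() - Drain the GFX client rx queue on a loopback doorbell.
 * @drv_data: hw fence driver data.
 * @client_id: loopback client id (logged only; reads use the GFX hw-fence client id).
 *
 * Reads payloads from the GFX rx queue until it is empty, logging each entry. Returns the
 * result of the last read: 0 once the queue is drained, negative on a read error.
 */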
static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
	struct msm_hw_fence_queue_payload payload;
	int read = 1;

	HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id);
	while (read) {
		/*
		 * 'client_id' is the loopback-client-id, not the hw-fence client_id,
		 * so use the GFX hw-fence client id to get the client data
		 */
		read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload,
			queue_type);
		if (read < 0) {
			HWFNC_ERR("unable to read gfx rxq\n");
			break;
		}
		HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n",
			payload.hash, payload.ctxt_id, payload.seqno, payload.flags,
			payload.error);
	}

	return read;
}

static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id)
{
	int ret;

	HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id);
	switch (client_id) {
	case HW_FENCE_LOOPBACK_DPU_CTL_0:
	case HW_FENCE_LOOPBACK_DPU_CTL_1:
	case HW_FENCE_LOOPBACK_DPU_CTL_2:
	case HW_FENCE_LOOPBACK_DPU_CTL_3:
	case HW_FENCE_LOOPBACK_DPU_CTL_4:
	case HW_FENCE_LOOPBACK_DPU_CTL_5:
		ret = _process_dpu_client_loopback(drv_data, client_id);
		break;
	case HW_FENCE_LOOPBACK_GFX_CTX_0:
		ret = _process_gfx_client_loopback(drv_data, client_id);
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	case HW_FENCE_LOOPBACK_VAL_0:
	case HW_FENCE_LOOPBACK_VAL_1:
	case HW_FENCE_LOOPBACK_VAL_2:
	case HW_FENCE_LOOPBACK_VAL_3:
	case HW_FENCE_LOOPBACK_VAL_4:
	case HW_FENCE_LOOPBACK_VAL_5:
	case HW_FENCE_LOOPBACK_VAL_6:
		ret = process_validation_client_loopback(drv_data, client_id);
		break;
#endif /* CONFIG_DEBUG_FS */
	default:
		HWFNC_ERR("unknown client:%d\n", client_id);
		ret = -EINVAL;
	}

	return ret;
}
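
/**
 * hw_fence_utils_process_doorbell_mask() - Dispatch each loopback client flagged in a
 * doorbell mask.
 * @drv_data: hw fence driver data.
 * @db_flags: doorbell flags; bit N corresponds to loopback client id N.
 *
 * For example, db_flags == 0x41 would dispatch HW_FENCE_LOOPBACK_DPU_CTL_0 (bit 0) and then
 * HW_FENCE_LOOPBACK_GFX_CTX_0 (bit 6), assuming the enum values follow the declaration order
 * of hw_fence_loopback_id.
 */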
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
{
	int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0;
	u64 mask;

	for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) {
		mask = 1ULL << client_id;
		if (mask & db_flags) {
			HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags);

			/* process client */
			if (_process_doorbell_client(drv_data, client_id))
				HWFNC_ERR("Failed to process client:%d\n", client_id);

			/* clear mask for this client and finish if nothing else is pending */
			db_flags = db_flags & ~mask;
			HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n",
				client_id, db_flags, mask, ~mask);
			if (!db_flags)
				break;
		}
	}
}

/* doorbell callback */
static void _hw_fence_cb(int irq, void *data)
{
	struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data;
	gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK;
	int ret;

	if (!drv_data)
		return;

	ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0);
	if (ret) {
		HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret);
		return;
	}

	HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label,
		irq, clear_flags, hw_fence_get_qtime(drv_data));
	hw_fence_utils_process_doorbell_mask(drv_data, clear_flags);
}

int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-db";
	int ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label);
	drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data);
	if (IS_ERR_OR_NULL(drv_data->rx_dbl)) {
		ret = PTR_ERR(drv_data->rx_dbl);
		HWFNC_ERR("Failed to register doorbell\n");
		return ret;
	}

	return 0;
}
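
/**
 * hw_fence_gunyah_share_mem() - Share the hw-fence carve-out with the peer VM.
 * @drv_data: hw fence driver data holding the carve-out resource.
 * @self: vmid of the primary VM.
 * @peer: vmid of the peer VM.
 *
 * First reassigns the carve-out to {self RW, peer RW} through qcom_scm_assign_mem(), then
 * describes the same range in ACL/SGL descriptors and shares it with gh_rm_mem_share(). On a
 * share failure the memory is assigned back to HLOS and -EPROBE_DEFER is returned so that
 * probe can retry later.
 */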
static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
	gh_vmid_t self, gh_vmid_t peer)
{
	struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}};
	struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE},
			{peer, PERM_READ | PERM_WRITE}};
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
	u64 srcvmids, dstvmids;
#else
	unsigned int srcvmids, dstvmids;
#endif
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret;

	srcvmids = BIT(src_vmlist[0].vmid);
	dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid);
	ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids,
		dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret) {
		HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}
	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
	sgl->sgl_entries[0].size = resource_size(&drv_data->res);

	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
		acl, sgl, NULL, &drv_data->memparcel);
	if (ret) {
		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		/* Attempt to give resource back to HLOS */
		qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res),
			&dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist));
		ret = -EPROBE_DEFER;
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}
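
/**
 * hw_fence_rm_cb() - Gunyah resource-manager notifier for peer VM status changes.
 *
 * Ignores everything except GH_RM_NOTIF_VM_STATUS events for the configured peer VM. When the
 * peer reports GH_RM_VM_STATUS_READY, the carve-out is shared with it (or, if the cpusys VM
 * already shared memory, the existing share is validated against this driver's resource) and
 * vm_ready is set. GH_RM_VM_STATUS_RESET is only logged.
 */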
static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *vm_status_payload;
	struct hw_fence_driver_data *drv_data;
	struct resource res;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;
	int ret;

	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);
	HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd);
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		goto end;

	vm_status_payload = data;
	HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status);
	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
	    vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
		goto end;
	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
		goto end;
	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		goto end;
	if (peer_vmid != vm_status_payload->vmid)
		goto end;

	switch (vm_status_payload->vm_status) {
	case GH_RM_VM_STATUS_READY:
		ret = gh_cpusys_vm_get_share_mem_info(&res);
		if (ret) {
			HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret);
			if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
				HWFNC_ERR("failed to share memory\n");
			else
				drv_data->vm_ready = true;
		} else {
			if (drv_data->res.start == res.start &&
					resource_size(&drv_data->res) == resource_size(&res)) {
				drv_data->vm_ready = true;
				HWFNC_DBG_INIT("mem_ready: addr:0x%x size:%d ret:%d\n", res.start,
					resource_size(&res), ret);
			} else {
				HWFNC_ERR("mem-shared mismatch:[0x%x,%d] expected:[0x%x,%d]\n",
					res.start, resource_size(&res), drv_data->res.start,
					resource_size(&drv_data->res));
			}
		}
		break;
	case GH_RM_VM_STATUS_RESET:
		HWFNC_DBG_INIT("reset\n");
		break;
	}

end:
	return NOTIFY_DONE;
}

/* Allocates carved-out mapped memory */
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-mem";
	struct device *dev = drv_data->dev;
	struct device_node *np;
	int notifier_ret, ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	np = of_parse_phandle(node_compat, "shared-buffer", 0);
	if (!np) {
		HWFNC_ERR("failed to read shared-buffer info\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(np, 0, &drv_data->res);
	of_node_put(np);
	if (ret) {
		HWFNC_ERR("of_address_to_resource failed %d\n", ret);
		return -EINVAL;
	}

	drv_data->io_mem_base = devm_ioremap_wc(dev, drv_data->res.start,
		resource_size(&drv_data->res));
	if (!drv_data->io_mem_base) {
		HWFNC_ERR("ioremap failed!\n");
		return -ENXIO;
	}

	drv_data->size = resource_size(&drv_data->res);
	if (drv_data->size < drv_data->used_mem_size) {
		HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n",
			drv_data->size, drv_data->used_mem_size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n",
		drv_data->io_mem_base, drv_data->res.start,
		drv_data->res.end, drv_data->size, drv_data->res.name);

	memset_io(drv_data->io_mem_base, 0x0, drv_data->size);

	/* Register memory with HYP */
	ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name);
	if (ret)
		drv_data->peer_name = GH_SELF_VM;

	drv_data->rm_nb.notifier_call = hw_fence_rm_cb;
	drv_data->rm_nb.priority = INT_MAX;
	notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb);
	HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret,
		drv_data->peer_name, notifier_ret);
	if (notifier_ret) {
		HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret);
		return -EPROBE_DEFER;
	}

	return 0;
}

char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
{
	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE";
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		return "HW_FENCE_MEM_RESERVE_LOCKS_REGION";
	case HW_FENCE_MEM_RESERVE_TABLE:
		return "HW_FENCE_MEM_RESERVE_TABLE";
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";
	}

	return "Unknown";
}
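
/*
 * Carve-out layout implied by the offset math in hw_fence_utils_reserve_mem() below
 * (a sketch derived from this file, not authoritative documentation):
 *
 *	+------------------------+ offset 0
 *	| ctrl queues            | hw_fence_mem_ctrl_queues_size
 *	+------------------------+
 *	| locks region           | HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num)
 *	+------------------------+
 *	| hw fence table         | hw_fence_mem_fences_table_size
 *	+------------------------+ (page-aligned in _parse_client_queue_dt_props())
 *	| per-client queues      | one mem_size block per client
 *	+------------------------+
 */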
/* Calculates the memory range for each of the elements in the carved-out memory */
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id)
{
	int ret = 0;
	u32 start_offset = 0;

	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		start_offset = 0;
		*size = drv_data->hw_fence_mem_ctrl_queues_size;
		break;
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		/* Locks region starts at the end of the ctrl queues */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
		*size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
		break;
	case HW_FENCE_MEM_RESERVE_TABLE:
		/* HW Fence table starts at the end of the Locks region */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size +
			HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
		*size = drv_data->hw_fence_mem_fences_table_size;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		if (client_id >= drv_data->clients_num ||
				!drv_data->hw_fence_client_queue_size[client_id].type) {
			HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id,
				drv_data->clients_num);
			ret = -EINVAL;
			goto exit;
		}

		start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
		*size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size;
		break;
	default:
		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
		ret = -EINVAL;
		break;
	}

	if (start_offset + *size > drv_data->size) {
		HWFNC_ERR("reservation request:%lu exceeds total size:%d\n",
			start_offset + *size, drv_data->size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n",
		_get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start,
		start_offset, *size);

	*phys = drv_data->res.start + (phys_addr_t)start_offset;
	*pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */
	HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa);

exit:
	return ret;
}
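
/**
 * _parse_client_queue_dt_props_extra() - Parse the optional per-type "-extra" DT property.
 *
 * The property "qcom,hw-fence-client-type-<name>-extra" carries up to four u32 cells:
 * <start_padding end_padding txq_idx_start txq_idx_by_payload>; trailing cells may be omitted.
 * A hypothetical example entry for the "dpu" client type:
 *
 *	qcom,hw-fence-client-type-dpu-extra = <64 0 0 1>;
 *
 * would add 64 bytes of start padding and index the tx queue by payload size.
 */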
static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data,
	struct hw_fence_client_type_desc *desc)
{
	u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32);
	char name[40];
	u32 tmp[4];
	bool idx_by_payload = false;
	int count, ret;

	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name);

	/* check if property is present */
	ret = of_property_read_bool(drv_data->dev->of_node, name);
	if (!ret)
		return 0;

	count = of_property_count_u32_elems(drv_data->dev->of_node, name);
	if (count <= 0 || count > 4) {
		HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count);
	if (ret) {
		HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name,
			ret, count);
		ret = -EINVAL;
		goto exit;
	}

	desc->start_padding = tmp[0];
	if (count >= 2)
		desc->end_padding = tmp[1];
	if (count >= 3)
		desc->txq_idx_start = tmp[2];
	if (count >= 4) {
		if (tmp[3] > 1) {
			HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]);
			ret = -EINVAL;
			goto exit;
		}
		idx_by_payload = tmp[3];
		desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1;
	}

	if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) ||
			(desc->start_padding + desc->end_padding) % sizeof(u64)) {
		HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n",
			desc->name, desc->start_padding, desc->end_padding);
		ret = -EINVAL;
		goto exit;
	}

	if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) {
		HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n",
			desc->name, desc->queues_num, desc->start_padding);
		ret = -EINVAL;
		goto exit;
	}

	if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) -
			desc->start_padding) {
		HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n",
			desc->name, desc->queues_num, desc->start_padding, desc->end_padding);
		ret = -EINVAL;
		goto exit;
	}

	max_idx_from_zero = idx_by_payload ? desc->queue_entries :
		desc->queue_entries * payload_size_u32;
	if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) {
		HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n",
			desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false",
			desc->queue_entries);
		ret = -EINVAL;
		goto exit;
	}

	HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n",
		desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start,
		idx_by_payload ? "true" : "false");

exit:
	return ret;
}
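
/**
 * _parse_client_queue_dt_props_indv() - Parse the queue properties of one client type.
 *
 * The property "qcom,hw-fence-client-type-<name>" carries four u32 cells:
 * <clients_num queues_num queue_entries skip_txq_wr_idx>. A hypothetical example for the
 * "ife0" client type, describing 8 clients with one queue of 128 entries that skips the txq
 * write index update:
 *
 *	qcom,hw-fence-client-type-ife0 = <8 1 128 1>;
 *
 * If the property is absent, the table defaults are kept and only queue_entries falls back to
 * the global "qcom,hw-fence-queue-entries" value.
 */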
static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
	struct hw_fence_client_type_desc *desc)
{
	char name[31];
	u32 tmp[4];
	u32 queue_size;
	int ret;

	/* parse client queue properties from device-tree */
	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
	if (ret) {
		HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name,
			ret);
		desc->queue_entries = drv_data->hw_fence_queue_entries;
	} else {
		desc->clients_num = tmp[0];
		desc->queues_num = tmp[1];
		desc->queue_entries = tmp[2];

		if (tmp[3] > 1) {
			HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]);
			return -EINVAL;
		}
		desc->skip_txq_wr_idx = tmp[3];
	}

	if (desc->clients_num > desc->max_clients_num || !desc->queues_num ||
			desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) {
		HWFNC_ERR("%s invalid dt: clients_num:%lu queues_num:%lu, queue_entries:%lu\n",
			desc->name, desc->clients_num, desc->queues_num, desc->queue_entries);
		return -EINVAL;
	}

	/* parse extra client queue properties from device-tree */
	ret = _parse_client_queue_dt_props_extra(drv_data, desc);
	if (ret) {
		HWFNC_ERR("%s failed to parse extra dt props\n", desc->name);
		return -EINVAL;
	}

	/* compute mem_size */
	if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
		HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n",
			desc->name, desc->queue_entries);
		return -EINVAL;
	}

	queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
	if (queue_size >= ((U32_MAX & PAGE_MASK) -
			(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
			desc->start_padding + desc->end_padding)) / desc->queues_num) {
		HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n",
			desc->name, queue_size, desc->start_padding, desc->end_padding);
		return -EINVAL;
	}

	desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
		(queue_size * desc->queues_num) + desc->start_padding + desc->end_padding);

	if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
		HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n",
			desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
		return -EINVAL;
	}

	HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n",
		desc->name, desc->clients_num, desc->queues_num, desc->queue_entries,
		desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false");

	return 0;
}
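
/**
 * _parse_client_queue_dt_props() - Parse queue properties for all client types and lay out
 * each client's queue block in the carve-out.
 *
 * Walks hw_fence_client_types to parse each type, counts rx-queue clients and configurable
 * clients, allocates the per-client queue descriptors, and assigns each client a start_offset
 * after the page-aligned end of the ctrl queues, locks region, and fence table. The final
 * offset becomes used_mem_size, which hw_fence_utils_alloc_mem() checks against the carve-out.
 */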
static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
{
	struct hw_fence_client_type_desc *desc;
	int i, j, ret;
	u32 start_offset;
	size_t size;
	int configurable_clients_num = 0;

	drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS;
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		ret = _parse_client_queue_dt_props_indv(drv_data, desc);
		if (ret) {
			HWFNC_ERR("failed to initialize %s client queue size properties\n",
				desc->name);
			return ret;
		}

		if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE &&
				desc->queues_num == HW_FENCE_CLIENT_QUEUES)
			drv_data->rxq_clients_num += desc->clients_num;

		if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC)
			configurable_clients_num += desc->clients_num;
	}

	/* store client type descriptors for configurable client indexing logic */
	drv_data->hw_fence_client_types = hw_fence_client_types;

	/* clients and size desc are allocated for all static clients regardless of device-tree */
	drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num;

	/* allocate memory for client queue size descriptors */
	size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc);
	drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL);
	if (!drv_data->hw_fence_client_queue_size)
		return -ENOMEM;

	/* initialize client queue size desc for each client */
	start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
		HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) +
		drv_data->hw_fence_mem_fences_table_size);
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		for (j = 0; j < desc->clients_num; j++) {
			enum hw_fence_client_id client_id_ext = desc->init_id + j;
			enum hw_fence_client_id client_id =
				hw_fence_utils_get_client_id_priv(drv_data, client_id_ext);

			drv_data->hw_fence_client_queue_size[client_id] =
				(struct hw_fence_client_queue_desc){desc, start_offset};
			HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n",
				desc->name, client_id_ext, client_id, start_offset);
			start_offset += desc->mem_size;
		}
	}
	drv_data->used_mem_size = start_offset;

	return 0;
}
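
/**
 * hw_fence_utils_parse_dt_props() - Parse the top-level hw-fence device-tree properties.
 *
 * Reads "qcom,hw-fence-table-entries" and "qcom,hw-fence-queue-entries", derives the fence
 * table and ctrl queue memory sizes (with overflow checks), then parses the per-client queue
 * properties and allocates the clients array.
 */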
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
{
	int ret;
	size_t size;
	u32 val = 0;

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val);
		return ret;
	}
	drv_data->hw_fence_table_entries = val;

	if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) {
		HWFNC_ERR("table entries:%lu will overflow table size\n",
			drv_data->hw_fence_table_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) *
		drv_data->hw_fence_table_entries);

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val);
		return ret;
	}
	drv_data->hw_fence_queue_entries = val;

	/* ctrl queues init */

	if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) {
		HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n",
			drv_data->hw_fence_queue_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD *
		drv_data->hw_fence_queue_entries;

	if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) /
			HW_FENCE_CTRL_QUEUES) {
		HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n",
			drv_data->hw_fence_ctrl_queue_size);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE +
		(HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size);

	/* clients queues init */
	ret = _parse_client_queue_dt_props(drv_data);
	if (ret) {
		HWFNC_ERR("failed to parse client queue properties\n");
		return -EINVAL;
	}

	/* allocate clients */
	size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *);
	drv_data->clients = kzalloc(size, GFP_KERNEL);
	if (!drv_data->clients)
		return -ENOMEM;

	HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\n",
		drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
		drv_data->hw_fence_queue_entries);
	HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\n",
		drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size);
	HWFNC_DBG_INIT("clients_num: %lu, total_mem_size:%lu\n", drv_data->clients_num,
		drv_data->used_mem_size);

	return 0;
}

int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 reg_config[2];
	void __iomem *ptr;

	/* Get ipcc memory range */
	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read ipcc reg: %d\n", ret);
		return ret;
	}
	drv_data->ipcc_reg_base = reg_config[0];
	drv_data->ipcc_size = reg_config[1];

	/* Mmap ipcc registers */
	ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap ipcc regs\n");
		return -ENOMEM;
	}
	drv_data->ipcc_io_mem = ptr;

	HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n",
		drv_data->ipcc_reg_base, drv_data->ipcc_size,
		drv_data->ipcc_io_mem);

	hw_fence_ipcc_enable_signaling(drv_data);

	return ret;
}

int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
{
	int ret = 0;
	unsigned int reg_config[2];
	void __iomem *ptr;

	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read qtimer reg: %d\n", ret);
		return ret;
	}

	drv_data->qtime_reg_base = reg_config[0];
	drv_data->qtime_size = reg_config[1];

	ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap qtime regs\n");
		return -ENOMEM;
	}

	drv_data->qtime_io_mem = ptr;

	return ret;
}

static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id,
	void **iomem_ptr, uint32_t *iomem_size)
{
	u32 reg_config[2];
	void __iomem *ptr;
	char name[30] = {0};
	int ret;

	snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id);
	ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2);
	if (ret)
		return 0; /* this is an optional property */

	/* Mmap registers */
	ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap %s reg\n", name);
		return -ENOMEM;
	}

	*iomem_ptr = ptr;
	*iomem_size = reg_config[1];

	HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n",
		ctl_id, name, reg_config[0], reg_config[1], ptr);

	return 0;
}

int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data)
{
	u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0;

	for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) {
		if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id],
				&drv_data->ctl_start_size[ctl_id])) {
			HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id);
		} else {
			if (drv_data->ctl_start_ptr[ctl_id])
				HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n",
					ctl_id, drv_data->ctl_start_ptr[ctl_id],
					drv_data->ctl_start_size[ctl_id]);
		}
	}

	return 0;
}
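
/**
 * hw_fence_utils_get_client_id_priv() - Map an external client id to the internal,
 * consecutive client id used for array indexing.
 *
 * Static client ids are returned unchanged. Configurable ids are folded: the external enum
 * reserves MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT slots per configurable type, while internal ids
 * only account for the clients_num actually parsed per type. As a worked example, assuming
 * MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT is 32 and the first configurable type ("ipe") parsed
 * clients_num = 2, the external id HW_FENCE_MAX_STATIC_CLIENTS_INDEX + 32 (the first "vpu"
 * client) maps to the internal id HW_FENCE_MAX_STATIC_CLIENTS_INDEX + 2.
 */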
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
	enum hw_fence_client_id client_id)
{
	int i, client_type, offset;
	enum hw_fence_client_id client_id_priv;

	if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX)
		return client_id;

	/* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */
	client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC +
		(client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) /
		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;
	offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) %
		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;

	/* invalid client id out of range of supported configurable sub-clients */
	if (offset >= drv_data->hw_fence_client_types[client_type].clients_num)
		return HW_FENCE_CLIENT_MAX;

	client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset;
	for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++)
		client_id_priv += drv_data->hw_fence_client_types[i].clients_num;

	return client_id_priv;
}

int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id)
{
	if (!drv_data || client_id >= drv_data->clients_num ||
			!drv_data->hw_fence_client_queue_size[client_id].type) {
		HWFNC_ERR("invalid access to client:%d queues_num\n", client_id);
		return 0;
	}

	return drv_data->hw_fence_client_queue_size[client_id].type->queues_num;
}