hw_fence_drv_utils.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <soc/qcom/secure_buffer.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/**
 * MAX_CLIENT_QUEUE_MEM_SIZE:
 * Maximum memory size for client queues of a hw fence client.
 */
#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000

/**
 * HW_FENCE_MAX_CLIENT_TYPE:
 * Total number of client types (GFX, DPU, VAL)
 */
#define HW_FENCE_MAX_CLIENT_TYPE 3

/* Maximum number of clients for each client type */
#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1
#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6
#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7

/**
 * struct hw_fence_client_type_desc - Structure holding client type properties, including static
 * properties and client queue properties read from device-tree.
 *
 * @name: name of client type, used to parse properties from device-tree
 * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
 *           HW_FENCE_CLIENT_ID_CTL0 for DPU clients
 * @max_clients_num: maximum number of clients of given client type
 * @clients_num: number of clients of given client type
 * @queues_num: number of queues per client of given client type; either one (for only Tx Queue)
 *              or two (for both Tx and Rx Queues)
 * @queue_entries: number of entries per client queue of given client type
 * @mem_size: size of memory allocated for client queue(s) per client
 */
struct hw_fence_client_type_desc {
	char *name;
	enum hw_fence_client_id init_id;
	u32 max_clients_num;
	u32 clients_num;
	u32 queues_num;
	u32 queue_entries;
	u32 mem_size;
};

/**
 * struct hw_fence_client_types - Table describing all supported client types, used to parse
 * device-tree properties related to client queue size.
 *
 * The fields name, init_id, and max_clients_num are constants. Default values for clients_num and
 * queues_num are provided in this table, and clients_num, queues_num, and queue_entries can be
 * read from device-tree.
 *
 * If a value for queue entries is not parsed for the client type, then the default number of
 * client queue entries (parsed from device-tree) is used.
 *
 * Notes:
 * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'.
 * 2. Each HW Fence client ID must be described by one of the client types in this table.
 * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num.
 * 4. HW_FENCE_MAX_CLIENT_TYPE must be incremented for new client types.
 */
struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
	{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
		HW_FENCE_CLIENT_QUEUES, 0, 0},
	{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
		HW_FENCE_CLIENT_QUEUES, 0, 0},
	{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
		HW_FENCE_CLIENT_QUEUES, 0, 0},
};
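
/*
 * Inter-VM lock protocol used by _lock()/_unlock() below. The 64-bit lock
 * word lives in the shared locks region and is observed by both VMs:
 *   BIT0 - primary VM (PVM) acquire bit, set/cleared by this driver
 *   BIT1 - secondary VM (SVM) acquire bit; if it is set at unlock time the
 *          SVM is parked in WFI and is woken through IPCC (see _unlock())
 *
 * The aarch64 sequence in _lock() is roughly equivalent to this sketch
 * ('store_exclusive' is a stand-in for the LDAXR/STXR pair, not a real API):
 *
 *	do {
 *		while (READ_ONCE(*lock))	// spin until lock word is zero
 *			;
 *	} while (!store_exclusive(lock, 0x1));	// then try to set PVM BIT0
 */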
static void _lock(uint64_t *wait)
{
#if defined(__aarch64__)
	__asm__(
		// Sequence to wait for lock to be free (i.e. zero)
		"PRFM PSTL1KEEP, [%x[i_lock]]\n\t"
		"1:\n\t"
		"LDAXR W5, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n\t"
		// Sequence to set PVM BIT0
		"LDR W7, =0x1\n\t"              // Load BIT0 (0x1) into W7
		"STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1)
		"CBNZ W5, 1b\n\t"               // If cannot set it, goto 1
		:
		: [i_lock] "r" (wait)
		: "memory");
#endif
}

static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock)
{
	uint64_t lock_val;

#if defined(__aarch64__)
	__asm__(
		// Sequence to clear PVM BIT0
		"2:\n\t"
		"LDAXR W5, [%x[i_out]]\n\t"           // Atomic Fetch Lock
		"AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t" // AND to clear BIT0 (lock &= ~0x1)
		"STXR W5, W6, [%x[i_out]]\n\t"        // Store exclusive result
		"CBNZ W5, 2b\n\t"                     // If cannot store exclusive, goto 2
		:
		: [i_out] "r" (lock)
		: "memory");
#endif
	mb(); /* Make sure the memory is updated */
	lock_val = *lock; /* Read the lock value */
	HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val);
	if (lock_val & 0x2) { /* check if SVM BIT1 is set */
		/*
		 * SVM is in WFI state, since SVM acquire bit is set.
		 * Trigger IRQ to wake up the SVM client.
		 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
		drv_data->debugfs_data.lock_wake_cnt++;
		HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:0x%llx cnt:%llu\n",
			lock_val, drv_data->debugfs_data.lock_wake_cnt);
#endif
		hw_fence_ipcc_trigger_signal(drv_data,
			drv_data->ipcc_client_pid,
			drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */
	}
}
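
/*
 * global_atomic_store() - acquire (val == true) or release (val == false) the
 * inter-VM lock pointed to by @lock, with preemption disabled while held.
 *
 * Illustrative usage (sketch; 'lock_ptr' stands for a lock word inside the
 * shared locks region, it is not a real symbol in this driver):
 *
 *	global_atomic_store(drv_data, lock_ptr, true);	// lock
 *	...critical section touching shared fence state...
 *	global_atomic_store(drv_data, lock_ptr, false);	// unlock
 */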
void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val)
{
	if (val) {
		preempt_disable();
		_lock(lock);
	} else {
		_unlock(drv_data, lock);
		preempt_enable();
	}
}

/*
 * Each bit in this mask represents each of the loopback clients supported in
 * the enum hw_fence_loopback_id
 */
#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f

static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */
	void *ctl_start_reg;
	u32 val;

	if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) {
		HWFNC_ERR("invalid ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	ctl_start_reg = drv_data->ctl_start_ptr[ctl_id];
	if (!ctl_start_reg) {
		HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id);
	val = 0x1; /* ctl_start trigger */
#ifdef CTL_START_SIM
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_id, ctl_start_reg, val);
	writel_relaxed(val, ctl_start_reg);
#else
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id,
		ctl_start_reg, val);
#endif

	return 0;
}

static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
	struct msm_hw_fence_queue_payload payload;
	int read = 1;

	HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id);
	while (read) {
		/*
		 * 'client_id' is the loopback-client-id, not the hw-fence client_id,
		 * so use the GFX hw-fence client id to get the client data
		 */
		read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload,
			queue_type);
		if (read < 0) {
			HWFNC_ERR("unable to read gfx rxq\n");
			break;
		}
		HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%u\n",
			payload.hash, payload.ctxt_id, payload.seqno, payload.flags,
			payload.error);
	}

	return read;
}

static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id)
{
	int ret;

	HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id);
	switch (client_id) {
	case HW_FENCE_LOOPBACK_DPU_CTL_0:
	case HW_FENCE_LOOPBACK_DPU_CTL_1:
	case HW_FENCE_LOOPBACK_DPU_CTL_2:
	case HW_FENCE_LOOPBACK_DPU_CTL_3:
	case HW_FENCE_LOOPBACK_DPU_CTL_4:
	case HW_FENCE_LOOPBACK_DPU_CTL_5:
		ret = _process_dpu_client_loopback(drv_data, client_id);
		break;
	case HW_FENCE_LOOPBACK_GFX_CTX_0:
		ret = _process_gfx_client_loopback(drv_data, client_id);
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	case HW_FENCE_LOOPBACK_VAL_0:
	case HW_FENCE_LOOPBACK_VAL_1:
	case HW_FENCE_LOOPBACK_VAL_2:
	case HW_FENCE_LOOPBACK_VAL_3:
	case HW_FENCE_LOOPBACK_VAL_4:
	case HW_FENCE_LOOPBACK_VAL_5:
	case HW_FENCE_LOOPBACK_VAL_6:
		ret = process_validation_client_loopback(drv_data, client_id);
		break;
#endif /* CONFIG_DEBUG_FS */
	default:
		HWFNC_ERR("unknown client:%d\n", client_id);
		ret = -EINVAL;
	}

	return ret;
}
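
/*
 * Process every loopback client signaled in @db_flags: bit N corresponds to
 * loopback client N of enum hw_fence_loopback_id (assuming
 * HW_FENCE_LOOPBACK_DPU_CTL_0 is bit zero). For example, with the
 * illustrative value db_flags = 0x41, clients HW_FENCE_LOOPBACK_DPU_CTL_0
 * (bit 0) and HW_FENCE_LOOPBACK_GFX_CTX_0 (bit 6) would be processed; each
 * serviced bit is cleared and the scan stops once no flags remain.
 */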
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
{
	int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0;
	u64 mask;

	for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) {
		mask = 1ULL << client_id;
		if (mask & db_flags) {
			HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags);

			/* process client */
			if (_process_doorbell_client(drv_data, client_id))
				HWFNC_ERR("Failed to process client:%d\n", client_id);

			/* clear mask for this client and finish if nothing else is pending */
			db_flags = db_flags & ~(mask);
			HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n",
				client_id, db_flags, mask, ~(mask));
			if (!db_flags)
				break;
		}
	}
}

/* doorbell callback */
static void _hw_fence_cb(int irq, void *data)
{
	struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data;
	gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK;
	int ret;

	if (!drv_data)
		return;

	ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0);
	if (ret) {
		HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret);
		return;
	}

	HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label,
		irq, clear_flags, hw_fence_get_qtime(drv_data));
	hw_fence_utils_process_doorbell_mask(drv_data, clear_flags);
}

int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-db";
	int ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label);
	drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data);
	if (IS_ERR_OR_NULL(drv_data->rx_dbl)) {
		/* PTR_ERR() on NULL would return 0 (success), so map NULL to -EINVAL */
		ret = drv_data->rx_dbl ? PTR_ERR(drv_data->rx_dbl) : -EINVAL;
		HWFNC_ERR("Failed to register doorbell\n");
		return ret;
	}

	return 0;
}
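
/*
 * Share the carved-out memory with the peer VM in two steps:
 * 1. hyp_assign_phys() re-assigns the physical range so both the primary VM
 *    and the peer have R/W access.
 * 2. gh_rm_mem_share() hands the range to the Gunyah Resource Manager as a
 *    memparcel (stored in drv_data->memparcel) under drv_data->label.
 * If the share fails, the range is assigned back to the primary VM alone and
 * -EPROBE_DEFER is returned so the probe can be retried.
 */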
static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
		gh_vmid_t self, gh_vmid_t peer)
{
	u32 src_vmlist[1] = {self};
	int src_perms[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int dst_vmlist[2] = {self, peer};
	int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret;

	ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res),
		src_vmlist, 1, dst_vmlist, dst_perms, 2);
	if (ret) {
		HWFNC_ERR("%s: hyp_assign_phys failed addr=%pa size=%u err=%d\n",
			__func__, &drv_data->res.start, drv_data->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}

	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
	sgl->sgl_entries[0].size = resource_size(&drv_data->res);

	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
			acl, sgl, NULL, &drv_data->memparcel);
	if (ret) {
		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%pa size=%u err=%d\n",
			__func__, &drv_data->res.start, drv_data->size, ret);
		/* Attempt to give resource back to HLOS */
		hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res),
				dst_vmlist, 2,
				src_vmlist, src_perms, 1);
		ret = -EPROBE_DEFER;
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}

static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *vm_status_payload;
	struct hw_fence_driver_data *drv_data;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;

	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);
	HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd);
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		goto end;

	vm_status_payload = data;
	HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status);
	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
	    vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
		goto end;
	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
		goto end;
	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		goto end;
	if (peer_vmid != vm_status_payload->vmid)
		goto end;

	switch (vm_status_payload->vm_status) {
	case GH_RM_VM_STATUS_READY:
		HWFNC_DBG_INIT("init mem\n");
		if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
			HWFNC_ERR("failed to share memory\n");
		else
			drv_data->vm_ready = true;
		break;
	case GH_RM_VM_STATUS_RESET:
		HWFNC_DBG_INIT("reset\n");
		break;
	}

end:
	return NOTIFY_DONE;
}
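
/*
 * Illustrative device-tree node consumed by hw_fence_utils_alloc_mem() below.
 * Only the compatible string and property names come from the code; the node
 * label and values are hypothetical:
 *
 *	hw_fence_mem: qcom,hw-fence-mem {
 *		compatible = "qcom,msm-hw-fence-mem";
 *		gunyah-label = <4>;
 *		shared-buffer = <&hwfence_shbuf>;
 *		peer-name = <3>;	// optional, defaults to GH_SELF_VM
 *	};
 */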
/* Allocates carved-out mapped memory */
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-mem";
	struct device *dev = drv_data->dev;
	struct device_node *np;
	int notifier_ret, ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	np = of_parse_phandle(node_compat, "shared-buffer", 0);
	if (!np) {
		HWFNC_ERR("failed to read shared-buffer info\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(np, 0, &drv_data->res);
	of_node_put(np);
	if (ret) {
		HWFNC_ERR("of_address_to_resource failed %d\n", ret);
		return -EINVAL;
	}

	drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start,
		resource_size(&drv_data->res));
	if (!drv_data->io_mem_base) {
		HWFNC_ERR("ioremap failed!\n");
		return -ENXIO;
	}

	drv_data->size = resource_size(&drv_data->res);
	if (drv_data->size < drv_data->used_mem_size) {
		HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n",
			drv_data->size, drv_data->used_mem_size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("io_mem_base:0x%pK start:%pa end:%pa size:0x%x name:%s\n",
		drv_data->io_mem_base, &drv_data->res.start,
		&drv_data->res.end, drv_data->size, drv_data->res.name);

	memset_io(drv_data->io_mem_base, 0x0, drv_data->size);

	/* Register memory with HYP */
	ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name);
	if (ret)
		drv_data->peer_name = GH_SELF_VM;

	drv_data->rm_nb.notifier_call = hw_fence_rm_cb;
	drv_data->rm_nb.priority = INT_MAX;
	notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb);
	HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret,
		drv_data->peer_name, notifier_ret);
	if (notifier_ret) {
		HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret);
		return -EPROBE_DEFER;
	}

	return 0;
}

char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
{
	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE";
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		return "HW_FENCE_MEM_RESERVE_LOCKS_REGION";
	case HW_FENCE_MEM_RESERVE_TABLE:
		return "HW_FENCE_MEM_RESERVE_TABLE";
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";
	}

	return "Unknown";
}
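
/*
 * Layout of the carved-out memory region, as computed by the offsets below
 * (client queue blocks are placed by _parse_client_queue_dt_props()):
 *
 *	+---------------------------+ offset 0
 *	| ctrl queues               | hw_fence_mem_ctrl_queues_size
 *	+---------------------------+
 *	| locks region              | HW_FENCE_MEM_LOCKS_SIZE
 *	+---------------------------+
 *	| hw fence table            | hw_fence_mem_fences_table_size
 *	+---------------------------+ (page aligned)
 *	| client queues, per client | mem_size per client, contiguous,
 *	+---------------------------+ in hw_fence_client_id order
 */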
/* Calculates the memory range for each of the elements in the carved-out memory */
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id)
{
	int ret = 0;
	u32 start_offset = 0;

	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		start_offset = 0;
		*size = drv_data->hw_fence_mem_ctrl_queues_size;
		break;
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		/* Locks region starts at the end of the ctrl queues */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
		*size = HW_FENCE_MEM_LOCKS_SIZE;
		break;
	case HW_FENCE_MEM_RESERVE_TABLE:
		/* HW Fence table starts at the end of the Locks region */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE;
		*size = drv_data->hw_fence_mem_fences_table_size;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		if (client_id >= HW_FENCE_CLIENT_MAX) {
			HWFNC_ERR("unexpected client_id:%d\n", client_id);
			ret = -EINVAL;
			goto exit;
		}

		start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
		*size = drv_data->hw_fence_client_queue_size[client_id].mem_size;

		/*
		 * If this error occurs when the client should be valid, check that support for
		 * this client has been configured in device-tree properties.
		 */
		if (!*size) {
			HWFNC_ERR("invalid client_id:%d client queue not reserved\n", client_id);
			ret = -EINVAL;
			goto exit;
		}
		break;
	default:
		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
		ret = -EINVAL;
		goto exit;
	}

	if (start_offset + *size > drv_data->size) {
		HWFNC_ERR("reservation request:%u exceeds total size:%u\n",
			start_offset + *size, drv_data->size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%pK start:%pa start_offset:%u size:0x%x\n",
		_get_mem_reserve_type(type), type, drv_data->io_mem_base, &drv_data->res.start,
		start_offset, *size);

	*phys = drv_data->res.start + (phys_addr_t)start_offset;
	*pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */
	HWFNC_DBG_H("phys:%pa pa:0x%pK\n", phys, *pa);

exit:
	return ret;
}
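
/*
 * Illustrative per-client-type property parsed below; the property name format
 * "qcom,hw-fence-client-type-<name>" comes from the code, the values here are
 * hypothetical:
 *
 *	qcom,hw-fence-client-type-dpu = <6 2 128>;
 *
 * i.e. <clients_num queues_num queue_entries>: six DPU clients, each with both
 * Tx and Rx queues of 128 entries. When the property is absent, the defaults
 * from hw_fence_client_types[] and the global 'qcom,hw-fence-queue-entries'
 * value are used instead.
 */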
static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
	struct hw_fence_client_type_desc *desc)
{
	char name[31];
	u32 tmp[3];
	u32 queue_size;
	int ret;

	/* parse client queue properties from device-tree */
	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 3);
	if (ret) {
		HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name,
			ret);
		desc->queue_entries = drv_data->hw_fence_queue_entries;
	} else {
		desc->clients_num = tmp[0];
		desc->queues_num = tmp[1];
		desc->queue_entries = tmp[2];
	}

	if (desc->clients_num > desc->max_clients_num || !desc->queues_num ||
			desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) {
		HWFNC_ERR("%s invalid dt: clients_num:%u queues_num:%u queue_entries:%u\n",
			desc->name, desc->clients_num, desc->queues_num, desc->queue_entries);
		return -EINVAL;
	}

	/* compute mem_size */
	if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
		HWFNC_ERR("%s client queue entries:%u will overflow client queue size\n",
			desc->name, desc->queue_entries);
		return -EINVAL;
	}

	queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
	if (queue_size >= ((U32_MAX & PAGE_MASK) -
			HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) {
		HWFNC_ERR("%s client queue size:%u will overflow client queue mem size\n",
			desc->name, queue_size);
		return -EINVAL;
	}

	desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
		(queue_size * desc->queues_num));
	if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
		HWFNC_ERR("%s client queue mem_size:%u greater than max client queue size:%u\n",
			desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
		return -EINVAL;
	}

	HWFNC_DBG_INIT("%s: clients=%u q_num=%u q_entries=%u mem_sz=%u\n", desc->name,
		desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size);

	return 0;
}

static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
{
	struct hw_fence_client_type_desc *desc;
	int i, j, ret;
	u32 start_offset;

	start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
		HW_FENCE_MEM_LOCKS_SIZE + drv_data->hw_fence_mem_fences_table_size);
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		ret = _parse_client_queue_dt_props_indv(drv_data, desc);
		if (ret) {
			HWFNC_ERR("failed to initialize %s client queue size properties\n",
				desc->name);
			return ret;
		}

		/* initialize client queue size desc for each client */
		for (j = 0; j < desc->clients_num; j++) {
			drv_data->hw_fence_client_queue_size[desc->init_id + j] =
				(struct hw_fence_client_queue_size_desc)
				{desc->queues_num, desc->queue_entries, desc->mem_size,
				start_offset};
			start_offset += desc->mem_size;
		}
	}

	drv_data->used_mem_size = start_offset;

	return 0;
}
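
/*
 * Illustrative top-level properties parsed below (property names come from the
 * code, the values are hypothetical):
 *
 *	qcom,hw-fence-table-entries = <8192>;	// fences in the global table
 *	qcom,hw-fence-queue-entries = <800>;	// default queue depth
 */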
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 val = 0;

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val);
		return ret ? ret : -EINVAL;
	}
	drv_data->hw_fence_table_entries = val;

	if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) {
		HWFNC_ERR("table entries:%u will overflow table size\n",
			drv_data->hw_fence_table_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) *
		drv_data->hw_fence_table_entries);

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val);
		return ret ? ret : -EINVAL;
	}
	drv_data->hw_fence_queue_entries = val;

	/* ctrl queues init */

	if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) {
		HWFNC_ERR("queue entries:%u will overflow ctrl queue size\n",
			drv_data->hw_fence_queue_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD *
		drv_data->hw_fence_queue_entries;

	if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) /
			HW_FENCE_CTRL_QUEUES) {
		HWFNC_ERR("queue size:%u will overflow ctrl queue mem size\n",
			drv_data->hw_fence_ctrl_queue_size);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE +
		(HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size);

	/* clients queues init */
	ret = _parse_client_queue_dt_props(drv_data);
	if (ret) {
		HWFNC_ERR("failed to parse client queue properties\n");
		return -EINVAL;
	}

	HWFNC_DBG_INIT("table: entries=%u mem_size=%u queue: entries=%u\n",
		drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
		drv_data->hw_fence_queue_entries);
	HWFNC_DBG_INIT("ctrl queue: size=%u mem_size=%u\n",
		drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size);

	return 0;
}
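
/*
 * Illustrative register-range properties parsed by the two mapping helpers
 * below (property names come from the code; addresses and sizes are
 * hypothetical):
 *
 *	qcom,ipcc-reg = <0x00400000 0x100000>;	// <base size>
 *	qcom,qtime-reg = <0x17c21000 0x1000>;	// <base size>
 */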
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 reg_config[2];
	void __iomem *ptr;

	/* Get ipcc memory range */
	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read ipcc reg: %d\n", ret);
		return ret;
	}
	drv_data->ipcc_reg_base = reg_config[0];
	drv_data->ipcc_size = reg_config[1];

	/* Mmap ipcc registers */
	ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap ipcc regs\n");
		return -ENOMEM;
	}
	drv_data->ipcc_io_mem = ptr;

	HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n",
		drv_data->ipcc_reg_base, drv_data->ipcc_size,
		drv_data->ipcc_io_mem);

	hw_fence_ipcc_enable_signaling(drv_data);

	return ret;
}

int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
{
	int ret = 0;
	unsigned int reg_config[2];
	void __iomem *ptr;

	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read qtimer reg: %d\n", ret);
		return ret;
	}

	drv_data->qtime_reg_base = reg_config[0];
	drv_data->qtime_size = reg_config[1];

	ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap qtime regs\n");
		return -ENOMEM;
	}

	drv_data->qtime_io_mem = ptr;

	return ret;
}
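
/*
 * Illustrative optional per-CTL property parsed below; the name format
 * "qcom,dpu-ctl-start-<N>-reg" comes from the code, the values are
 * hypothetical:
 *
 *	qcom,dpu-ctl-start-0-reg = <0x0ae35000 0x4>;	// <base size>
 *
 * One property may be provided for each ctl_id 0..5; missing entries are
 * silently skipped since the property is optional.
 */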
static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id,
	void **iomem_ptr, uint32_t *iomem_size)
{
	u32 reg_config[2];
	void __iomem *ptr;
	char name[30] = {0};
	int ret;

	snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id);
	ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2);
	if (ret)
		return 0; /* this is an optional property */

	/* Mmap registers */
	ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap %s reg\n", name);
		return -ENOMEM;
	}

	*iomem_ptr = ptr;
	*iomem_size = reg_config[1];

	HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n",
		ctl_id, name, reg_config[0], reg_config[1], ptr);

	return 0;
}

int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data)
{
	u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0;

	for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) {
		if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id],
				&drv_data->ctl_start_size[ctl_id])) {
			HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id);
		} else {
			if (drv_data->ctl_start_ptr[ctl_id])
				HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n",
					ctl_id, drv_data->ctl_start_ptr[ctl_id],
					drv_data->ctl_start_size[ctl_id]);
		}
	}

	return 0;
}