hw_fence_drv_utils.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <linux/qcom_scm.h>
#include <linux/version.h>
#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
#include <linux/gh_cpusys_vm_mem_access.h>
#endif
#include <soc/qcom/secure_buffer.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/**
 * MAX_CLIENT_QUEUE_MEM_SIZE:
 * Maximum memory size for client queues of a hw fence client.
 */
#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000

/**
 * HW_FENCE_MAX_CLIENT_TYPE:
 * Total number of client types with and without configurable number of sub-clients
 */
#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \
	HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)

/**
 * HW_FENCE_MIN_RXQ_CLIENTS:
 * Minimum number of static hw fence clients with rxq
 */
#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6

/**
 * HW_FENCE_MIN_RXQ_CLIENT_TYPE:
 * Minimum number of static hw fence client types with rxq (GFX, DPU, VAL)
 */
#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3

/* Maximum number of clients for each client type */
#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1
#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6
#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7
#define HW_FENCE_CLIENT_TYPE_MAX_IPE 32
#define HW_FENCE_CLIENT_TYPE_MAX_VPU 32
#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32

/**
 * HW_FENCE_CTRL_QUEUE_DOORBELL:
 * Bit set in doorbell flags mask if hw fence driver should read ctrl rx queue
 */
#define HW_FENCE_CTRL_QUEUE_DOORBELL 0

/**
 * HW_FENCE_DOORBELL_FLAGS_ID_LAST:
 * Last doorbell flags id for which HW Fence Driver can receive doorbell
 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CLIENT_ID_VAL6
#else
#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CTRL_QUEUE_DOORBELL
#endif /* CONFIG_DEBUG_FS */
/**
 * HW_FENCE_DOORBELL_MASK:
 * Each bit in this mask represents a doorbell flag id for which the hw fence driver can
 * receive a doorbell
 */
#define HW_FENCE_DOORBELL_MASK \
	GENMASK(HW_FENCE_DOORBELL_FLAGS_ID_LAST, HW_FENCE_CTRL_QUEUE_DOORBELL)
/**
 * HW_FENCE_MAX_ITER_READ:
 * Maximum number of iterations when reading queue
 */
#define HW_FENCE_MAX_ITER_READ 100

/**
 * HW_FENCE_MAX_EVENTS:
 * Maximum number of HW Fence debug events
 */
#define HW_FENCE_MAX_EVENTS 1000
/**
 * struct hw_fence_client_types - Table describing all supported client types, used to parse
 *                                device-tree properties related to client queue size.
 *
 * The fields name, init_id, and max_clients_num are constants. Default values for clients_num,
 * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num,
 * queue_entries, and skip_txq_wr_idx can be read from device-tree.
 *
 * If a value for queue entries is not parsed for the client type, then the default number of
 * client queue entries (parsed from device-tree) is used.
 *
 * Notes:
 * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'.
 * 2. Each HW Fence client ID must be described by one of the client types in this table.
 * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and
 *    skip_txq_wr_idx.
 * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC
 *    must be incremented as appropriate for new client types.
 */
struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
	{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
	{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
	{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
	{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES,
		0, 0, 0, 0, 0, 0, false},
	{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES,
		0, 0, 0, 0, 0, 0, false},
	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
		true},
};
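
/*
 * _lock() - spin until the shared lock word is free (zero), then set the PVM ownership bit
 * (BIT0) with an exclusive store. Only built for arm64; on other architectures this is a no-op.
 */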
static void _lock(uint64_t *wait)
{
#if defined(__aarch64__)
	__asm__(
		// Sequence to wait for lock to be free (i.e. zero)
		"PRFM PSTL1KEEP, [%x[i_lock]]\n\t"
		"1:\n\t"
		"LDAXR W5, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n\t"
		// Sequence to set PVM BIT0
		"LDR W7, =0x1\n\t"              // Load BIT0 (0x1) into W7
		"STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1)
		"CBNZ W5, 1b\n\t"               // If cannot set it, goto 1
		:
		: [i_lock] "r" (wait)
		: "memory");
#endif
}
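
/*
 * _unlock() - clear the PVM ownership bit (BIT0) in the shared lock word and, if the SVM waiter
 * bit (BIT1) is set, trigger an IPCC signal so the SVM client wakes up from WFI.
 */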
static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock)
{
	uint64_t lock_val;

#if defined(__aarch64__)
	__asm__(
		// Sequence to clear PVM BIT0
		"2:\n\t"
		"LDAXR W5, [%x[i_out]]\n\t"           // Atomic Fetch Lock
		"AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t" // AND to clear BIT0 (lock &= ~0x1)
		"STXR W5, W6, [%x[i_out]]\n\t"        // Store exclusive result
		"CBNZ W5, 2b\n\t"                     // If cannot store exclusive, goto 2
		:
		: [i_out] "r" (lock)
		: "memory");
#endif
	mb(); /* Make sure the memory is updated */
	lock_val = *lock; /* Read the lock value */
	HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val);
	if (lock_val & 0x2) { /* check if SVM BIT1 is set */
		/*
		 * SVM is in WFI state, since SVM acquire bit is set.
		 * Trigger IRQ to wake up SVM client.
		 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
		drv_data->debugfs_data.lock_wake_cnt++;
		HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d cnt:%llu\n", lock_val,
			drv_data->debugfs_data.lock_wake_cnt);
#endif
		hw_fence_ipcc_trigger_signal(drv_data,
			drv_data->ipcc_client_pid,
			drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */
	}
}
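
/**
 * global_atomic_store() - acquire or release the inter-VM lock word shared with the SVM.
 * @drv_data: hw fence driver data, used to send the wake-up IPC signal on release.
 * @lock: pointer to the lock word inside the carved-out locks region.
 * @val: true to acquire the lock, false to release it.
 *
 * Preemption is disabled while the lock is held, so the SVM never spins on a preempted-out
 * PVM lock holder.
 *
 * Illustrative usage only (hypothetical caller, not taken from this file):
 *
 *	global_atomic_store(drv_data, lock_ptr, true);	// lock
 *	// ... update the shared hw-fence state protected by this lock ...
 *	global_atomic_store(drv_data, lock_ptr, false);	// unlock
 */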
void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val)
{
	if (val) {
		preempt_disable();
		_lock(lock);
	} else {
		_unlock(drv_data, lock);
		preempt_enable();
	}
}
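
/*
 * hw_fence_utils_fence_error_cb() - invoke the client's registered fence-error callback with a
 * stack dma_fence populated from the given context, seqno, flags, and error. Fails if the client
 * is invalid, the error is zero, or no callback is registered.
 */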
int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id,
	u64 seqno, u64 hash, u64 flags, u32 error)
{
	struct msm_hw_fence_cb_data cb_data;
	struct dma_fence fence;
	int ret = 0;

	if (IS_ERR_OR_NULL(hw_fence_client)) {
		HWFNC_ERR("Invalid client:0x%pK\n", hw_fence_client);
		return -EINVAL;
	}

	mutex_lock(&hw_fence_client->error_cb_lock);
	if (!error || !hw_fence_client->fence_error_cb) {
		HWFNC_ERR("Invalid error:%d fence_error_cb:0x%pK\n", error,
			hw_fence_client->fence_error_cb);
		ret = -EINVAL;
		goto exit;
	}

	/* initialize cb_data info */
	fence.context = ctxt_id;
	fence.seqno = seqno;
	fence.flags = flags;
	fence.error = error;
	cb_data.fence = &fence;
	cb_data.data = hw_fence_client->fence_error_cb_userdata;

	HWFNC_DBG_L("invoking cb for client:%d ctx:%llu seq:%llu flags:%llu e:%u data:0x%pK\n",
		hw_fence_client->client_id, ctxt_id, seqno, flags, error,
		hw_fence_client->fence_error_cb_userdata);

	hw_fence_client->fence_error_cb(hash, error, &cb_data);

exit:
	mutex_unlock(&hw_fence_client->error_cb_lock);

	return ret;
}
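
/*
 * _process_fence_error_client_loopback() - drain the ctrl rx queue (up to HW_FENCE_MAX_ITER_READ
 * entries) and dispatch each HW_FENCE_PAYLOAD_TYPE_2 payload to the fence-error callback of the
 * client identified by payload.client_data.
 */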
static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv_data,
	int db_flag_id)
{
	struct msm_hw_fence_client *hw_fence_client;
	struct msm_hw_fence_queue_payload payload;
	int i, cb_ret, ret = 0, read = 1;
	u32 client_id;

	for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) {
		read = hw_fence_read_queue_helper(&drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1],
			&payload);
		if (read < 0) {
			HWFNC_DBG_Q("unable to read ctrl rxq for db_flag_id:%d\n", db_flag_id);
			return read;
		}
		if (payload.type != HW_FENCE_PAYLOAD_TYPE_2) {
			HWFNC_ERR("unsupported payload type in ctrl rxq received:%u expected:%u\n",
				payload.type, HW_FENCE_PAYLOAD_TYPE_2);
			ret = -EINVAL;
			continue;
		}
		if (payload.client_data < HW_FENCE_CLIENT_ID_CTX0 ||
				payload.client_data >= drv_data->clients_num) {
			HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n",
				payload.client_data, HW_FENCE_CLIENT_ID_CTX0,
				drv_data->clients_num);
			ret = -EINVAL;
			continue;
		}

		client_id = payload.client_data;
		HWFNC_DBG_Q("ctrl rxq rd: it:%d h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n",
			i, payload.hash, payload.ctxt_id, payload.seqno, payload.flags,
			payload.error, client_id);

		hw_fence_client = drv_data->clients[client_id];
		if (!hw_fence_client) {
			HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n",
				client_id);
			ret = -EINVAL;
			continue;
		}

		cb_ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload.ctxt_id,
			payload.seqno, payload.hash, payload.flags, payload.error);
		if (cb_ret) {
			HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n",
				client_id, payload.ctxt_id, payload.seqno, payload.error);
			ret = cb_ret;
		}
	}

	return ret;
}
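
/*
 * _process_doorbell_id() - dispatch one doorbell flag id: the ctrl queue doorbell is handled by
 * the fence-error loopback, and (when debugfs is enabled) the VAL client ids are handled by the
 * validation-client loopback.
 */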
static int _process_doorbell_id(struct hw_fence_driver_data *drv_data, int db_flag_id)
{
	int ret;

	HWFNC_DBG_H("Processing doorbell mask id:%d\n", db_flag_id);
	switch (db_flag_id) {
	case HW_FENCE_CTRL_QUEUE_DOORBELL:
		ret = _process_fence_error_client_loopback(drv_data, db_flag_id);
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	case HW_FENCE_CLIENT_ID_VAL0:
	case HW_FENCE_CLIENT_ID_VAL1:
	case HW_FENCE_CLIENT_ID_VAL2:
	case HW_FENCE_CLIENT_ID_VAL3:
	case HW_FENCE_CLIENT_ID_VAL4:
	case HW_FENCE_CLIENT_ID_VAL5:
	case HW_FENCE_CLIENT_ID_VAL6:
		ret = process_validation_client_loopback(drv_data, db_flag_id);
		break;
#endif /* CONFIG_DEBUG_FS */
	default:
		HWFNC_ERR("unknown mask id:%d\n", db_flag_id);
		ret = -EINVAL;
	}

	return ret;
}
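
/*
 * hw_fence_utils_process_doorbell_mask() - walk the doorbell flags received from the SVM and
 * process each flag id that is set, stopping early once all pending flags have been handled.
 */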
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
{
	int db_flag_id = HW_FENCE_CTRL_QUEUE_DOORBELL;
	u64 mask;

	for (; db_flag_id <= HW_FENCE_DOORBELL_FLAGS_ID_LAST; db_flag_id++) {
		mask = 1 << db_flag_id;
		if (mask & db_flags) {
			HWFNC_DBG_H("db_flag:%d signaled! flags:0x%llx\n", db_flag_id, db_flags);
			if (_process_doorbell_id(drv_data, db_flag_id))
				HWFNC_ERR("Failed to process db_flag_id:%d\n", db_flag_id);

			/* clear this flag id from the mask; if nothing else is pending, finish */
			db_flags = db_flags & ~(mask);
			HWFNC_DBG_H("db_flag_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n",
				db_flag_id, db_flags, mask, ~(mask));
			if (!db_flags)
				break;
		}
	}
}

/* doorbell callback */
static void _hw_fence_cb(int irq, void *data)
{
	struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data;
	gh_dbl_flags_t clear_flags = HW_FENCE_DOORBELL_MASK;
	int ret;

	if (!drv_data)
		return;

	ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0);
	if (ret) {
		HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret);
		return;
	}

	HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label,
		irq, clear_flags, hw_fence_get_qtime(drv_data));

	hw_fence_utils_process_doorbell_mask(drv_data, clear_flags);
}
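
/*
 * hw_fence_utils_init_virq() - find the "qcom,msm-hw-fence-db" node, read its gunyah-label, and
 * register the Gunyah rx doorbell with _hw_fence_cb() as the callback.
 */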
int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-db";
	int ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label);
	drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data);
	if (IS_ERR_OR_NULL(drv_data->rx_dbl)) {
		ret = PTR_ERR(drv_data->rx_dbl);
		HWFNC_ERR("Failed to register doorbell\n");
		return ret;
	}

	return 0;
}
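
/*
 * hw_fence_gunyah_share_mem() - reassign the carved-out region from HLOS-only to HLOS+peer via
 * qcom_scm_assign_mem() and then share it with the peer VM through the Gunyah resource manager.
 * On a share failure the region is handed back to HLOS and -EPROBE_DEFER is returned.
 */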
static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
	gh_vmid_t self, gh_vmid_t peer)
{
	struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}};
	struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE},
		{peer, PERM_READ | PERM_WRITE}};
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
	u64 srcvmids, dstvmids;
#else
	unsigned int srcvmids, dstvmids;
#endif
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret;

	srcvmids = BIT(src_vmlist[0].vmid);
	dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid);
	ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids,
		dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret) {
		HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}
	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
	sgl->sgl_entries[0].size = resource_size(&drv_data->res);

#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
		acl, sgl, NULL, &drv_data->memparcel);
#else
	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
		acl, sgl, NULL, &drv_data->memparcel);
#endif
	if (ret) {
		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		/* Attempt to give resource back to HLOS */
		qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res),
			&dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist));
		ret = -EPROBE_DEFER;
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}
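
/*
 * _is_mem_shared() - query whether the cpusys VM already has share-memory info for this region;
 * on kernels older than 6.1 the helper is unavailable, so report "not shared".
 */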
static int _is_mem_shared(struct resource *res)
{
#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	return gh_cpusys_vm_get_share_mem_info(res);
#else
	return -EINVAL;
#endif
}
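
/*
 * hw_fence_rm_cb() - Gunyah resource-manager notifier. When the peer VM reports READY, share the
 * carved-out memory (or validate an already-shared region) and mark the VM as ready; RESET is
 * only logged.
 */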
static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *vm_status_payload;
	struct hw_fence_driver_data *drv_data;
	struct resource res;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;
	int ret;

	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);

	HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd);
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		goto end;

	vm_status_payload = data;
	HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status);
	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
			vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
		goto end;

#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
	if (ghd_rm_get_vmid(drv_data->peer_name, &peer_vmid))
		goto end;
	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		goto end;
#else
	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
		goto end;
	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		goto end;
#endif

	if (peer_vmid != vm_status_payload->vmid)
		goto end;

	switch (vm_status_payload->vm_status) {
	case GH_RM_VM_STATUS_READY:
		ret = _is_mem_shared(&res);
		if (ret) {
			HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret);
			if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
				HWFNC_ERR("failed to share memory\n");
			else
				drv_data->vm_ready = true;
		} else {
			if (drv_data->res.start == res.start &&
					resource_size(&drv_data->res) == resource_size(&res)) {
				drv_data->vm_ready = true;
				HWFNC_DBG_INIT("mem_ready: add:0x%x size:%d ret:%d\n", res.start,
					resource_size(&res), ret);
			} else {
				HWFNC_ERR("mem-shared mismatch:[0x%x,%d] expected:[0x%x,%d]\n",
					res.start, resource_size(&res), drv_data->res.start,
					resource_size(&drv_data->res));
			}
		}
		break;
	case GH_RM_VM_STATUS_RESET:
		HWFNC_DBG_INIT("reset\n");
		break;
	}

end:
	return NOTIFY_DONE;
}

/* Allocates carved-out mapped memory */
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-mem";
	struct device *dev = drv_data->dev;
	struct device_node *np;
	int notifier_ret, ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	np = of_parse_phandle(node_compat, "shared-buffer", 0);
	if (!np) {
		HWFNC_ERR("failed to read shared-buffer info\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(np, 0, &drv_data->res);
	of_node_put(np);
	if (ret) {
		HWFNC_ERR("of_address_to_resource failed %d\n", ret);
		return -EINVAL;
	}

	drv_data->io_mem_base = devm_ioremap_wc(dev, drv_data->res.start,
		resource_size(&drv_data->res));
	if (!drv_data->io_mem_base) {
		HWFNC_ERR("ioremap failed!\n");
		return -ENXIO;
	}
	drv_data->size = resource_size(&drv_data->res);
	if (drv_data->size < drv_data->used_mem_size) {
		HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n",
			drv_data->size, drv_data->used_mem_size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n",
		drv_data->io_mem_base, drv_data->res.start,
		drv_data->res.end, drv_data->size, drv_data->res.name);

	memset_io(drv_data->io_mem_base, 0x0, drv_data->size);

	/* Register memory with HYP */
	ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name);
	if (ret)
		drv_data->peer_name = GH_SELF_VM;

	drv_data->rm_nb.notifier_call = hw_fence_rm_cb;
	drv_data->rm_nb.priority = INT_MAX;
	notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb);
	HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret,
		drv_data->peer_name, notifier_ret);
	if (notifier_ret) {
		HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret);
		return -EPROBE_DEFER;
	}

	return 0;
}
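
/* Returns a printable name for a hw_fence_mem_reserve type, for debug logs */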
char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
{
	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE";
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		return "HW_FENCE_MEM_RESERVE_LOCKS_REGION";
	case HW_FENCE_MEM_RESERVE_TABLE:
		return "HW_FENCE_MEM_RESERVE_TABLE";
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";
	case HW_FENCE_MEM_RESERVE_EVENTS_BUFF:
		return "HW_FENCE_MEM_RESERVE_EVENTS_BUFF";
	}

	return "Unknown";
}

/* Calculates the memory range for each of the elements in the carved-out memory */
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id)
{
	int ret = 0;
	u32 start_offset = 0;
	u32 remaining_size_bytes;
	u32 total_events;

	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		start_offset = 0;
		*size = drv_data->hw_fence_mem_ctrl_queues_size;
		break;
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		/* Locks region starts at the end of the ctrl queues */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
		*size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
		break;
	case HW_FENCE_MEM_RESERVE_TABLE:
		/* HW Fence table starts at the end of the Locks region */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size +
			HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
		*size = drv_data->hw_fence_mem_fences_table_size;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		if (client_id >= drv_data->clients_num ||
				!drv_data->hw_fence_client_queue_size[client_id].type) {
			HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id,
				drv_data->clients_num);
			ret = -EINVAL;
			goto exit;
		}

		start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
		*size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size;

		break;
	case HW_FENCE_MEM_RESERVE_EVENTS_BUFF:
		start_offset = drv_data->used_mem_size;
		remaining_size_bytes = drv_data->size - start_offset;
		if (start_offset >= drv_data->size ||
				remaining_size_bytes < sizeof(struct msm_hw_fence_event)) {
			HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%lu evt_sz:%lu\n",
				drv_data->size, start_offset, sizeof(struct msm_hw_fence_event));
			ret = -ENOMEM;
			goto exit;
		}

		total_events = remaining_size_bytes / sizeof(struct msm_hw_fence_event);
		if (total_events > HW_FENCE_MAX_EVENTS)
			total_events = HW_FENCE_MAX_EVENTS;
		*size = total_events * sizeof(struct msm_hw_fence_event);
		break;
	default:
		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
		ret = -EINVAL;
		break;
	}

	if (start_offset + *size > drv_data->size) {
		HWFNC_ERR("reservation request:%lu exceeds total size:%d\n",
			start_offset + *size, drv_data->size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n",
		_get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start,
		start_offset, *size);

	*phys = drv_data->res.start + (phys_addr_t)start_offset;
	*pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */
	HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa);

exit:
	return ret;
}
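
/*
 * _parse_client_queue_dt_props_extra() - parse the optional
 * "qcom,hw-fence-client-type-<name>-extra" property: start/end padding, starting tx queue index,
 * and whether the tx queue index is expressed in payloads; validates alignment and overflow.
 */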
static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data,
	struct hw_fence_client_type_desc *desc)
{
	u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32);
	char name[40];
	u32 tmp[4];
	bool idx_by_payload = false;
	int count, ret;

	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name);

	/* check if property is present */
	ret = of_property_read_bool(drv_data->dev->of_node, name);
	if (!ret)
		return 0;

	count = of_property_count_u32_elems(drv_data->dev->of_node, name);
	if (count <= 0 || count > 4) {
		HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count);
	if (ret) {
		HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name,
			ret, count);
		ret = -EINVAL;
		goto exit;
	}

	desc->start_padding = tmp[0];
	if (count >= 2)
		desc->end_padding = tmp[1];
	if (count >= 3)
		desc->txq_idx_start = tmp[2];
	if (count >= 4) {
		if (tmp[3] > 1) {
			HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]);
			ret = -EINVAL;
			goto exit;
		}
		idx_by_payload = tmp[3];
		desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1;
	}

	if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) ||
			(desc->start_padding + desc->end_padding) % sizeof(u64)) {
		HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n",
			desc->name, desc->start_padding, desc->end_padding);
		ret = -EINVAL;
		goto exit;
	}

	if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) {
		HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n",
			desc->name, desc->queues_num, desc->start_padding);
		ret = -EINVAL;
		goto exit;
	}

	if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) -
			desc->start_padding) {
		HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n",
			desc->name, desc->queues_num, desc->start_padding, desc->end_padding);
		ret = -EINVAL;
		goto exit;
	}

	max_idx_from_zero = idx_by_payload ? desc->queue_entries :
		desc->queue_entries * payload_size_u32;
	if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) {
		HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n",
			desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false",
			desc->queue_entries);
		ret = -EINVAL;
		goto exit;
	}

	HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n",
		desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start,
		idx_by_payload ? "true" : "false");

exit:
	return ret;
}
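
/*
 * _parse_client_queue_dt_props_indv() - parse "qcom,hw-fence-client-type-<name>" for one client
 * type (clients_num, queues_num, queue_entries, skip_txq_wr_idx), apply defaults when the
 * property is absent, and compute the page-aligned per-client queue memory size.
 */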
static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
	struct hw_fence_client_type_desc *desc)
{
	char name[31];
	u32 tmp[4];
	u32 queue_size;
	int ret;

	/* parse client queue properties from device-tree */
	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
	if (ret) {
		HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name,
			ret);
		desc->queue_entries = drv_data->hw_fence_queue_entries;
	} else {
		desc->clients_num = tmp[0];
		desc->queues_num = tmp[1];
		desc->queue_entries = tmp[2];

		if (tmp[3] > 1) {
			HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]);
			return -EINVAL;
		}
		desc->skip_txq_wr_idx = tmp[3];
	}

	if (desc->clients_num > desc->max_clients_num || !desc->queues_num ||
			desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) {
		HWFNC_ERR("%s invalid dt: clients_num:%lu queues_num:%lu, queue_entries:%lu\n",
			desc->name, desc->clients_num, desc->queues_num, desc->queue_entries);
		return -EINVAL;
	}

	/* parse extra client queue properties from device-tree */
	ret = _parse_client_queue_dt_props_extra(drv_data, desc);
	if (ret) {
		HWFNC_ERR("%s failed to parse extra dt props\n", desc->name);
		return -EINVAL;
	}

	/* compute mem_size */
	if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
		HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n",
			desc->name, desc->queue_entries);
		return -EINVAL;
	}

	queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
	if (queue_size >= ((U32_MAX & PAGE_MASK) -
			(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
			desc->start_padding + desc->end_padding)) / desc->queues_num) {
		HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n",
			desc->name, queue_size, desc->start_padding, desc->end_padding);
		return -EINVAL;
	}

	desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
		(queue_size * desc->queues_num) + desc->start_padding + desc->end_padding);

	if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
		HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n",
			desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
		return -EINVAL;
	}

	HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n",
		desc->name, desc->clients_num, desc->queues_num, desc->queue_entries,
		desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false");

	return 0;
}
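
/*
 * _parse_client_queue_dt_props() - parse queue properties for every client type, count rx-queue
 * and configurable clients, allocate the per-client queue size descriptors, and lay out each
 * client's queue region after the ctrl queues, locks region, and fences table.
 */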
static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
{
	struct hw_fence_client_type_desc *desc;
	int i, j, ret;
	u32 start_offset;
	size_t size;
	int configurable_clients_num = 0;

	drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS;
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		ret = _parse_client_queue_dt_props_indv(drv_data, desc);
		if (ret) {
			HWFNC_ERR("failed to initialize %s client queue size properties\n",
				desc->name);
			return ret;
		}

		if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE &&
				desc->queues_num == HW_FENCE_CLIENT_QUEUES)
			drv_data->rxq_clients_num += desc->clients_num;

		if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC)
			configurable_clients_num += desc->clients_num;
	}

	/* store client type descriptors for configurable client indexing logic */
	drv_data->hw_fence_client_types = hw_fence_client_types;

	/* clients and size desc are allocated for all static clients regardless of device-tree */
	drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num;

	/* allocate memory for client queue size descriptors */
	size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc);
	drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL);
	if (!drv_data->hw_fence_client_queue_size)
		return -ENOMEM;

	/* initialize client queue size desc for each client */
	start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
		HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) +
		drv_data->hw_fence_mem_fences_table_size);
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		for (j = 0; j < desc->clients_num; j++) {
			enum hw_fence_client_id client_id_ext = desc->init_id + j;
			enum hw_fence_client_id client_id =
				hw_fence_utils_get_client_id_priv(drv_data, client_id_ext);

			drv_data->hw_fence_client_queue_size[client_id] =
				(struct hw_fence_client_queue_desc){desc, start_offset};
			HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n",
				desc->name, client_id_ext, client_id, start_offset);
			start_offset += desc->mem_size;
		}
	}
	drv_data->used_mem_size = start_offset;

	return 0;
}
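
/*
 * hw_fence_utils_parse_dt_props() - read the top-level device-tree properties (fence table
 * entries and default queue entries), size the fences table and ctrl queues with overflow
 * checks, then parse the per-client queue properties and allocate the clients array.
 */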
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
{
	int ret;
	size_t size;
	u32 val = 0;

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val);
		return ret;
	}
	drv_data->hw_fence_table_entries = val;

	if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) {
		HWFNC_ERR("table entries:%lu will overflow table size\n",
			drv_data->hw_fence_table_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) *
		drv_data->hw_fence_table_entries);

	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val);
	if (ret || !val) {
		HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val);
		return ret;
	}
	drv_data->hw_fence_queue_entries = val;

	/* ctrl queues init */
	if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) {
		HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n",
			drv_data->hw_fence_queue_entries);
		return -EINVAL;
	}
	drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD *
		drv_data->hw_fence_queue_entries;

	if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) /
			HW_FENCE_CTRL_QUEUES) {
		HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n",
			drv_data->hw_fence_ctrl_queue_size);
		return -EINVAL;
	}
	drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE +
		(HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size);

	/* clients queues init */
	ret = _parse_client_queue_dt_props(drv_data);
	if (ret) {
		HWFNC_ERR("failed to parse client queue properties\n");
		return -EINVAL;
	}

	/* allocate clients */
	size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *);
	drv_data->clients = kzalloc(size, GFP_KERNEL);
	if (!drv_data->clients)
		return -ENOMEM;

	HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\n",
		drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
		drv_data->hw_fence_queue_entries);
	HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\n",
		drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size);
	HWFNC_DBG_INIT("clients_num: %lu, total_mem_size:%lu\n", drv_data->clients_num,
		drv_data->used_mem_size);

	return 0;
}
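
/*
 * hw_fence_utils_map_ipcc() - read the "qcom,ipcc-reg" base/size pair, ioremap the IPCC register
 * space, and enable hw fence signaling over IPCC.
 */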
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 reg_config[2];
	void __iomem *ptr;

	/* Get ipcc memory range */
	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read ipcc reg: %d\n", ret);
		return ret;
	}
	drv_data->ipcc_reg_base = reg_config[0];
	drv_data->ipcc_size = reg_config[1];

	/* Mmap ipcc registers */
	ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap ipcc regs\n");
		return -ENOMEM;
	}
	drv_data->ipcc_io_mem = ptr;

	HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n",
		drv_data->ipcc_reg_base, drv_data->ipcc_size,
		drv_data->ipcc_io_mem);

	hw_fence_ipcc_enable_signaling(drv_data);

	return ret;
}
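
/*
 * hw_fence_utils_map_qtime() - read the "qcom,qtime-reg" base/size pair and ioremap the QTimer
 * register space.
 */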
int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
{
	int ret = 0;
	unsigned int reg_config[2];
	void __iomem *ptr;

	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read qtimer reg: %d\n", ret);
		return ret;
	}

	drv_data->qtime_reg_base = reg_config[0];
	drv_data->qtime_size = reg_config[1];

	ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap qtime regs\n");
		return -ENOMEM;
	}

	drv_data->qtime_io_mem = ptr;

	return ret;
}
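
/*
 * hw_fence_utils_get_client_id_priv() - map an external 'hw_fence_client_id' to the driver's
 * internal, consecutive client index. Static clients map one-to-one; configurable clients are
 * compacted based on the clients_num parsed for each configurable client type.
 */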
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
	enum hw_fence_client_id client_id)
{
	int i, client_type, offset;
	enum hw_fence_client_id client_id_priv;

	if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX)
		return client_id;

	/* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */
	client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC +
		(client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) /
		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;
	offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) %
		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;

	/* invalid client id out of range of supported configurable sub-clients */
	if (offset >= drv_data->hw_fence_client_types[client_type].clients_num)
		return HW_FENCE_CLIENT_MAX;

	client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset;

	for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++)
		client_id_priv += drv_data->hw_fence_client_types[i].clients_num;

	return client_id_priv;
}
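
/*
 * hw_fence_utils_get_queues_num() - return the number of queues configured for a client, or 0 if
 * the client id is out of range or has no queue descriptor.
 */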
int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id)
{
	if (!drv_data || client_id >= drv_data->clients_num ||
			!drv_data->hw_fence_client_queue_size[client_id].type) {
		HWFNC_ERR("invalid access to client:%d queues_num\n", client_id);
		return 0;
	}

	return drv_data->hw_fence_client_queue_size[client_id].type->queues_num;
}