adreno_gen8_hfi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/nvmem-consumer.h>

#include "adreno.h"
#include "adreno_gen8.h"
#include "adreno_gen8_gmu.h"
#include "adreno_gen8_hfi.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"

/* The section below is for all structures related to HFI queues */
#define HFI_QUEUE_MAX HFI_QUEUE_DEFAULT_CNT

/* Total header sizes + queue sizes + 16 for alignment */
#define HFIMEM_SIZE (sizeof(struct hfi_queue_table) + 16 + \
	(HFI_QUEUE_SIZE * HFI_QUEUE_MAX))

#define HOST_QUEUE_START_ADDR(hfi_mem, i) \
	((hfi_mem)->hostptr + HFI_QUEUE_OFFSET(i))

struct gen8_hfi *to_gen8_hfi(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);

	return &gmu->hfi;
}

/* Queue sizes and indices in the functions below are in units of dwords */
int gen8_hfi_queue_read(struct gen8_gmu_device *gmu, u32 queue_idx,
	u32 *output, u32 max_size)
{
	struct kgsl_memdesc *mem_addr = gmu->hfi.hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
	u32 *queue;
	u32 msg_hdr;
	u32 i, read;
	u32 size;
	int result = 0;

	if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
		return -EINVAL;

	if (hdr->read_index == hdr->write_index)
		return -ENODATA;

	/* Clear the output data before populating */
	memset(output, 0, max_size);

	queue = HOST_QUEUE_START_ADDR(mem_addr, queue_idx);
	msg_hdr = queue[hdr->read_index];
	size = MSG_HDR_GET_SIZE(msg_hdr);

	if (size > (max_size >> 2)) {
		dev_err(&gmu->pdev->dev,
			"HFI message too big: hdr:0x%x rd idx=%d\n",
			msg_hdr, hdr->read_index);
		result = -EMSGSIZE;
		goto done;
	}

	read = hdr->read_index;

	if (read < hdr->queue_size) {
		for (i = 0; i < size && i < (max_size >> 2); i++) {
			output[i] = queue[read];
			read = (read + 1) % hdr->queue_size;
		}
		result = size;
	} else {
		/* In case the FW messed up */
		dev_err(&gmu->pdev->dev,
			"Read index %d greater than queue size %d\n",
			hdr->read_index, hdr->queue_size);
		result = -ENODATA;
	}

	read = ALIGN(read, SZ_4) % hdr->queue_size;

	hfi_update_read_idx(hdr, read);

	/* For acks, trace the packet for which this ack was sent */
	if (MSG_HDR_GET_TYPE(msg_hdr) == HFI_MSG_ACK)
		trace_kgsl_hfi_receive(MSG_HDR_GET_ID(output[1]),
			MSG_HDR_GET_SIZE(output[1]),
			MSG_HDR_GET_SEQNUM(output[1]));
	else
		trace_kgsl_hfi_receive(MSG_HDR_GET_ID(msg_hdr),
			MSG_HDR_GET_SIZE(msg_hdr), MSG_HDR_GET_SEQNUM(msg_hdr));

done:
	return result;
}
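
/*
 * Note on units (an illustrative sketch, not part of the original source):
 * callers pass max_size in bytes (typically sizeof() of a u32 array), while
 * the size field in the message header is in dwords, hence the
 * "max_size >> 2" comparisons above. A minimal read loop, mirroring
 * gen8_hfi_process_queue() further down, looks like:
 *
 *	u32 rcvd[MAX_RCVD_SIZE];
 *
 *	while (gen8_hfi_queue_read(gmu, HFI_MSG_ID, rcvd, sizeof(rcvd)) > 0)
 *		;	// each iteration copies one whole message into rcvd
 */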
int gen8_hfi_queue_write(struct adreno_device *adreno_dev, u32 queue_idx,
	u32 *msg, u32 size_bytes)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct hfi_queue_table *tbl = gmu->hfi.hfi_mem->hostptr;
	struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
	u32 *queue;
	u32 i, write_idx, read_idx, empty_space;
	u32 size_dwords = size_bytes >> 2;
	u32 align_size = ALIGN(size_dwords, SZ_4);
	u32 id = MSG_HDR_GET_ID(*msg);

	if (hdr->status == HFI_QUEUE_STATUS_DISABLED || !IS_ALIGNED(size_bytes, sizeof(u32)))
		return -EINVAL;

	queue = HOST_QUEUE_START_ADDR(gmu->hfi.hfi_mem, queue_idx);

	write_idx = hdr->write_index;
	read_idx = hdr->read_index;

	empty_space = (write_idx >= read_idx) ?
			(hdr->queue_size - (write_idx - read_idx))
			: (read_idx - write_idx);

	if (empty_space <= align_size)
		return -ENOSPC;

	for (i = 0; i < size_dwords; i++) {
		queue[write_idx] = msg[i];
		write_idx = (write_idx + 1) % hdr->queue_size;
	}

	/* Fill any unused dwords at the end of the aligned write with a cookie */
	for (; i < align_size; i++) {
		queue[write_idx] = 0xfafafafa;
		write_idx = (write_idx + 1) % hdr->queue_size;
	}

	trace_kgsl_hfi_send(id, size_dwords, MSG_HDR_GET_SEQNUM(*msg));

	hfi_update_write_idx(&hdr->write_index, write_idx);

	return 0;
}
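
/*
 * Ring-buffer accounting sketch (illustrative numbers, not taken from the
 * driver): with queue_size = 128 dwords, read_index = 10 and
 * write_index = 100, the writer has not wrapped, so
 * empty_space = 128 - (100 - 10) = 38 dwords. If the writer has wrapped
 * (write_index = 4, read_index = 100), then empty_space = 100 - 4 = 96
 * dwords. A write of align_size dwords is only accepted when empty_space is
 * strictly greater than align_size, which keeps write_index from landing on
 * read_index and making a full queue indistinguishable from an empty one.
 */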
int gen8_hfi_cmdq_write(struct adreno_device *adreno_dev, u32 *msg, u32 size_bytes)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct gen8_hfi *hfi = &gmu->hfi;
	int ret;

	spin_lock(&hfi->cmdq_lock);

	if (test_bit(MSG_HDR_GET_ID(msg[0]), hfi->wb_set_record_bitmask))
		*msg = RECORD_MSG_HDR(*msg);

	ret = gen8_hfi_queue_write(adreno_dev, HFI_CMD_ID, msg, size_bytes);

	/*
	 * Some messages, like the ACD table and perf table, are saved in
	 * memory, so reset the header to make sure we do not send a
	 * record-enable bit in case the warmboot setting is changed from
	 * debugfs.
	 */
	*msg = CLEAR_RECORD_MSG_HDR(*msg);

	/*
	 * Memory barrier to make sure the packet and write index are written
	 * before an interrupt is raised
	 */
	wmb();

	/* Send interrupt to GMU to receive the message */
	if (!ret)
		gmu_core_regwrite(KGSL_DEVICE(adreno_dev),
			GEN8_GMUCX_HOST2GMU_INTR_SET, 0x1);

	spin_unlock(&hfi->cmdq_lock);

	return ret;
}
/* Sizes of the queue and message are in units of dwords */
static void init_queues(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct kgsl_memdesc *mem_addr = gmu->hfi.hfi_mem;
	int i;
	struct hfi_queue_table *tbl;
	struct hfi_queue_header *hdr;
	struct {
		u32 idx;
		u32 pri;
		u32 status;
	} queue[HFI_QUEUE_MAX] = {
		{ HFI_CMD_ID, HFI_CMD_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_MSG_ID, HFI_MSG_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_DBG_ID, HFI_DBG_PRI, HFI_QUEUE_STATUS_ENABLED },
	};

	/* Fill Table Header */
	tbl = mem_addr->hostptr;
	tbl->qtbl_hdr.version = 0;
	tbl->qtbl_hdr.size = sizeof(struct hfi_queue_table) >> 2;
	tbl->qtbl_hdr.qhdr0_offset = sizeof(struct hfi_queue_table_header) >> 2;
	tbl->qtbl_hdr.qhdr_size = sizeof(struct hfi_queue_header) >> 2;
	tbl->qtbl_hdr.num_q = HFI_QUEUE_MAX;
	tbl->qtbl_hdr.num_active_q = HFI_QUEUE_MAX;

	memset(&tbl->qhdr[0], 0, sizeof(tbl->qhdr));

	/* Fill Individual Queue Headers */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr->gmuaddr, i);
		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0, 0);
		hdr->status = queue[i].status;
		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
	}
}
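
/*
 * Resulting layout of the HFI memory block, as a sketch inferred from
 * HFIMEM_SIZE, HOST_QUEUE_START_ADDR() and GMU_QUEUE_START_ADDR(); the exact
 * offsets come from HFI_QUEUE_OFFSET() in the headers, so this is only an
 * approximation of the layout, not a spec:
 *
 *	+---------------------------------------+  hfi_mem->hostptr / gmuaddr
 *	| hfi_queue_table (table header plus    |
 *	|   HFI_QUEUE_MAX queue headers)        |
 *	+---------------------------------------+
 *	| queue 0 (CMD), HFI_QUEUE_SIZE bytes   |  HFI_QUEUE_OFFSET(0)
 *	| queue 1 (MSG), HFI_QUEUE_SIZE bytes   |  HFI_QUEUE_OFFSET(1)
 *	| queue 2 (DBG), HFI_QUEUE_SIZE bytes   |  HFI_QUEUE_OFFSET(2)
 *	+---------------------------------------+
 */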
int gen8_hfi_init(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct gen8_hfi *hfi = &gmu->hfi;

	/* Allocate and map memory for HFI */
	if (IS_ERR_OR_NULL(hfi->hfi_mem)) {
		hfi->hfi_mem = gen8_reserve_gmu_kernel_block(gmu, 0,
			HFIMEM_SIZE, GMU_NONCACHED_KERNEL, 0);
		if (!IS_ERR(hfi->hfi_mem))
			init_queues(adreno_dev);
	}

	return PTR_ERR_OR_ZERO(hfi->hfi_mem);
}
int gen8_receive_ack_cmd(struct gen8_gmu_device *gmu, void *rcvd,
	struct pending_cmd *ret_cmd)
{
	struct adreno_device *adreno_dev = gen8_gmu_to_adreno(gmu);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 *ack = rcvd;
	u32 hdr = ack[0];
	u32 req_hdr = ack[1];

	if (ret_cmd == NULL)
		return -EINVAL;

	if (CMP_HFI_ACK_HDR(ret_cmd->sent_hdr, req_hdr)) {
		memcpy(&ret_cmd->results, ack, MSG_HDR_GET_SIZE(hdr) << 2);
		return 0;
	}

	/* Didn't find the sender, log the waiter */
	dev_err_ratelimited(&gmu->pdev->dev,
		"HFI ACK: Cannot find sender for 0x%8.8x Waiter: 0x%8.8x\n",
		req_hdr, ret_cmd->sent_hdr);

	gmu_core_fault_snapshot(device);

	return -ENODEV;
}
static int poll_gmu_reg(struct adreno_device *adreno_dev,
	u32 offsetdwords, u32 expected_val,
	u32 mask, u32 timeout_ms)
{
	u32 val;
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	bool nmi = false;

	while (time_is_after_jiffies(timeout)) {
		gmu_core_regread(device, offsetdwords, &val);
		if ((val & mask) == expected_val)
			return 0;

		/*
		 * If the GMU firmware fails any assertion, an error message is
		 * sent to KMD and an NMI is triggered. So check if the GMU is
		 * in NMI and time out early. Bits [11:9] of
		 * GEN8_GMUCX_CM3_FW_INIT_RESULT contain the GMU reset status.
		 * A non-zero value here indicates that a GMU reset is active;
		 * the NMI handler will eventually complete and the GMU will
		 * wait for recovery.
		 */
		gmu_core_regread(device, GEN8_GMUCX_CM3_FW_INIT_RESULT, &val);
		if (val & 0xE00) {
			nmi = true;
			break;
		}

		usleep_range(10, 100);
	}

	/* Check one last time */
	gmu_core_regread(device, offsetdwords, &val);
	if ((val & mask) == expected_val)
		return 0;

	dev_err(&gmu->pdev->dev,
		"Reg poll %s: offset 0x%x, want 0x%x, got 0x%x\n",
		nmi ? "abort" : "timeout", offsetdwords, expected_val,
		val & mask);

	return -ETIMEDOUT;
}
static int gen8_hfi_send_cmd_wait_inline(struct adreno_device *adreno_dev,
	void *data, u32 size_bytes, struct pending_cmd *ret_cmd)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int rc;
	u32 *cmd = data;
	struct gen8_hfi *hfi = &gmu->hfi;
	u32 seqnum = atomic_inc_return(&hfi->seqnum);

	*cmd = MSG_HDR_SET_SEQNUM_SIZE(*cmd, seqnum, size_bytes >> 2);
	if (ret_cmd == NULL)
		return gen8_hfi_cmdq_write(adreno_dev, cmd, size_bytes);

	ret_cmd->sent_hdr = cmd[0];

	rc = gen8_hfi_cmdq_write(adreno_dev, cmd, size_bytes);
	if (rc)
		return rc;

	rc = poll_gmu_reg(adreno_dev, GEN8_GMUCX_GMU2HOST_INTR_INFO,
		HFI_IRQ_MSGQ_MASK, HFI_IRQ_MSGQ_MASK, HFI_RSP_TIMEOUT);

	if (rc) {
		gmu_core_fault_snapshot(device);
		dev_err(&gmu->pdev->dev,
			"Timed out waiting on ack for 0x%8.8x (id %d, sequence %d)\n",
			cmd[0], MSG_HDR_GET_ID(*cmd), MSG_HDR_GET_SEQNUM(*cmd));
		return rc;
	}

	/* Clear the interrupt */
	gmu_core_regwrite(device, GEN8_GMUCX_GMU2HOST_INTR_CLR,
		HFI_IRQ_MSGQ_MASK);

	rc = gen8_hfi_process_queue(gmu, HFI_MSG_ID, ret_cmd);

	return rc;
}
int gen8_hfi_send_generic_req(struct adreno_device *adreno_dev, void *cmd, u32 size_bytes)
{
	struct pending_cmd ret_cmd;
	int rc;

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = gen8_hfi_send_cmd_wait_inline(adreno_dev, cmd, size_bytes, &ret_cmd);
	if (rc)
		return rc;

	if (ret_cmd.results[2]) {
		struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

		gmu_core_fault_snapshot(device);
		dev_err(&gmu->pdev->dev,
			"HFI ACK failure: Req=0x%8.8X, Result=0x%8.8X\n",
			ret_cmd.results[1],
			ret_cmd.results[2]);
		return -EINVAL;
	}

	return 0;
}
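
/*
 * Typical usage of gen8_hfi_send_generic_req() (a sketch mirroring
 * gen8_hfi_send_core_fw_start() below, not additional driver code): build the
 * command struct, stamp its header with CMD_MSG_HDR(), then send it with the
 * struct size in bytes.
 *
 *	struct hfi_core_fw_start_cmd cmd = { .handle = 0x0 };
 *	int ret = CMD_MSG_HDR(cmd, H2F_MSG_CORE_FW_START);
 *
 *	if (!ret)
 *		ret = gen8_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
 */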
int gen8_hfi_send_core_fw_start(struct adreno_device *adreno_dev)
{
	struct hfi_core_fw_start_cmd cmd = {
		.handle = 0x0,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_CORE_FW_START);
	if (ret)
		return ret;

	return gen8_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
}
static const char *feature_to_string(u32 feature)
{
	if (feature == HFI_FEATURE_ACD)
		return "ACD";

	return "unknown";
}
/* For sending an HFI message inline and handling the GMU return-type error */
int gen8_hfi_send_generic_req_v5(struct adreno_device *adreno_dev, void *cmd,
	struct pending_cmd *ret_cmd, u32 size_bytes)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	int rc;

	if (GMU_VER_MINOR(gmu->ver.hfi) <= 4)
		return gen8_hfi_send_generic_req(adreno_dev, cmd, size_bytes);

	rc = gen8_hfi_send_cmd_wait_inline(adreno_dev, cmd, size_bytes, ret_cmd);
	if (rc)
		return rc;

	switch (ret_cmd->results[3]) {
	case GMU_SUCCESS:
		rc = ret_cmd->results[2];
		break;
	case GMU_ERROR_NO_ENTRY:
		/* Unique error to let the caller handle undefined HFI msgs */
		rc = -ENOENT;
		break;
	case GMU_ERROR_TIMEOUT:
		rc = -EINVAL;
		break;
	default:
		gmu_core_fault_snapshot(KGSL_DEVICE(adreno_dev));
		dev_err(&gmu->pdev->dev,
			"HFI ACK: Req=0x%8.8X, Result=0x%8.8X Error:0x%8.8X\n",
			ret_cmd->results[1], ret_cmd->results[2], ret_cmd->results[3]);
		rc = -EINVAL;
		break;
	}

	return rc;
}
int gen8_hfi_send_feature_ctrl(struct adreno_device *adreno_dev,
	u32 feature, u32 enable, u32 data)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct pending_cmd ret_cmd = {0};
	struct hfi_feature_ctrl_cmd cmd = {
		.feature = feature,
		.enable = enable,
		.data = data,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_FEATURE_CTRL);
	if (ret)
		return ret;

	ret = gen8_hfi_send_generic_req_v5(adreno_dev, &cmd, &ret_cmd, sizeof(cmd));
	if (ret < 0)
		dev_err(&gmu->pdev->dev,
			"Unable to %s feature %s (%d)\n",
			enable ? "enable" : "disable",
			feature_to_string(feature),
			feature);
	return ret;
}
int gen8_hfi_send_get_value(struct adreno_device *adreno_dev, u32 type, u32 subtype)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct pending_cmd ret_cmd = {0};
	struct hfi_get_value_cmd cmd = {
		.type = type,
		.subtype = subtype,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_GET_VALUE);
	if (ret)
		return ret;

	ret = gen8_hfi_send_generic_req_v5(adreno_dev, &cmd, &ret_cmd, sizeof(cmd));
	if (ret < 0)
		dev_err(&gmu->pdev->dev,
			"Unable to get HFI Value type: %d, subtype: %d, error = %d\n",
			type, subtype, ret);
	return ret;
}
int gen8_hfi_send_set_value(struct adreno_device *adreno_dev,
	u32 type, u32 subtype, u32 data)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct pending_cmd ret_cmd = {0};
	struct hfi_set_value_cmd cmd = {
		.type = type,
		.subtype = subtype,
		.data = data,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_SET_VALUE);
	if (ret)
		return ret;

	ret = gen8_hfi_send_generic_req_v5(adreno_dev, &cmd, &ret_cmd, sizeof(cmd));
	if (ret < 0)
		dev_err(&gmu->pdev->dev,
			"Unable to set HFI Value %d, %d to %d, error = %d\n",
			type, subtype, data, ret);
	return ret;
}
void adreno_gen8_receive_err_req(struct gen8_gmu_device *gmu, void *rcvd)
{
	struct hfi_err_cmd *cmd = rcvd;

	dev_err(&gmu->pdev->dev, "HFI Error Received: %d %d %.16s\n",
		((cmd->error_code >> 16) & 0xffff),
		(cmd->error_code & 0xffff),
		(char *) cmd->data);
}

void adreno_gen8_receive_debug_req(struct gen8_gmu_device *gmu, void *rcvd)
{
	struct hfi_debug_cmd *cmd = rcvd;

	dev_dbg(&gmu->pdev->dev, "HFI Debug Received: %d %d %d\n",
		cmd->type, cmd->timestamp, cmd->data);
}
int gen8_hfi_process_queue(struct gen8_gmu_device *gmu,
	u32 queue_idx, struct pending_cmd *ret_cmd)
{
	u32 rcvd[MAX_RCVD_SIZE];

	while (gen8_hfi_queue_read(gmu, queue_idx, rcvd, sizeof(rcvd)) > 0) {
		/* ACK Handler */
		if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) {
			int ret = gen8_receive_ack_cmd(gmu, rcvd, ret_cmd);

			if (ret)
				return ret;
			continue;
		}

		/* Request Handler */
		switch (MSG_HDR_GET_ID(rcvd[0])) {
		case F2H_MSG_ERR: /* No Reply */
			adreno_gen8_receive_err_req(gmu, rcvd);
			break;
		case F2H_MSG_DEBUG: /* No Reply */
			adreno_gen8_receive_debug_req(gmu, rcvd);
			break;
		default: /* No Reply */
			dev_err(&gmu->pdev->dev,
				"HFI request %d not supported\n",
				MSG_HDR_GET_ID(rcvd[0]));
			break;
		}
	}

	return 0;
}
int gen8_hfi_send_bcl_feature_ctrl(struct adreno_device *adreno_dev)
{
	if (!adreno_dev->bcl_enabled)
		return 0;

	/*
	 * The GMU expects the BCL data in the format below:
	 * BIT[0] - Response type
	 * BIT[1:7] - Throttle level 1 (optional)
	 * BIT[8:14] - Throttle level 2 (optional)
	 * BIT[15:21] - Throttle level 3 (optional)
	 */
	return gen8_hfi_send_feature_ctrl(adreno_dev, HFI_FEATURE_BCL, 1, adreno_dev->bcl_data);
}
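
/*
 * Illustrative packing of bcl_data per the bit layout above; the values are
 * hypothetical and only show how the fields line up (bcl_data itself is
 * populated elsewhere in the adreno code, not here):
 *
 *	u32 bcl_data = FIELD_PREP(GENMASK(0, 0), 1) |	// response type
 *		       FIELD_PREP(GENMASK(7, 1), 25) |	// throttle level 1
 *		       FIELD_PREP(GENMASK(14, 8), 40) |	// throttle level 2
 *		       FIELD_PREP(GENMASK(21, 15), 65);	// throttle level 3
 */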
int gen8_hfi_send_clx_feature_ctrl(struct adreno_device *adreno_dev)
{
	int ret = 0;
	struct hfi_clx_table_v2_cmd cmd = {0};

	if (!adreno_dev->clx_enabled)
		return 0;

	/* Make sure the table is valid before enabling the feature */
	ret = CMD_MSG_HDR(cmd, H2F_MSG_CLX_TBL);
	if (ret)
		return ret;

	ret = gen8_hfi_send_feature_ctrl(adreno_dev, HFI_FEATURE_CLX, 1, 0);
	if (ret)
		return ret;

	cmd.version = FIELD_PREP(GENMASK(31, 16), 0x2) | FIELD_PREP(GENMASK(15, 0), 0x1);
	/* cmd.domain[0] is never used but is needed per the HFI spec */
	cmd.domain[1].data0 = FIELD_PREP(GENMASK(31, 29), 1) |
		FIELD_PREP(GENMASK(28, 28), 1) |
		FIELD_PREP(GENMASK(27, 22), 1) |
		FIELD_PREP(GENMASK(21, 16), 40) |
		FIELD_PREP(GENMASK(15, 0), 0);
	cmd.domain[1].clxt = 0;
	cmd.domain[1].clxh = 0;
	cmd.domain[1].urgmode = 1;
	cmd.domain[1].lkgen = 0;
	cmd.domain[1].currbudget = 50;

	return gen8_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
}
#define EVENT_PWR_ACD_THROTTLE_PROF 44

int gen8_hfi_send_acd_feature_ctrl(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	int ret = 0;

	if (adreno_dev->acd_enabled) {
		ret = gen8_hfi_send_feature_ctrl(adreno_dev,
			HFI_FEATURE_ACD, 1, 0);
		if (ret)
			return ret;

		ret = gen8_hfi_send_generic_req(adreno_dev,
			&gmu->hfi.acd_table, sizeof(gmu->hfi.acd_table));
		if (ret)
			return ret;

		gen8_hfi_send_set_value(adreno_dev, HFI_VALUE_LOG_EVENT_ON,
			EVENT_PWR_ACD_THROTTLE_PROF, 0);
	}

	return 0;
}
int gen8_hfi_send_ifpc_feature_ctrl(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);

	if (gmu->idle_level == GPU_HW_IFPC)
		return gen8_hfi_send_feature_ctrl(adreno_dev,
			HFI_FEATURE_IFPC, 1, adreno_dev->ifpc_hyst);
	return 0;
}
static void reset_hfi_queues(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct kgsl_memdesc *mem_addr = gmu->hfi.hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr;
	u32 i;

	/* Flush HFI queues */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
			continue;

		hdr->read_index = hdr->write_index;
	}
}
/* Fill the entry and return the dword count written */
static u32 _fill_table_entry(struct hfi_table_entry *entry, u32 count,
	u32 stride_bytes, u32 *data)
{
	entry->count = count;
	entry->stride = stride_bytes >> 2; /* entry->stride is in dwords */
	memcpy(entry->data, data, stride_bytes * count);

	/* Return the total dword count of the entry header plus its data */
	return (sizeof(*entry) >> 2) + (entry->count * entry->stride);
}
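
/*
 * Dword accounting sketch for _fill_table_entry() (illustrative numbers only;
 * the real counts and strides come from the DCVS table used below): for
 * count = 4 entries of stride_bytes = 24 each, entry->stride = 24 >> 2 = 6
 * dwords, so the helper copies 4 * 24 = 96 bytes of data and returns
 * (sizeof(*entry) >> 2) + 4 * 6 dwords, which the caller adds to dword_off to
 * find where the next table entry begins in cmd_buf[].
 */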
int gen8_hfi_send_gpu_perf_table(struct adreno_device *adreno_dev)
{
	/*
	 * Buffer to store either hfi_table_cmd or hfi_dcvstable_cmd.
	 * The current max size for either is 165 dwords.
	 */
	static u32 cmd_buf[200];
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct gen8_dcvs_table *tbl = &gmu->dcvs_table;
	int ret = 0;

	/* Starting with GMU HFI Version 2.6.1, use H2F_MSG_TABLE */
	if (gmu->ver.hfi >= HFI_VERSION(2, 6, 1)) {
		struct hfi_table_cmd *cmd = (struct hfi_table_cmd *)&cmd_buf[0];
		u32 dword_off;

		/* Already set up, so just send the cmd */
		if (cmd->hdr)
			return gen8_hfi_send_generic_req(adreno_dev, cmd,
				MSG_HDR_GET_SIZE(cmd->hdr) << 2);

		if (tbl->gpu_level_num > MAX_GX_LEVELS || tbl->gmu_level_num > MAX_CX_LEVELS)
			return -EINVAL;

		/* The cmd starts with the struct hfi_table_cmd data */
		cmd->type = HFI_TABLE_GPU_PERF;
		dword_off = sizeof(*cmd) >> 2;

		/* Fill in the table entry and data starting at dword_off */
		dword_off += _fill_table_entry((struct hfi_table_entry *)&cmd_buf[dword_off],
			tbl->gpu_level_num, sizeof(struct opp_gx_desc),
			(u32 *)tbl->gx_votes);

		/* Fill in the table entry and data starting at dword_off */
		dword_off += _fill_table_entry((struct hfi_table_entry *)&cmd_buf[dword_off],
			tbl->gmu_level_num, sizeof(struct opp_desc),
			(u32 *)tbl->cx_votes);

		cmd->hdr = CREATE_MSG_HDR(H2F_MSG_TABLE, HFI_MSG_CMD);
		cmd->hdr = MSG_HDR_SET_SIZE(cmd->hdr, dword_off);

		ret = gen8_hfi_send_generic_req(adreno_dev, cmd, dword_off << 2);
	} else {
		struct hfi_dcvstable_cmd *cmd = (struct hfi_dcvstable_cmd *)&cmd_buf[0];

		/* Already set up, so just send the cmd */
		if (cmd->hdr)
			return gen8_hfi_send_generic_req(adreno_dev, cmd, sizeof(*cmd));

		if (tbl->gpu_level_num > MAX_GX_LEVELS_LEGACY || tbl->gmu_level_num > MAX_CX_LEVELS)
			return -EINVAL;

		ret = CMD_MSG_HDR(*cmd, H2F_MSG_PERF_TBL);
		if (ret)
			return ret;

		cmd->gpu_level_num = tbl->gpu_level_num;
		cmd->gmu_level_num = tbl->gmu_level_num;
		memcpy(&cmd->gx_votes, tbl->gx_votes,
			sizeof(struct opp_gx_desc) * cmd->gpu_level_num);
		memcpy(&cmd->cx_votes, tbl->cx_votes,
			sizeof(struct opp_desc) * cmd->gmu_level_num);

		ret = gen8_hfi_send_generic_req(adreno_dev, cmd, sizeof(*cmd));
	}

	return ret;
}
int gen8_hfi_start(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int result;

	reset_hfi_queues(adreno_dev);

	result = gen8_hfi_send_gpu_perf_table(adreno_dev);
	if (result)
		goto err;

	result = gen8_hfi_send_generic_req(adreno_dev, &gmu->hfi.bw_table,
		sizeof(gmu->hfi.bw_table));
	if (result)
		goto err;

	result = gen8_hfi_send_acd_feature_ctrl(adreno_dev);
	if (result)
		goto err;

	result = gen8_hfi_send_bcl_feature_ctrl(adreno_dev);
	if (result)
		goto err;

	result = gen8_hfi_send_clx_feature_ctrl(adreno_dev);
	if (result)
		goto err;

	result = gen8_hfi_send_ifpc_feature_ctrl(adreno_dev);
	if (result)
		goto err;

	result = gen8_hfi_send_core_fw_start(adreno_dev);
	if (result)
		goto err;

	set_bit(GMU_PRIV_HFI_STARTED, &gmu->flags);

	/* Request default DCVS level */
	result = kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
	if (result)
		goto err;

	/* Request default BW vote */
	result = kgsl_pwrctrl_axi(device, true);

err:
	if (result)
		gen8_hfi_stop(adreno_dev);

	return result;
}
void gen8_hfi_stop(struct adreno_device *adreno_dev)
{
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_pwrctrl_axi(device, false);

	clear_bit(GMU_PRIV_HFI_STARTED, &gmu->flags);
}
/* HFI interrupt handler */
irqreturn_t gen8_hfi_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;
	struct gen8_gmu_device *gmu = to_gen8_gmu(ADRENO_DEVICE(device));
	u32 status = 0;

	gmu_core_regread(device, GEN8_GMUCX_GMU2HOST_INTR_INFO, &status);
	gmu_core_regwrite(device, GEN8_GMUCX_GMU2HOST_INTR_CLR, HFI_IRQ_MASK);

	if (status & HFI_IRQ_DBGQ_MASK)
		gen8_hfi_process_queue(gmu, HFI_DBG_ID, NULL);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"GMU CM3 fault interrupt received\n");
		atomic_set(&gmu->cm3_fault, 1);

		/* make sure other CPUs see the update */
		smp_wmb();
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
			"Unhandled HFI interrupts 0x%x\n",
			status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}