adreno_a6xx_hfi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/nvmem-consumer.h>

#include "adreno.h"
#include "adreno_a6xx.h"
#include "adreno_a6xx_hfi.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"

/* Below section is for all structures related to HFI queues */
#define HFI_QUEUE_MAX HFI_QUEUE_DEFAULT_CNT

/* Total header sizes + queue sizes + 16 for alignment */
#define HFIMEM_SIZE (sizeof(struct hfi_queue_table) + 16 + \
		(HFI_QUEUE_SIZE * HFI_QUEUE_MAX))
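
/*
 * The HFI shared memory block therefore holds one struct hfi_queue_table
 * (the table header plus HFI_QUEUE_MAX queue headers) followed by the
 * queue ring buffers themselves, HFI_QUEUE_SIZE bytes each, with 16 bytes
 * of slack for alignment.
 */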

struct a6xx_hfi *to_a6xx_hfi(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	return &gmu->hfi;
}
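
/*
 * a6xx_hfi_queue_read() - pull one HFI packet out of a GMU-to-host queue.
 * Each queue is a ring of dwords indexed by the read_index/write_index
 * fields in its header: an empty queue (read_index == write_index) returns
 * -ENODATA, and a packet larger than the caller's buffer returns -EMSGSIZE.
 * On HFI v2 and later the firmware pads packets to a 4-dword boundary, so
 * the updated read index is aligned to match.
 */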

/* Sizes in the functions below are in units of dwords */
int a6xx_hfi_queue_read(struct a6xx_gmu_device *gmu, uint32_t queue_idx,
	unsigned int *output, unsigned int max_size)
{
	struct kgsl_memdesc *mem_addr = gmu->hfi.hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
	uint32_t *queue;
	uint32_t msg_hdr;
	uint32_t i, read;
	uint32_t size;
	int result = 0;

	if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
		return -EINVAL;

	if (hdr->read_index == hdr->write_index)
		return -ENODATA;

	/* Clear the output data before populating */
	memset(output, 0, max_size);

	queue = HOST_QUEUE_START_ADDR(mem_addr, queue_idx);
	msg_hdr = queue[hdr->read_index];
	size = MSG_HDR_GET_SIZE(msg_hdr);

	if (size > (max_size >> 2)) {
		dev_err(&gmu->pdev->dev,
			"HFI message too big: hdr:0x%x rd idx=%d\n",
			msg_hdr, hdr->read_index);
		result = -EMSGSIZE;
		goto done;
	}

	read = hdr->read_index;

	if (read < hdr->queue_size) {
		for (i = 0; i < size && i < (max_size >> 2); i++) {
			output[i] = queue[read];
			read = (read + 1) % hdr->queue_size;
		}
		result = size;
	} else {
		/* In case FW messed up */
		dev_err(&gmu->pdev->dev,
			"Read index %d greater than queue size %d\n",
			hdr->read_index, hdr->queue_size);
		result = -ENODATA;
	}

	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2)
		read = ALIGN(read, SZ_4) % hdr->queue_size;

	/* For acks, trace the packet for which this ack was sent */
	if (MSG_HDR_GET_TYPE(msg_hdr) == HFI_MSG_ACK)
		trace_kgsl_hfi_receive(MSG_HDR_GET_ID(output[1]),
			MSG_HDR_GET_SIZE(output[1]),
			MSG_HDR_GET_SEQNUM(output[1]));
	else
		trace_kgsl_hfi_receive(MSG_HDR_GET_ID(msg_hdr),
			MSG_HDR_GET_SIZE(msg_hdr), MSG_HDR_GET_SEQNUM(msg_hdr));

	hfi_update_read_idx(hdr, read);

done:
	return result;
}
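
/*
 * a6xx_hfi_queue_write() - copy one HFI packet into a host-to-GMU queue.
 * size_bytes must be a multiple of 4; the payload is copied a dword at a
 * time, wrapping at queue_size. Free space is computed from the current
 * read/write indices: as an illustrative example, with queue_size of 1024
 * dwords, write_idx 1000 and read_idx 10, empty_space is
 * 1024 - (1000 - 10) = 34 dwords. On HFI v2 and later the packet is padded
 * up to a 4-dword boundary with the 0xFAFAFAFA cookie to mark the unused
 * tail.
 */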

/* Sizes in the functions below are in units of dwords */
int a6xx_hfi_queue_write(struct adreno_device *adreno_dev, uint32_t queue_idx,
	uint32_t *msg, u32 size_bytes)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_queue_table *tbl = gmu->hfi.hfi_mem->hostptr;
	struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
	uint32_t *queue;
	uint32_t i, write_idx, read_idx, empty_space;
	uint32_t size_dwords = size_bytes >> 2;
	u32 align_size = ALIGN(size_dwords, SZ_4);
	uint32_t id = MSG_HDR_GET_ID(*msg);

	if (hdr->status == HFI_QUEUE_STATUS_DISABLED || !IS_ALIGNED(size_bytes, sizeof(u32)))
		return -EINVAL;

	queue = HOST_QUEUE_START_ADDR(gmu->hfi.hfi_mem, queue_idx);

	write_idx = hdr->write_index;
	read_idx = hdr->read_index;

	empty_space = (write_idx >= read_idx) ?
			(hdr->queue_size - (write_idx - read_idx))
			: (read_idx - write_idx);

	if (empty_space <= align_size)
		return -ENOSPC;

	for (i = 0; i < size_dwords; i++) {
		queue[write_idx] = msg[i];
		write_idx = (write_idx + 1) % hdr->queue_size;
	}

	/* Cookify any unused data at the end of the write buffer */
	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2) {
		for (; i < align_size; i++) {
			queue[write_idx] = 0xFAFAFAFA;
			write_idx = (write_idx + 1) % hdr->queue_size;
		}
	}

	trace_kgsl_hfi_send(id, size_dwords, MSG_HDR_GET_SEQNUM(*msg));

	hfi_update_write_idx(&hdr->write_index, write_idx);

	return 0;
}
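
/*
 * a6xx_hfi_cmdq_write() - write a packet to the command queue and ring the
 * doorbell. The wmb() orders the packet contents and the updated write
 * index ahead of the host-to-GMU interrupt that tells the firmware to
 * drain the queue.
 */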

int a6xx_hfi_cmdq_write(struct adreno_device *adreno_dev, u32 *msg, u32 size_bytes)
{
	int ret;

	ret = a6xx_hfi_queue_write(adreno_dev, HFI_CMD_ID, msg, size_bytes);

	/*
	 * Memory barrier to make sure packet and write index are written before
	 * an interrupt is raised
	 */
	wmb();

	/* Send interrupt to GMU to receive the message */
	if (!ret)
		gmu_core_regwrite(KGSL_DEVICE(adreno_dev),
			A6XX_GMU_HOST2GMU_INTR_SET, 0x1);

	return ret;
}
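
/*
 * init_queues() - populate the queue table header and the per-queue
 * headers in HFI memory. Every queue starts enabled with zeroed read and
 * write indices; A630 and the A615 family run legacy firmware, so their
 * message and debug queue indices are remapped below.
 */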

/* Sizes of the queue and message are in units of dwords */
static void init_queues(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_memdesc *mem_addr = gmu->hfi.hfi_mem;
	int i;
	struct hfi_queue_table *tbl;
	struct hfi_queue_header *hdr;
	struct {
		unsigned int idx;
		unsigned int pri;
		unsigned int status;
	} queue[HFI_QUEUE_MAX] = {
		{ HFI_CMD_IDX, HFI_CMD_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_MSG_IDX, HFI_MSG_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_DBG_IDX, HFI_DBG_PRI, HFI_QUEUE_STATUS_ENABLED },
	};

	/*
	 * Overwrite the queue IDs for A630, A615 and A616 as they use
	 * legacy firmware. Legacy firmware has different queue IDs for
	 * message, debug and dispatch queues (dispatch queues aren't used
	 * on these targets so the queue idx value update is not needed).
	 */
	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
		queue[HFI_MSG_ID].idx = HFI_MSG_IDX_LEGACY;
		queue[HFI_DBG_ID].idx = HFI_DBG_IDX_LEGACY;
	}

	/* Fill Table Header */
	tbl = mem_addr->hostptr;
	tbl->qtbl_hdr.version = 0;
	tbl->qtbl_hdr.size = sizeof(struct hfi_queue_table) >> 2;
	tbl->qtbl_hdr.qhdr0_offset = sizeof(struct hfi_queue_table_header) >> 2;
	tbl->qtbl_hdr.qhdr_size = sizeof(struct hfi_queue_header) >> 2;
	tbl->qtbl_hdr.num_q = HFI_QUEUE_MAX;
	tbl->qtbl_hdr.num_active_q = HFI_QUEUE_MAX;

	memset(&tbl->qhdr[0], 0, sizeof(tbl->qhdr));

	/* Fill Individual Queue Headers */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr->gmuaddr, i);
		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0, 0);
		hdr->status = queue[i].status;
		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
	}
}

int a6xx_hfi_init(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct a6xx_hfi *hfi = &gmu->hfi;

	/* Allocates & maps memory for HFI */
	if (IS_ERR_OR_NULL(hfi->hfi_mem)) {
		hfi->hfi_mem = reserve_gmu_kernel_block(gmu, 0, HFIMEM_SIZE,
			GMU_NONCACHED_KERNEL, 0);
		if (!IS_ERR(hfi->hfi_mem))
			init_queues(adreno_dev);
	}

	return PTR_ERR_OR_ZERO(hfi->hfi_mem);
}
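
/*
 * a6xx_receive_ack_cmd() - handle an F2H ack. The second dword of the ack
 * echoes the header of the request it acknowledges; if it matches the
 * pending command's sent_hdr the full ack payload is copied into
 * ret_cmd->results, otherwise the mismatch is logged and a fault snapshot
 * is taken.
 */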

int a6xx_receive_ack_cmd(struct a6xx_gmu_device *gmu, void *rcvd,
	struct pending_cmd *ret_cmd)
{
	struct adreno_device *adreno_dev = a6xx_gmu_to_adreno(gmu);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	uint32_t *ack = rcvd;
	uint32_t hdr = ack[0];
	uint32_t req_hdr = ack[1];

	if (ret_cmd == NULL)
		return -EINVAL;

	if (CMP_HFI_ACK_HDR(ret_cmd->sent_hdr, req_hdr)) {
		memcpy(&ret_cmd->results, ack, MSG_HDR_GET_SIZE(hdr) << 2);
		return 0;
	}

	/* Didn't find the sender, list the waiter */
	dev_err_ratelimited(&gmu->pdev->dev,
		"HFI ACK: Cannot find sender for 0x%8.8x Waiter: 0x%8.8x\n",
		req_hdr, ret_cmd->sent_hdr);

	gmu_core_fault_snapshot(device);

	return -ENODEV;
}
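
/*
 * poll_gmu_reg() - poll a GMU register until (value & mask) matches
 * expected_val or timeout_ms expires. The loop also samples
 * A6XX_GMU_CM3_FW_INIT_RESULT and bails out early if bits [11:9] show a
 * GMU reset/NMI in progress. On failure, the elapsed time is reported by
 * converting always-on counter ticks (roughly 52 ns each) to milliseconds.
 */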

static int poll_gmu_reg(struct adreno_device *adreno_dev,
	u32 offsetdwords, unsigned int expected_val,
	unsigned int mask, unsigned int timeout_ms)
{
	unsigned int val;
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	u64 ao_pre_poll, ao_post_poll;
	bool nmi = false;

	ao_pre_poll = a6xx_read_alwayson(adreno_dev);

	/* FIXME: readl_poll_timeout? */
	while (time_is_after_jiffies(timeout)) {
		gmu_core_regread(device, offsetdwords, &val);
		if ((val & mask) == expected_val)
			return 0;

		/*
		 * If GMU firmware fails any assertion, error message is sent
		 * to KMD and NMI is triggered. So check if GMU is in NMI and
		 * timeout early. Bits [11:9] of A6XX_GMU_CM3_FW_INIT_RESULT
		 * contain GMU reset status. Non zero value here indicates that
		 * GMU reset is active, NMI handler would eventually complete
		 * and GMU would wait for recovery.
		 */
		gmu_core_regread(device, A6XX_GMU_CM3_FW_INIT_RESULT, &val);
		if (val & 0xE00) {
			nmi = true;
			break;
		}

		usleep_range(10, 100);
	}

	ao_post_poll = a6xx_read_alwayson(adreno_dev);

	/* Check one last time */
	gmu_core_regread(device, offsetdwords, &val);
	if ((val & mask) == expected_val)
		return 0;

	dev_err(&gmu->pdev->dev, "kgsl hfi poll %s: always on: %lld ms\n",
		nmi ? "abort" : "timeout",
		div_u64((ao_post_poll - ao_pre_poll) * 52, USEC_PER_SEC));

	return -ETIMEDOUT;
}
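
/*
 * a6xx_hfi_send_cmd_wait_inline() - stamp the packet header with the next
 * sequence number and its size in dwords, write it to the command queue,
 * and (when ret_cmd is provided) poll the GMU2HOST MSGQ interrupt, clear
 * it, then process the message queue until the matching ack is found.
 */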

static int a6xx_hfi_send_cmd_wait_inline(struct adreno_device *adreno_dev,
	void *data, u32 size_bytes, struct pending_cmd *ret_cmd)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int rc;
	uint32_t *cmd = data;
	struct a6xx_hfi *hfi = &gmu->hfi;
	unsigned int seqnum = atomic_inc_return(&hfi->seqnum);

	*cmd = MSG_HDR_SET_SEQNUM_SIZE(*cmd, seqnum, size_bytes >> 2);
	if (ret_cmd == NULL)
		return a6xx_hfi_cmdq_write(adreno_dev, cmd, size_bytes);

	ret_cmd->sent_hdr = cmd[0];

	rc = a6xx_hfi_cmdq_write(adreno_dev, cmd, size_bytes);
	if (rc)
		return rc;

	rc = poll_gmu_reg(adreno_dev, A6XX_GMU_GMU2HOST_INTR_INFO,
		HFI_IRQ_MSGQ_MASK, HFI_IRQ_MSGQ_MASK, HFI_RSP_TIMEOUT);

	if (rc) {
		gmu_core_fault_snapshot(device);
		dev_err(&gmu->pdev->dev,
			"Timed out waiting on ack for 0x%8.8x (id %d, sequence %d)\n",
			cmd[0], MSG_HDR_GET_ID(*cmd), MSG_HDR_GET_SEQNUM(*cmd));
		return rc;
	}

	/* Clear the interrupt */
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR,
		HFI_IRQ_MSGQ_MASK);

	rc = a6xx_hfi_process_queue(gmu, HFI_MSG_ID, ret_cmd);

	return rc;
}

int a6xx_hfi_send_generic_req(struct adreno_device *adreno_dev, void *cmd, u32 size_bytes)
{
	struct pending_cmd ret_cmd;
	int rc;

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = a6xx_hfi_send_cmd_wait_inline(adreno_dev, cmd, size_bytes, &ret_cmd);
	if (rc)
		return rc;

	if (ret_cmd.results[2]) {
		struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

		gmu_core_fault_snapshot(device);
		dev_err(&gmu->pdev->dev,
			"HFI ACK failure: Req=0x%8.8X, Result=0x%8.8X\n",
			ret_cmd.results[1],
			ret_cmd.results[2]);
		return -EINVAL;
	}

	return 0;
}
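
/*
 * The senders below all follow the same pattern around
 * a6xx_hfi_send_generic_req(): fill a command struct, stamp its header
 * with CMD_MSG_HDR(), then send it and treat a non-zero results[2] in the
 * ack as a firmware-side failure. For example (mirroring
 * a6xx_hfi_send_core_fw_start() further down):
 *
 *	struct hfi_core_fw_start_cmd cmd = { .handle = 0x0 };
 *
 *	ret = CMD_MSG_HDR(cmd, H2F_MSG_CORE_FW_START);
 *	if (!ret)
 *		ret = a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
 */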

static int a6xx_hfi_send_gmu_init(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_gmu_init_cmd cmd = {
		.seg_id = 0,
		.dbg_buffer_addr = (unsigned int) gmu->dump_mem->gmuaddr,
		.dbg_buffer_size = (unsigned int) gmu->dump_mem->size,
		.boot_state = 0x1,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_INIT);
	if (ret)
		return ret;

	return a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
}

static int a6xx_hfi_get_fw_version(struct adreno_device *adreno_dev,
	uint32_t expected_ver, uint32_t *ver)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_fw_version_cmd cmd = {
		.supported_ver = expected_ver,
	};
	int rc;
	struct pending_cmd ret_cmd;

	rc = CMD_MSG_HDR(cmd, H2F_MSG_FW_VER);
	if (rc)
		return rc;

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = a6xx_hfi_send_cmd_wait_inline(adreno_dev, &cmd, sizeof(cmd), &ret_cmd);
	if (rc)
		return rc;

	rc = ret_cmd.results[2];
	if (!rc)
		*ver = ret_cmd.results[3];
	else
		dev_err(&gmu->pdev->dev,
			"gmu get fw ver failed with error=%d\n", rc);

	return rc;
}

int a6xx_hfi_send_core_fw_start(struct adreno_device *adreno_dev)
{
	struct hfi_core_fw_start_cmd cmd = {
		.handle = 0x0,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_CORE_FW_START);
	if (ret)
		return ret;

	return a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
}

static const char *feature_to_string(uint32_t feature)
{
	if (feature == HFI_FEATURE_ACD)
		return "ACD";
	else if (feature == HFI_FEATURE_LM)
		return "LM";

	return "unknown";
}

int a6xx_hfi_send_feature_ctrl(struct adreno_device *adreno_dev,
	uint32_t feature, uint32_t enable, uint32_t data)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_feature_ctrl_cmd cmd = {
		.feature = feature,
		.enable = enable,
		.data = data,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_FEATURE_CTRL);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Unable to %s feature %s (%d)\n",
			enable ? "enable" : "disable",
			feature_to_string(feature),
			feature);

	return ret;
}

int a6xx_hfi_send_set_value(struct adreno_device *adreno_dev,
	u32 type, u32 subtype, u32 data)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_set_value_cmd cmd = {
		.type = type,
		.subtype = subtype,
		.data = data,
	};
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_SET_VALUE);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Unable to set HFI Value %d, %d to %d, error = %d\n",
			type, subtype, data, ret);

	return ret;
}
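
/*
 * a6xx_hfi_send_dcvstbl_v1() - legacy (HFI v1) variant of the DCVS table
 * message: the shared dcvs_table is repacked into the v1 command layout,
 * copying every GX vote plus the two CX votes, before being sent as
 * H2F_MSG_PERF_TBL.
 */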

static int a6xx_hfi_send_dcvstbl_v1(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_dcvstable_cmd *table = &gmu->hfi.dcvs_table;
	struct hfi_dcvstable_v1_cmd cmd = {
		.gpu_level_num = table->gpu_level_num,
		.gmu_level_num = table->gmu_level_num,
	};
	int i, ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_PERF_TBL);
	if (ret)
		return ret;

	for (i = 0; i < table->gpu_level_num; i++) {
		cmd.gx_votes[i].vote = table->gx_votes[i].vote;
		cmd.gx_votes[i].freq = table->gx_votes[i].freq;
	}

	cmd.cx_votes[0].vote = table->cx_votes[0].vote;
	cmd.cx_votes[0].freq = table->cx_votes[0].freq;
	cmd.cx_votes[1].vote = table->cx_votes[1].vote;
	cmd.cx_votes[1].freq = table->cx_votes[1].freq;

	return a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
}

static int a6xx_hfi_send_test(struct adreno_device *adreno_dev)
{
	struct hfi_test_cmd cmd;
	int ret;

	ret = CMD_MSG_HDR(cmd, H2F_MSG_TEST);
	if (ret)
		return ret;

	cmd.data = 0;

	return a6xx_hfi_send_generic_req(adreno_dev, &cmd, sizeof(cmd));
}

void adreno_a6xx_receive_err_req(struct a6xx_gmu_device *gmu, void *rcvd)
{
	struct hfi_err_cmd *cmd = rcvd;

	dev_err(&gmu->pdev->dev, "HFI Error Received: %d %d %.16s\n",
		((cmd->error_code >> 16) & 0xFFFF),
		(cmd->error_code & 0xFFFF),
		(char *) cmd->data);
}

void adreno_a6xx_receive_debug_req(struct a6xx_gmu_device *gmu, void *rcvd)
{
	struct hfi_debug_cmd *cmd = rcvd;

	dev_dbg(&gmu->pdev->dev, "HFI Debug Received: %d %d %d\n",
		cmd->type, cmd->timestamp, cmd->data);
}

static void a6xx_hfi_v1_receiver(struct a6xx_gmu_device *gmu, uint32_t *rcvd,
	struct pending_cmd *ret_cmd)
{
	/* V1 ACK Handler */
	if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_V1_MSG_ACK) {
		a6xx_receive_ack_cmd(gmu, rcvd, ret_cmd);
		return;
	}

	/* V1 Request Handler */
	switch (MSG_HDR_GET_ID(rcvd[0])) {
	case F2H_MSG_ERR: /* No Reply */
		adreno_a6xx_receive_err_req(gmu, rcvd);
		break;
	case F2H_MSG_DEBUG: /* No Reply */
		adreno_a6xx_receive_debug_req(gmu, rcvd);
		break;
	default: /* No Reply */
		dev_err(&gmu->pdev->dev,
			"HFI V1 request %d not supported\n",
			MSG_HDR_GET_ID(rcvd[0]));
		break;
	}
}
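
/*
 * a6xx_hfi_process_queue() - drain one queue, packet by packet. Acks are
 * matched against ret_cmd via a6xx_receive_ack_cmd(), while F2H error and
 * debug requests are logged; anything else is reported as unsupported.
 * HFI v1 firmware is routed through the legacy receiver above.
 */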

int a6xx_hfi_process_queue(struct a6xx_gmu_device *gmu,
	uint32_t queue_idx, struct pending_cmd *ret_cmd)
{
	uint32_t rcvd[MAX_RCVD_SIZE];

	while (a6xx_hfi_queue_read(gmu, queue_idx, rcvd, sizeof(rcvd)) > 0) {
		/* Special case if we're v1 */
		if (GMU_VER_MAJOR(gmu->ver.hfi) < 2) {
			a6xx_hfi_v1_receiver(gmu, rcvd, ret_cmd);
			continue;
		}

		/* V2 ACK Handler */
		if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) {
			int ret = a6xx_receive_ack_cmd(gmu, rcvd, ret_cmd);

			if (ret)
				return ret;
			continue;
		}

		/* V2 Request Handler */
		switch (MSG_HDR_GET_ID(rcvd[0])) {
		case F2H_MSG_ERR: /* No Reply */
			adreno_a6xx_receive_err_req(gmu, rcvd);
			break;
		case F2H_MSG_DEBUG: /* No Reply */
			adreno_a6xx_receive_debug_req(gmu, rcvd);
			break;
		default: /* No Reply */
			dev_err(&gmu->pdev->dev,
				"HFI request %d not supported\n",
				MSG_HDR_GET_ID(rcvd[0]));
			break;
		}
	}

	return 0;
}
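
/*
 * a6xx_hfi_verify_fw_version() - query the GMU firmware version over HFI
 * once (gmu->ver.core acts as a cache) and compare it against the major
 * and minor versions the target's core description expects, warning on a
 * mismatch rather than failing the boot.
 */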

static int a6xx_hfi_verify_fw_version(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	int result;
	unsigned int ver, major, minor;

	/* GMU version is already known, so don't waste time finding again */
	if (gmu->ver.core != 0)
		return 0;

	major = a6xx_core->gmu_major;
	minor = a6xx_core->gmu_minor;

	result = a6xx_hfi_get_fw_version(adreno_dev, GMU_VERSION(major, minor, 0),
		&ver);
	if (result) {
		dev_err_once(&gmu->pdev->dev,
			"Failed to get FW version via HFI\n");
		return result;
	}

	/* For now, warn once. Could return error later if needed */
	if (major != GMU_VER_MAJOR(ver))
		dev_err_once(&gmu->pdev->dev,
			"FW Major Error: Wanted %d, got %d\n",
			major, GMU_VER_MAJOR(ver));

	if (minor > GMU_VER_MINOR(ver))
		dev_err_once(&gmu->pdev->dev,
			"FW Minor Error: Wanted < %d, got %d\n",
			GMU_VER_MINOR(ver), minor);

	/* Save the gmu version information */
	gmu->ver.core = ver;

	return 0;
}

int a6xx_hfi_send_bcl_feature_ctrl(struct adreno_device *adreno_dev)
{
	int ret;

	if (!adreno_dev->bcl_enabled)
		return 0;

	ret = a6xx_hfi_send_feature_ctrl(adreno_dev, HFI_FEATURE_BCL, 1, 0);

	return ret;
}

int a6xx_hfi_send_lm_feature_ctrl(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct hfi_set_value_cmd req;
	u32 slope = 0;
	int ret;

	if (!adreno_dev->lm_enabled)
		return 0;

	memset(&req, 0, sizeof(req));

	nvmem_cell_read_u32(&device->pdev->dev, "isense_slope", &slope);

	ret = CMD_MSG_HDR(req, H2F_MSG_SET_VALUE);
	if (ret)
		return ret;

	req.type = HFI_VALUE_LM_CS0;
	req.subtype = 0;
	req.data = slope;

	ret = a6xx_hfi_send_feature_ctrl(adreno_dev, HFI_FEATURE_LM, 1,
		device->pwrctrl.throttle_mask);
	if (!ret)
		ret = a6xx_hfi_send_generic_req(adreno_dev, &req, sizeof(req));

	return ret;
}

int a6xx_hfi_send_acd_feature_ctrl(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret = 0;

	if (adreno_dev->acd_enabled) {
		ret = a6xx_hfi_send_generic_req(adreno_dev,
			&gmu->hfi.acd_table, sizeof(gmu->hfi.acd_table));
		if (!ret)
			ret = a6xx_hfi_send_feature_ctrl(adreno_dev,
				HFI_FEATURE_ACD, 1, 0);
	}

	return ret;
}

static void reset_hfi_queues(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_memdesc *mem_addr = gmu->hfi.hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr;
	unsigned int i;

	/* Flush HFI queues */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];

		if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
			continue;

		hdr->read_index = hdr->write_index;
	}
}
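
/*
 * a6xx_hfi_start() - bring HFI up after the GMU boots: flush the queues,
 * send the legacy GMU init message on A630/A615-family parts, verify the
 * firmware version, then push the DCVS and bus tables. On HFI v2+ the
 * ACD/LM/BCL feature messages and H2F_MSG_CORE_FW_START follow; on older
 * firmware only the optional H2F_MSG_TEST quirk message is sent. Finally,
 * the default power level and AXI vote are requested.
 */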

int a6xx_hfi_start(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int result;

	reset_hfi_queues(adreno_dev);

	/* This is legacy HFI message for A630 and A615 family firmware */
	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
		result = a6xx_hfi_send_gmu_init(adreno_dev);
		if (result)
			goto err;
	}

	result = a6xx_hfi_verify_fw_version(adreno_dev);
	if (result)
		goto err;

	if (GMU_VER_MAJOR(gmu->ver.hfi) < 2)
		result = a6xx_hfi_send_dcvstbl_v1(adreno_dev);
	else
		result = a6xx_hfi_send_generic_req(adreno_dev,
			&gmu->hfi.dcvs_table, sizeof(gmu->hfi.dcvs_table));
	if (result)
		goto err;

	result = a6xx_hfi_send_generic_req(adreno_dev, &gmu->hfi.bw_table,
		sizeof(gmu->hfi.bw_table));
	if (result)
		goto err;

	/*
	 * If quirk is enabled send H2F_MSG_TEST and tell the GMU
	 * we are sending no more HFIs until the next boot otherwise
	 * send H2F_MSG_CORE_FW_START and features for A640 devices
	 */
	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2) {
		result = a6xx_hfi_send_acd_feature_ctrl(adreno_dev);
		if (result)
			goto err;

		result = a6xx_hfi_send_lm_feature_ctrl(adreno_dev);
		if (result)
			goto err;

		result = a6xx_hfi_send_bcl_feature_ctrl(adreno_dev);
		if (result)
			goto err;

		result = a6xx_hfi_send_core_fw_start(adreno_dev);
		if (result)
			goto err;
	} else {
		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
			result = a6xx_hfi_send_test(adreno_dev);
			if (result)
				goto err;
		}
	}

	set_bit(GMU_PRIV_HFI_STARTED, &gmu->flags);

	/* Request default DCVS level */
	result = kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
	if (result)
		goto err;

	/* Request default BW vote */
	result = kgsl_pwrctrl_axi(device, true);

err:
	if (result)
		a6xx_hfi_stop(adreno_dev);

	return result;
}

void a6xx_hfi_stop(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_pwrctrl_axi(device, false);

	clear_bit(GMU_PRIV_HFI_STARTED, &gmu->flags);
}

/* HFI interrupt handler */
irqreturn_t a6xx_hfi_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(ADRENO_DEVICE(device));
	unsigned int status = 0;

	gmu_core_regread(device, A6XX_GMU_GMU2HOST_INTR_INFO, &status);
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, HFI_IRQ_MASK);

	if (status & HFI_IRQ_DBGQ_MASK)
		a6xx_hfi_process_queue(gmu, HFI_DBG_ID, NULL);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"GMU CM3 fault interrupt received\n");
		atomic_set(&gmu->cm3_fault, 1);

		/* make sure other CPUs see the update */
		smp_wmb();
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
			"Unhandled HFI interrupts 0x%lx\n",
			status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}