mtk_scp.c

// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.

#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>

#include "mtk_common.h"
#include "remoteproc_internal.h"

#define MAX_CODE_SIZE 0x500000
#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
/**
 * scp_get() - get a reference to SCP.
 *
 * @pdev:	the platform device of the module requesting the SCP platform
 *		device in order to use the SCP API.
 *
 * Return: NULL on failure, otherwise a reference to the SCP.
 **/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *scp_node;
	struct platform_device *scp_pdev;

	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
	if (!scp_node) {
		dev_err(dev, "can't get SCP node\n");
		return NULL;
	}

	scp_pdev = of_find_device_by_node(scp_node);
	of_node_put(scp_node);

	if (WARN_ON(!scp_pdev)) {
		dev_err(dev, "SCP pdev failed\n");
		return NULL;
	}

	return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);
/**
 * scp_put() - "free" the SCP
 *
 * @scp:	mtk_scp structure from scp_get().
 **/
void scp_put(struct mtk_scp *scp)
{
	put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);
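
/*
 * Example usage (a minimal sketch, not part of this driver): a client driver
 * whose device node carries a "mediatek,scp" phandle could look up and later
 * release the SCP roughly as follows; the client variable names are
 * hypothetical.
 *
 *	struct mtk_scp *scp = scp_get(client_pdev);
 *
 *	if (!scp)
 *		return -EPROBE_DEFER;
 *	// ... use the SCP IPI / rproc API ...
 *	scp_put(scp);
 */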
static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
	rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
}

static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct mtk_scp *scp = (struct mtk_scp *)priv;
	struct scp_run *run = (struct scp_run *)data;

	scp->run.signaled = run->signaled;
	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
	scp->run.dec_capability = run->dec_capability;
	scp->run.enc_capability = run->enc_capability;
	wake_up_interruptible(&scp->run.wq);
}
static void scp_ipi_handler(struct mtk_scp *scp)
{
	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
	scp_ipi_handler_t handler;
	u32 id = readl(&rcv_obj->id);
	u32 len = readl(&rcv_obj->len);

	if (len > SCP_SHARE_BUFFER_SIZE) {
		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
			SCP_SHARE_BUFFER_SIZE);
		return;
	}
	if (id >= SCP_IPI_MAX) {
		dev_err(scp->dev, "No such ipi id = %d\n", id);
		return;
	}

	scp_ipi_lock(scp, id);
	handler = ipi_desc[id].handler;
	if (!handler) {
		dev_err(scp->dev, "No such ipi id = %d\n", id);
		scp_ipi_unlock(scp, id);
		return;
	}

	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
	handler(tmp_data, len, ipi_desc[id].priv);
	scp_ipi_unlock(scp, id);

	scp->ipi_id_ack[id] = true;
	wake_up(&scp->ack_wq);
}

static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset);
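
/*
 * IPI buffer layout in SCP SRAM: the receive object lives at the offset
 * advertised by the firmware's ".ipi_buffer" section (or the per-SoC default
 * when the section is absent), immediately followed by the send object.
 */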
static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
{
	int ret;
	size_t offset;

	/* read the ipi buf addr from FW itself first */
	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
	if (ret) {
		/* use default ipi buf addr if the FW doesn't have it */
		offset = scp->data->ipi_buf_offset;
		if (!offset)
			return ret;
	}
	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);

	scp->recv_buf = (struct mtk_share_obj __iomem *)
			(scp->sram_base + offset);
	scp->send_buf = (struct mtk_share_obj __iomem *)
			(scp->sram_base + offset + sizeof(*scp->recv_buf));
	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));

	return 0;
}
static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->reg_base + MT8183_SW_RSTN);
	val &= ~MT8183_SW_RSTN_BIT;
	writel(val, scp->reg_base + MT8183_SW_RSTN);
}

static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->reg_base + MT8183_SW_RSTN);
	val |= MT8183_SW_RSTN_BIT;
	writel(val, scp->reg_base + MT8183_SW_RSTN);
}

static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
}

static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
}

static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);
	else
		scp_wdt_handler(scp, scp_to_host);

	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
	       scp->reg_base + MT8183_SCP_TO_HOST);
}

static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);

		/*
		 * SCP won't send another interrupt until we clear
		 * MT8192_SCP2APMCU_IPC.
		 */
		writel(MT8192_SCP_IPC_INT_BIT,
		       scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
	} else {
		scp_wdt_handler(scp, scp_to_host);
		writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
	}
}
static irqreturn_t scp_irq_handler(int irq, void *priv)
{
	struct mtk_scp *scp = priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return IRQ_NONE;
	}

	scp->data->scp_irq_handler(scp);

	clk_disable_unprepare(scp->clk);

	return IRQ_HANDLED;
}
static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;
	const u8 *elf_data = fw->data;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		u32 offset = phdr->p_offset;
		void __iomem *ptr;

		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
			phdr->p_type, da, memsz, filesz);

		if (phdr->p_type != PT_LOAD)
			continue;
		if (!filesz)
			continue;

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
				filesz, memsz);
			ret = -EINVAL;
			break;
		}

		if (offset + filesz > fw->size) {
			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
				offset + filesz, fw->size);
			ret = -EINVAL;
			break;
		}

		/* grab the kernel address for this device address */
		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
		if (!ptr) {
			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
			ret = -EINVAL;
			break;
		}

		/* put the segment where the remote processor expects it */
		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
	}

	return ret;
}
static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset)
{
	struct elf32_hdr *ehdr;
	struct elf32_shdr *shdr, *shdr_strtab;
	int i;
	const u8 *elf_data = fw->data;
	const char *strtab;

	ehdr = (struct elf32_hdr *)elf_data;
	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
	shdr_strtab = shdr + ehdr->e_shstrndx;
	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (strcmp(strtab + shdr->sh_name,
			   SECTION_NAME_IPI_BUFFER) == 0) {
			*offset = shdr->sh_addr;
			return 0;
		}
	}

	return -ENOENT;
}
static int mt8183_scp_clk_get(struct mtk_scp *scp)
{
	struct device *dev = scp->dev;
	int ret = 0;

	scp->clk = devm_clk_get(dev, "main");
	if (IS_ERR(scp->clk)) {
		dev_err(dev, "Failed to get clock\n");
		ret = PTR_ERR(scp->clk);
	}

	return ret;
}

static int mt8192_scp_clk_get(struct mtk_scp *scp)
{
	return mt8183_scp_clk_get(scp);
}

static int mt8195_scp_clk_get(struct mtk_scp *scp)
{
	scp->clk = NULL;

	return 0;
}
static int mt8183_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);

	/* Turn on the power of SCP's SRAM before using it. */
	writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}
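
/*
 * Walk an SRAM power-down register one bank at a time: power-on clears the
 * PD bits from the most-significant bit downward, power-off sets them from
 * the least-significant bit upward, never touching bits in @reserved_mask.
 * Stepping bank by bank is presumably meant to avoid switching every SRAM
 * bank in a single write.
 */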
static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
{
	int i;

	for (i = 31; i >= 0; i--)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
	writel(0, addr);
}

static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
{
	int i;

	writel(0, addr);
	for (i = 0; i < 32; i++)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
}
static int mt8186_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Turn on the power of SCP's SRAM before using it, one block at a time. */
	scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
	writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
	writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}
static int mt8192_scp_before_load(struct mtk_scp *scp)
{
	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
	writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);

	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);

	/* enable SRAM clock */
	scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8195_scp_before_load(struct mtk_scp *scp)
{
	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
	writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);

	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);

	/* enable SRAM clock */
	scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
			  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}
static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	/* Hold SCP in reset while loading FW. */
	scp->data->scp_reset_assert(scp);

	ret = scp->data->scp_before_load(scp);
	if (ret < 0)
		goto leave;

	ret = scp_elf_load_segments(rproc, fw);
leave:
	clk_disable_unprepare(scp->clk);

	return ret;
}

static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	ret = scp_ipi_init(scp, fw);
	clk_disable_unprepare(scp->clk);

	return ret;
}
static int scp_start(struct rproc *rproc)
{
	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
	struct device *dev = scp->dev;
	struct scp_run *run = &scp->run;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	run->signaled = false;

	scp->data->scp_reset_deassert(scp);

	ret = wait_event_interruptible_timeout(
					run->wq,
					run->signaled,
					msecs_to_jiffies(2000));

	if (ret == 0) {
		dev_err(dev, "wait SCP initialization timeout!\n");
		ret = -ETIME;
		goto stop;
	}
	if (ret == -ERESTARTSYS) {
		dev_err(dev, "wait SCP interrupted by a signal!\n");
		goto stop;
	}

	clk_disable_unprepare(scp->clk);
	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);

	return 0;

stop:
	scp->data->scp_reset_assert(scp);
	clk_disable_unprepare(scp->clk);
	return ret;
}
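
/*
 * Device-address translation: on MT8183-class parts the SCP sees its SRAM at
 * device address 0 and, when a DRAM region was reserved, the shared DRAM at
 * the DMA address allocated in scp_map_memory_region(). MT8192-class parts
 * instead match the device address against the SRAM, optional L1TCM and
 * optional DRAM windows (see mt8192_scp_da_to_va() below).
 */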
static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;

	if (da < scp->sram_size) {
		offset = da;
		if (offset >= 0 && (offset + len) <= scp->sram_size)
			return (void __force *)scp->sram_base + offset;
	} else if (scp->dram_size) {
		offset = da - scp->dma_addr;
		if (offset >= 0 && (offset + len) <= scp->dram_size)
			return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;

	if (da >= scp->sram_phys &&
	    (da + len) <= scp->sram_phys + scp->sram_size) {
		offset = da - scp->sram_phys;
		return (void __force *)scp->sram_base + offset;
	}

	/* optional memory region */
	if (scp->l1tcm_size &&
	    da >= scp->l1tcm_phys &&
	    (da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
		offset = da - scp->l1tcm_phys;
		return (void __force *)scp->l1tcm_base + offset;
	}

	/* optional memory region */
	if (scp->dram_size &&
	    da >= scp->dma_addr &&
	    (da + len) <= scp->dma_addr + scp->dram_size) {
		offset = da - scp->dma_addr;
		return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;

	return scp->data->scp_da_to_va(scp, da, len);
}
static void mt8183_scp_stop(struct mtk_scp *scp)
{
	/* Disable SCP watchdog */
	writel(0, scp->reg_base + MT8183_WDT_CFG);
}

static void mt8192_scp_stop(struct mtk_scp *scp)
{
	/* Disable SRAM clock */
	scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_stop(struct mtk_scp *scp)
{
	/* Disable SRAM clock */
	scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
			   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
}

static int scp_stop(struct rproc *rproc)
{
	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return ret;
	}

	scp->data->scp_reset_assert(scp);
	scp->data->scp_stop(scp);
	clk_disable_unprepare(scp->clk);

	return 0;
}

static const struct rproc_ops scp_ops = {
	.start		= scp_start,
	.stop		= scp_stop,
	.load		= scp_load,
	.da_to_va	= scp_da_to_va,
	.parse_fw	= scp_parse_fw,
};
/**
 * scp_get_device() - get device struct of SCP
 *
 * @scp:	mtk_scp structure
 **/
struct device *scp_get_device(struct mtk_scp *scp)
{
	return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);

/**
 * scp_get_rproc() - get rproc struct of SCP
 *
 * @scp:	mtk_scp structure
 **/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
	return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);

/**
 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video decoder hardware capability
 **/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
	return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);

/**
 * scp_get_venc_hw_capa() - get video encoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video encoder hardware capability
 **/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
	return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
/**
 * scp_mapping_dm_addr() - map an SCP SRAM/DRAM address to a kernel virtual address
 *
 * @scp:	mtk_scp structure
 * @mem_addr:	memory address as seen by the SCP
 *
 * Map the SCP's SRAM address, DMEM (Data Extended Memory) address, or
 * working-buffer address to a kernel virtual address.
 *
 * Return: ERR_PTR(-EINVAL) if the mapping failed,
 * otherwise the mapped kernel virtual address
 **/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
	void *ptr;

	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
	if (!ptr)
		return ERR_PTR(-EINVAL);

	return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
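
/*
 * Example usage (a minimal sketch, not part of this driver): a codec driver
 * that received an SCP-side buffer address over IPI could translate it as
 * follows; the variable names are hypothetical.
 *
 *	void *va = scp_mapping_dm_addr(scp, scp_buf_addr);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */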
static int scp_map_memory_region(struct mtk_scp *scp)
{
	int ret;

	ret = of_reserved_mem_device_init(scp->dev);

	/* reserved memory is optional. */
	if (ret == -ENODEV) {
		dev_info(scp->dev, "skipping reserved memory initialization.");
		return 0;
	}

	if (ret) {
		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
		return -ENOMEM;
	}

	/* Reserved SCP code size */
	scp->dram_size = MAX_CODE_SIZE;
	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
					   &scp->dma_addr, GFP_KERNEL);
	if (!scp->cpu_addr)
		return -ENOMEM;

	return 0;
}

static void scp_unmap_memory_region(struct mtk_scp *scp)
{
	if (scp->dram_size == 0)
		return;

	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
			  scp->dma_addr);
	of_reserved_mem_device_release(scp->dev);
}
static int scp_register_ipi(struct platform_device *pdev, u32 id,
			    ipi_handler_t handler, void *priv)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_register(scp, id, handler, priv);
}

static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	scp_ipi_unregister(scp, id);
}

static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
			unsigned int len, unsigned int wait)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_send(scp, id, buf, len, wait);
}

static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
	.send_ipi = scp_send_ipi,
	.register_ipi = scp_register_ipi,
	.unregister_ipi = scp_unregister_ipi,
	.ns_ipi_id = SCP_IPI_NS_SERVICE,
};

static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
	scp->rpmsg_subdev =
		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
					      &mtk_scp_rpmsg_info);
	if (scp->rpmsg_subdev)
		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}

static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
	if (scp->rpmsg_subdev) {
		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
		scp->rpmsg_subdev = NULL;
	}
}
static int scp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_scp *scp;
	struct rproc *rproc;
	struct resource *res;
	const char *fw_name = "scp.img";
	int ret, i;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
	if (!rproc)
		return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");

	scp = (struct mtk_scp *)rproc->priv;
	scp->rproc = rproc;
	scp->dev = dev;
	scp->data = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, scp);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	scp->sram_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp->sram_base))
		return dev_err_probe(dev, PTR_ERR(scp->sram_base),
				     "Failed to parse and map sram memory\n");

	scp->sram_size = resource_size(res);
	scp->sram_phys = res->start;

	/* l1tcm is an optional memory region */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
	scp->l1tcm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp->l1tcm_base)) {
		ret = PTR_ERR(scp->l1tcm_base);
		if (ret != -EINVAL) {
			return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
		}
	} else {
		scp->l1tcm_size = resource_size(res);
		scp->l1tcm_phys = res->start;
	}

	scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
	if (IS_ERR(scp->reg_base))
		return dev_err_probe(dev, PTR_ERR(scp->reg_base),
				     "Failed to parse and map cfg memory\n");

	ret = scp->data->scp_clk_get(scp);
	if (ret)
		return ret;

	ret = scp_map_memory_region(scp);
	if (ret)
		return ret;

	mutex_init(&scp->send_lock);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_init(&scp->ipi_desc[i].lock);

	/* register SCP initialization IPI */
	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
	if (ret) {
		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
		goto release_dev_mem;
	}

	init_waitqueue_head(&scp->run.wq);
	init_waitqueue_head(&scp->ack_wq);

	scp_add_rpmsg_subdev(scp);

	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
					scp_irq_handler, IRQF_ONESHOT,
					pdev->name, scp);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto remove_subdev;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_subdev;

	return 0;

remove_subdev:
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);

	return ret;
}

static int scp_remove(struct platform_device *pdev)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);
	int i;

	rproc_del(scp->rproc);
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);

	return 0;
}
static const struct mtk_scp_of_data mt8183_of_data = {
	.scp_clk_get = mt8183_scp_clk_get,
	.scp_before_load = mt8183_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x7bdb0,
};

static const struct mtk_scp_of_data mt8186_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8186_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x3bdb0,
};

static const struct mtk_scp_of_data mt8188_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8192_of_data = {
	.scp_clk_get = mt8192_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8195_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8195_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct of_device_id mtk_scp_of_match[] = {
	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);

static struct platform_driver mtk_scp_driver = {
	.probe = scp_probe,
	.remove = scp_remove,
	.driver = {
		.name = "mtk-scp",
		.of_match_table = mtk_scp_of_match,
	},
};

module_platform_driver(mtk_scp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");