// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include <soc/tegra/fuse.h>

#include "mc.h"

static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_186_SOC
	{ .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_194_SOC
	{ .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_234_SOC
	{ .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);

static void tegra_mc_devm_action_put_device(void *data)
{
	struct tegra_mc *mc = data;

	put_device(mc->dev);
}

/**
 * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
 * @dev: device pointer for the consumer device
 *
 * This function will search for the Memory Controller node in a device-tree
 * and retrieve the Memory Controller handle.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
 */
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct tegra_mc *mc;
	int err;

	np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
	if (err)
		return ERR_PTR(err);

	return mc;
}
EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
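/*
 * Example usage (illustrative sketch, kept in a comment so it does not
 * affect the build): a consumer driver whose device-tree node carries the
 * "nvidia,memory-controller" phandle parsed above would typically look up
 * the controller from its probe routine. foo_probe() is a made-up name.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct tegra_mc *mc;
 *
 *		mc = devm_tegra_memory_controller_get(&pdev->dev);
 *		if (IS_ERR(mc))
 *			return PTR_ERR(mc);
 *
 *		return 0;
 *	}
 *
 * The devm_ action registered above drops the reference automatically when
 * the consumer device is unbound.
 */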

int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
{
	if (mc->soc->ops && mc->soc->ops->probe_device)
		return mc->soc->ops->probe_device(mc, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_probe_device);

static int tegra_mc_block_dma_common(struct tegra_mc *mc,
				     const struct tegra_mc_reset *rst)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&mc->lock, flags);

	value = mc_readl(mc, rst->control) | BIT(rst->bit);
	mc_writel(mc, value, rst->control);

	spin_unlock_irqrestore(&mc->lock, flags);

	return 0;
}

static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
				       const struct tegra_mc_reset *rst)
{
	return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
}

static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
				       const struct tegra_mc_reset *rst)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&mc->lock, flags);

	value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
	mc_writel(mc, value, rst->control);

	spin_unlock_irqrestore(&mc->lock, flags);

	return 0;
}

static int tegra_mc_reset_status_common(struct tegra_mc *mc,
					const struct tegra_mc_reset *rst)
{
	return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
}

const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
	.block_dma = tegra_mc_block_dma_common,
	.dma_idling = tegra_mc_dma_idling_common,
	.unblock_dma = tegra_mc_unblock_dma_common,
	.reset_status = tegra_mc_reset_status_common,
};

static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
{
	return container_of(rcdev, struct tegra_mc, reset);
}

static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
							unsigned long id)
{
	unsigned int i;

	for (i = 0; i < mc->soc->num_resets; i++)
		if (mc->soc->resets[i].id == id)
			return &mc->soc->resets[i];

	return NULL;
}

static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int retries = 500;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	/* DMA flushing will fail if reset is already asserted */
	if (rst_ops->reset_status) {
		/* check whether reset is asserted */
		if (rst_ops->reset_status(mc, rst))
			return 0;
	}

	if (rst_ops->block_dma) {
		/* block clients DMA requests */
		err = rst_ops->block_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to block %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->dma_idling) {
		/* wait for completion of the outstanding DMA requests */
		while (!rst_ops->dma_idling(mc, rst)) {
			if (!retries--) {
				dev_err(mc->dev, "failed to flush %s DMA\n",
					rst->name);
				return -EBUSY;
			}

			usleep_range(10, 100);
		}
	}

	if (rst_ops->hotreset_assert) {
		/* clear clients DMA requests sitting before arbitration */
		err = rst_ops->hotreset_assert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
				      unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	if (rst_ops->hotreset_deassert) {
		/* take out client from hot reset */
		err = rst_ops->hotreset_deassert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->unblock_dma) {
		/* allow new DMA requests to proceed to arbitration */
		err = rst_ops->unblock_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to unblock %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	return rst_ops->reset_status(mc, rst);
}

static const struct reset_control_ops tegra_mc_reset_ops = {
	.assert = tegra_mc_hotreset_assert,
	.deassert = tegra_mc_hotreset_deassert,
	.status = tegra_mc_hotreset_status,
};

static int tegra_mc_reset_setup(struct tegra_mc *mc)
{
	int err;

	mc->reset.ops = &tegra_mc_reset_ops;
	mc->reset.owner = THIS_MODULE;
	mc->reset.of_node = mc->dev->of_node;
	mc->reset.of_reset_n_cells = 1;
	mc->reset.nr_resets = mc->soc->num_resets;

	err = reset_controller_register(&mc->reset);
	if (err < 0)
		return err;

	return 0;
}

int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
{
	unsigned int i;
	struct tegra_mc_timing *timing = NULL;

	for (i = 0; i < mc->num_timings; i++) {
		if (mc->timings[i].rate == rate) {
			timing = &mc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(mc->dev, "no memory timing registered for rate %lu\n",
			rate);
		return -EINVAL;
	}

	for (i = 0; i < mc->soc->num_emem_regs; ++i)
		mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
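/*
 * Example usage (illustrative sketch, kept in a comment): the EMC driver is
 * the expected caller, applying the MC half of a timing change as part of an
 * EMC clock rate switch. The surrounding sequencing here is an assumption,
 * not a prescription:
 *
 *	err = tegra_mc_write_emem_configuration(mc, rate);
 *	if (err < 0)
 *		return err;
 *
 *	... program the EMC timing registers and switch the clock to rate ...
 */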

unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
	u8 dram_count;

	dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
	dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
	dram_count++;

	return dram_count;
}
EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);

#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
    defined(CONFIG_ARCH_TEGRA_114_SOC) || \
    defined(CONFIG_ARCH_TEGRA_124_SOC) || \
    defined(CONFIG_ARCH_TEGRA_132_SOC) || \
    defined(CONFIG_ARCH_TEGRA_210_SOC)
static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
{
	unsigned long long tick;
	unsigned int i;
	u32 value;

	/* compute the number of MC clock cycles per tick */
	tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
	do_div(tick, NSEC_PER_SEC);

	value = mc_readl(mc, MC_EMEM_ARB_CFG);
	value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
	value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
	mc_writel(mc, value, MC_EMEM_ARB_CFG);

	/* write latency allowance defaults */
	for (i = 0; i < mc->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &mc->soc->clients[i];
		u32 value;

		value = mc_readl(mc, client->regs.la.reg);
		value &= ~(client->regs.la.mask << client->regs.la.shift);
		value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
		mc_writel(mc, value, client->regs.la.reg);
	}

	/* latch new values */
	mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);

	return 0;
}

static int load_one_timing(struct tegra_mc *mc,
			   struct tegra_mc_timing *timing,
			   struct device_node *node)
{
	int err;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;
	timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
					 sizeof(u32), GFP_KERNEL);
	if (!timing->emem_data)
		return -ENOMEM;

	err = of_property_read_u32_array(node, "nvidia,emem-configuration",
					 timing->emem_data,
					 mc->soc->num_emem_regs);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read EMEM configuration\n",
			node);
		return err;
	}

	return 0;
}

static int load_timings(struct tegra_mc *mc, struct device_node *node)
{
	struct device_node *child;
	struct tegra_mc_timing *timing;
	int child_count = of_get_child_count(node);
	int i = 0, err;

	mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
				   GFP_KERNEL);
	if (!mc->timings)
		return -ENOMEM;

	mc->num_timings = child_count;

	for_each_child_of_node(node, child) {
		timing = &mc->timings[i++];

		err = load_one_timing(mc, timing, child);
		if (err) {
			of_node_put(child);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_setup_timings(struct tegra_mc *mc)
{
	struct device_node *node;
	u32 ram_code, node_ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	mc->num_timings = 0;

	for_each_child_of_node(mc->dev->of_node, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err || (node_ram_code != ram_code))
			continue;

		err = load_timings(mc, node);
		of_node_put(node);
		if (err)
			return err;
		break;
	}

	if (mc->num_timings == 0)
		dev_warn(mc->dev,
			 "no memory timings for RAM code %u registered\n",
			 ram_code);

	return 0;
}

int tegra30_mc_probe(struct tegra_mc *mc)
{
	int err;

	mc->clk = devm_clk_get_optional(mc->dev, "mc");
	if (IS_ERR(mc->clk)) {
		dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
		return PTR_ERR(mc->clk);
	}

	/* ensure that debug features are disabled */
	mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);

	err = tegra_mc_setup_latency_allowance(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
		return err;
	}

	err = tegra_mc_setup_timings(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup timings: %d\n", err);
		return err;
	}

	return 0;
}

const struct tegra_mc_ops tegra30_mc_ops = {
	.probe = tegra30_mc_probe,
	.handle_irq = tegra30_mc_handle_irq,
};
#endif

static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
					  unsigned int *mc_channel)
{
	if ((status & mc->soc->ch_intmask) == 0)
		return -EINVAL;

	*mc_channel = __ffs((status & mc->soc->ch_intmask) >>
			    mc->soc->global_intstatus_channel_shift);

	return 0;
}

static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
					  unsigned int channel)
{
	return BIT(channel) << mc->soc->global_intstatus_channel_shift;
}

irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
{
	struct tegra_mc *mc = data;
	unsigned int bit, channel;
	unsigned long status;

	if (mc->soc->num_channels) {
		u32 global_status;
		int err;

		global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);
		err = mc_global_intstatus_to_channel(mc, global_status, &channel);
		if (err < 0) {
			dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
					    global_status);
			return IRQ_NONE;
		}

		/* mask all interrupts to avoid flooding */
		status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
	} else {
		status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
	}

	if (!status)
		return IRQ_NONE;

	for_each_set_bit(bit, &status, 32) {
		const char *error = tegra_mc_status_names[bit] ?: "unknown";
		const char *client = "unknown", *desc;
		const char *direction, *secure;
		u32 status_reg, addr_reg;
		u32 intmask = BIT(bit);
		phys_addr_t addr = 0;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		u32 addr_hi_reg = 0;
#endif
		unsigned int i;
		char perm[7];
		u8 id, type;
		u32 value;

		switch (intmask) {
		case MC_INT_DECERR_VPR:
			status_reg = MC_ERR_VPR_STATUS;
			addr_reg = MC_ERR_VPR_ADR;
			break;

		case MC_INT_SECERR_SEC:
			status_reg = MC_ERR_SEC_STATUS;
			addr_reg = MC_ERR_SEC_ADR;
			break;

		case MC_INT_DECERR_MTS:
			status_reg = MC_ERR_MTS_STATUS;
			addr_reg = MC_ERR_MTS_ADR;
			break;

		case MC_INT_DECERR_GENERALIZED_CARVEOUT:
			status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
			addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
			break;

		case MC_INT_DECERR_ROUTE_SANITY:
			status_reg = MC_ERR_ROUTE_SANITY_STATUS;
			addr_reg = MC_ERR_ROUTE_SANITY_ADR;
			break;

		default:
			status_reg = MC_ERR_STATUS;
			addr_reg = MC_ERR_ADR;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			if (mc->soc->has_addr_hi_reg)
				addr_hi_reg = MC_ERR_ADR_HI;
#endif
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, status_reg);
		else
			value = mc_readl(mc, status_reg);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (mc->soc->num_address_bits > 32) {
			if (addr_hi_reg) {
				if (mc->soc->num_channels)
					addr = mc_ch_readl(mc, channel, addr_hi_reg);
				else
					addr = mc_readl(mc, addr_hi_reg);
			} else {
				addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
					MC_ERR_STATUS_ADR_HI_MASK);
			}
			addr <<= 32;
		}
#endif

		if (value & MC_ERR_STATUS_RW)
			direction = "write";
		else
			direction = "read";

		if (value & MC_ERR_STATUS_SECURITY)
			secure = "secure ";
		else
			secure = "";

		id = value & mc->soc->client_id_mask;

		for (i = 0; i < mc->soc->num_clients; i++) {
			if (mc->soc->clients[i].id == id) {
				client = mc->soc->clients[i].name;
				break;
			}
		}

		type = (value & MC_ERR_STATUS_TYPE_MASK) >>
		       MC_ERR_STATUS_TYPE_SHIFT;
		desc = tegra_mc_error_names[type];

		switch (value & MC_ERR_STATUS_TYPE_MASK) {
		case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
			perm[0] = ' ';
			perm[1] = '[';

			if (value & MC_ERR_STATUS_READABLE)
				perm[2] = 'R';
			else
				perm[2] = '-';

			if (value & MC_ERR_STATUS_WRITABLE)
				perm[3] = 'W';
			else
				perm[3] = '-';

			if (value & MC_ERR_STATUS_NONSECURE)
				perm[4] = '-';
			else
				perm[4] = 'S';

			perm[5] = ']';
			perm[6] = '\0';
			break;

		default:
			perm[0] = '\0';
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, addr_reg);
		else
			value = mc_readl(mc, addr_reg);
		addr |= value;

		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
				    client, secure, direction, &addr, error,
				    desc, perm);
	}

	/* clear interrupts */
	if (mc->soc->num_channels) {
		mc_ch_writel(mc, channel, status, MC_INTSTATUS);
		mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
			     mc_channel_to_global_intstatus(mc, channel),
			     MC_GLOBAL_INTSTATUS);
	} else {
		mc_writel(mc, status, MC_INTSTATUS);
	}

	return IRQ_HANDLED;
}

const char *const tegra_mc_status_names[32] = {
	[ 1] = "External interrupt",
	[ 6] = "EMEM address decode error",
	[ 7] = "GART page fault",
	[ 8] = "Security violation",
	[ 9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
	[17] = "Generalized carveout violation",
	[20] = "Route Sanity error",
};

const char *const tegra_mc_error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};

/*
 * The Memory Controller (MC) has a number of Memory Clients that issue
 * memory bandwidth allocation requests to the MC interconnect provider.
 * The MC provider aggregates the requests and sends the aggregated request
 * up to the External Memory Controller (EMC) interconnect provider, which
 * re-configures the hardware interface to the External Memory (EMEM) in
 * accordance with the required bandwidth. Each MC interconnect node
 * represents an individual Memory Client.
 *
 * Memory interconnect topology:
 *
 *               +----+
 * +--------+    |    |
 * | TEXSRD +--->+    |
 * +--------+    |    |
 *               |    |    +-----+    +------+
 *    ...        | MC +--->+ EMC +--->+ EMEM |
 *               |    |    +-----+    +------+
 * +--------+    |    |
 * | DISP.. +--->+    |
 * +--------+    |    |
 *               +----+
 */
static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
{
	struct icc_node *node;
	unsigned int i;
	int err;

	/* older device-trees don't have interconnect properties */
	if (!device_property_present(mc->dev, "#interconnect-cells") ||
	    !mc->soc->icc_ops)
		return 0;

	mc->provider.dev = mc->dev;
	mc->provider.data = &mc->provider;
	mc->provider.set = mc->soc->icc_ops->set;
	mc->provider.aggregate = mc->soc->icc_ops->aggregate;
	mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;

	icc_provider_init(&mc->provider);

	/* create Memory Controller node */
	node = icc_node_create(TEGRA_ICC_MC);
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->name = "Memory Controller";
	icc_node_add(node, &mc->provider);

	/* link Memory Controller to External Memory Controller */
	err = icc_link_create(node, TEGRA_ICC_EMC);
	if (err)
		goto remove_nodes;

	for (i = 0; i < mc->soc->num_clients; i++) {
		/* create MC client node */
		node = icc_node_create(mc->soc->clients[i].id);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto remove_nodes;
		}

		node->name = mc->soc->clients[i].name;
		icc_node_add(node, &mc->provider);

		/* link Memory Client to Memory Controller */
		err = icc_link_create(node, TEGRA_ICC_MC);
		if (err)
			goto remove_nodes;
	}

	err = icc_provider_register(&mc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&mc->provider);

	return err;
}
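/*
 * Example usage (illustrative sketch, kept in a comment): with the provider
 * above registered, a memory client driver can request bandwidth through the
 * generic interconnect API. The "dma-mem" path name and the bandwidth
 * variables are assumptions made for the sketch:
 *
 *	struct icc_path *path;
 *
 *	path = devm_of_icc_get(dev, "dma-mem");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 *	err = icc_set_bw(path, kBps_to_icc(avg_kbps), kBps_to_icc(peak_kbps));
 *
 * The request is aggregated by the MC provider and propagated to the EMC
 * provider, as described in the topology comment above.
 */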

static int tegra_mc_probe(struct platform_device *pdev)
{
	struct tegra_mc *mc;
	u64 mask;
	int err;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mc);
	spin_lock_init(&mc->lock);
	mc->soc = of_device_get_match_data(&pdev->dev);
	mc->dev = &pdev->dev;

	mask = DMA_BIT_MASK(mc->soc->num_address_bits);

	err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	/* length of MC tick in nanoseconds */
	mc->tick = 30;

	mc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mc->regs))
		return PTR_ERR(mc->regs);

	mc->debugfs.root = debugfs_create_dir("mc", NULL);

	if (mc->soc->ops && mc->soc->ops->probe) {
		err = mc->soc->ops->probe(mc);
		if (err < 0)
			return err;
	}

	if (mc->soc->ops && mc->soc->ops->handle_irq) {
		mc->irq = platform_get_irq(pdev, 0);
		if (mc->irq < 0)
			return mc->irq;

		WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");

		if (mc->soc->num_channels)
			mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
				     MC_INTMASK);
		else
			mc_writel(mc, mc->soc->intmask, MC_INTMASK);

		err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
				       dev_name(&pdev->dev), mc);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
				err);
			return err;
		}
	}

	if (mc->soc->reset_ops) {
		err = tegra_mc_reset_setup(mc);
		if (err < 0)
			dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
	}

	err = tegra_mc_interconnect_setup(mc);
	if (err < 0)
		dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
			err);

	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
		if (IS_ERR(mc->smmu)) {
			dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
				PTR_ERR(mc->smmu));
			mc->smmu = NULL;
		}
	}

	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
		mc->gart = tegra_gart_probe(&pdev->dev, mc);
		if (IS_ERR(mc->gart)) {
			dev_err(&pdev->dev, "failed to probe GART: %ld\n",
				PTR_ERR(mc->gart));
			mc->gart = NULL;
		}
	}

	return 0;
}

static int __maybe_unused tegra_mc_suspend(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	if (mc->soc->ops && mc->soc->ops->suspend)
		return mc->soc->ops->suspend(mc);

	return 0;
}

static int __maybe_unused tegra_mc_resume(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	if (mc->soc->ops && mc->soc->ops->resume)
		return mc->soc->ops->resume(mc);

	return 0;
}

static void tegra_mc_sync_state(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	/* check whether ICC provider is registered */
	if (mc->provider.dev == dev)
		icc_sync_state(dev);
}

static const struct dev_pm_ops tegra_mc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
};

static struct platform_driver tegra_mc_driver = {
	.driver = {
		.name = "tegra-mc",
		.of_match_table = tegra_mc_of_match,
		.pm = &tegra_mc_pm_ops,
		.suppress_bind_attrs = true,
		.sync_state = tegra_mc_sync_state,
	},
	.prevent_deferred_probe = true,
	.probe = tegra_mc_probe,
};

static int tegra_mc_init(void)
{
	return platform_driver_register(&tegra_mc_driver);
}
arch_initcall(tegra_mc_init);

MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");
MODULE_LICENSE("GPL v2");