// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"
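
/*
 * MMIO accessors for the host1x register apertures: the common and hypervisor
 * regions (where present), the sync region located at info->sync_offset
 * within the main aperture, and the per-channel registers.
 */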
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
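
/*
 * Per-SoC capability descriptors: number of channels, syncpoints, mlocks and
 * wait bases, the sync register offset, the supported DMA mask and the
 * generation-specific init callback. The descriptor is selected through the
 * OF match table further below.
 */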
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC1 */
		.base = 0x1bc0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{
		/* VIC channel */
		.base = 0x17b8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* VIC MMIO */
		.base = 0x1688,
		.offset = 0x34,
		.limit = 0x34
	},
};

static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true,
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
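
/*
 * Program the stream ID protection tables and the VM access tables: grant all
 * VMs access to all stream IDs and classes, and set VM1 as the originator
 * VMID for engine MMIO accesses. No-op on SoCs without a hypervisor aperture.
 */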
static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow access to all stream IDs to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow access to all classes to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}
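
/*
 * Returns the IOMMU domain that host1x translations will go through: the
 * domain already provided by the DMA API if there is one (or NULL if no IOMMU
 * is wanted), otherwise an explicitly allocated and attached domain with an
 * IOVA allocator restricted to the range addressable through the DMA mask.
 */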
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
				to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}
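
/*
 * Look up the optional "mc" and "host1x" reset controls as a released bulk so
 * that the runtime PM callbacks can acquire and release them around each
 * suspend/resume cycle.
 */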
static int host1x_get_resets(struct host1x *host)
{
	int err;

	host->resets[0].id = "mc";
	host->resets[1].id = "host1x";
	host->nresets = ARRAY_SIZE(host->resets);

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				host->dev, host->nresets, host->resets);
	if (err) {
		dev_err(host->dev, "failed to get reset: %d\n", err);
		return err;
	}

	return 0;
}

static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;

	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* the driver's code isn't ready yet for dynamic RPM */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}

static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);

	return 0;
}
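
/*
 * Runtime suspend: quiesce interrupts, save syncpoint state, assert the
 * resets, gate the clock and release the resets. Resume reverses the
 * sequence, then reprograms the virtualization tables, restores syncpoint
 * state and re-enables interrupts.
 */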
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	err = reset_control_bulk_assert(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		goto resume_host1x;
	}

	usleep_range(1000, 2000);

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}

static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	/* TODO: add system suspend-resume once the driver is ready for that */
};

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};
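
/*
 * Module init registers the host1x bus type first and then both platform
 * drivers; module exit unregisters them in reverse order.
 */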
static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
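
/*
 * One way a client might use this (hypothetical sketch, not taken from an
 * in-tree client): clamp its own DMA mask to what host1x can address,
 *
 *	u64 mask = min_t(u64, dma_get_mask(dev), host1x_get_dma_mask(host));
 *
 *	dma_set_mask_and_coherent(dev, mask);
 */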

MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_AUTHOR("Terje Bergstrom <[email protected]>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");