/* etnaviv_drv.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2015-2018 Etnaviv Project
  4. */
  5. #include <linux/component.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/module.h>
  8. #include <linux/of_platform.h>
  9. #include <linux/uaccess.h>
  10. #include <drm/drm_debugfs.h>
  11. #include <drm/drm_drv.h>
  12. #include <drm/drm_file.h>
  13. #include <drm/drm_ioctl.h>
  14. #include <drm/drm_of.h>
  15. #include <drm/drm_prime.h>
  16. #include "etnaviv_cmdbuf.h"
  17. #include "etnaviv_drv.h"
  18. #include "etnaviv_gpu.h"
  19. #include "etnaviv_gem.h"
  20. #include "etnaviv_mmu.h"
  21. #include "etnaviv_perfmon.h"
  22. /*
  23. * DRM operations:
  24. */
  25. static void load_gpu(struct drm_device *dev)
  26. {
  27. struct etnaviv_drm_private *priv = dev->dev_private;
  28. unsigned int i;
  29. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  30. struct etnaviv_gpu *g = priv->gpu[i];
  31. if (g) {
  32. int ret;
  33. ret = etnaviv_gpu_init(g);
  34. if (ret)
  35. priv->gpu[i] = NULL;
  36. }
  37. }
  38. }
  39. static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
  40. {
  41. struct etnaviv_drm_private *priv = dev->dev_private;
  42. struct etnaviv_file_private *ctx;
  43. int ret, i;
  44. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  45. if (!ctx)
  46. return -ENOMEM;
  47. ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
  48. priv->cmdbuf_suballoc);
  49. if (!ctx->mmu) {
  50. ret = -ENOMEM;
  51. goto out_free;
  52. }
  53. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  54. struct etnaviv_gpu *gpu = priv->gpu[i];
  55. struct drm_gpu_scheduler *sched;
  56. if (gpu) {
  57. sched = &gpu->sched;
  58. drm_sched_entity_init(&ctx->sched_entity[i],
  59. DRM_SCHED_PRIORITY_NORMAL, &sched,
  60. 1, NULL);
  61. }
  62. }
  63. file->driver_priv = ctx;
  64. return 0;
  65. out_free:
  66. kfree(ctx);
  67. return ret;
  68. }
  69. static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
  70. {
  71. struct etnaviv_drm_private *priv = dev->dev_private;
  72. struct etnaviv_file_private *ctx = file->driver_priv;
  73. unsigned int i;
  74. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  75. struct etnaviv_gpu *gpu = priv->gpu[i];
  76. if (gpu)
  77. drm_sched_entity_destroy(&ctx->sched_entity[i]);
  78. }
  79. etnaviv_iommu_context_put(ctx->mmu);
  80. kfree(ctx);
  81. }
  82. /*
  83. * DRM debugfs:
  84. */
  85. #ifdef CONFIG_DEBUG_FS
  86. static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
  87. {
  88. struct etnaviv_drm_private *priv = dev->dev_private;
  89. etnaviv_gem_describe_objects(priv, m);
  90. return 0;
  91. }
  92. static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
  93. {
  94. struct drm_printer p = drm_seq_file_printer(m);
  95. read_lock(&dev->vma_offset_manager->vm_lock);
  96. drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
  97. read_unlock(&dev->vma_offset_manager->vm_lock);
  98. return 0;
  99. }
/* debugfs "mmu": dump the drm_mm of the MMU context currently active on @gpu. */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to avoid a MMU context switch just now and elevate
	 * the refcount of the current context to avoid it disappearing from
	 * under our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	/* No context currently bound to this GPU: nothing to dump. */
	if (!mmu_context)
		return 0;

	/* The context lock protects its drm_mm while we print it. */
	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	/* Drop the reference taken above. */
	etnaviv_iommu_context_put(mmu_context);

	return 0;
}
  123. static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
  124. {
  125. struct etnaviv_cmdbuf *buf = &gpu->buffer;
  126. u32 size = buf->size;
  127. u32 *ptr = buf->vaddr;
  128. u32 i;
  129. seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
  130. buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
  131. size - buf->user_size);
  132. for (i = 0; i < size / 4; i++) {
  133. if (i && !(i % 4))
  134. seq_puts(m, "\n");
  135. if (i % 4 == 0)
  136. seq_printf(m, "\t0x%p: ", ptr + i);
  137. seq_printf(m, "%08x ", *(ptr + i));
  138. }
  139. seq_puts(m, "\n");
  140. }
  141. static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
  142. {
  143. seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
  144. mutex_lock(&gpu->lock);
  145. etnaviv_buffer_dump(gpu, m);
  146. mutex_unlock(&gpu->lock);
  147. return 0;
  148. }
  149. static int show_unlocked(struct seq_file *m, void *arg)
  150. {
  151. struct drm_info_node *node = (struct drm_info_node *) m->private;
  152. struct drm_device *dev = node->minor->dev;
  153. int (*show)(struct drm_device *dev, struct seq_file *m) =
  154. node->info_ent->data;
  155. return show(dev, m);
  156. }
  157. static int show_each_gpu(struct seq_file *m, void *arg)
  158. {
  159. struct drm_info_node *node = (struct drm_info_node *) m->private;
  160. struct drm_device *dev = node->minor->dev;
  161. struct etnaviv_drm_private *priv = dev->dev_private;
  162. struct etnaviv_gpu *gpu;
  163. int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
  164. node->info_ent->data;
  165. unsigned int i;
  166. int ret = 0;
  167. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  168. gpu = priv->gpu[i];
  169. if (!gpu)
  170. continue;
  171. ret = show(gpu, m);
  172. if (ret < 0)
  173. break;
  174. }
  175. return ret;
  176. }
  177. static struct drm_info_list etnaviv_debugfs_list[] = {
  178. {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
  179. {"gem", show_unlocked, 0, etnaviv_gem_show},
  180. { "mm", show_unlocked, 0, etnaviv_mm_show },
  181. {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
  182. {"ring", show_each_gpu, 0, etnaviv_ring_show},
  183. };
/* Register all etnaviv debugfs files on the given DRM minor. */
static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
  190. #endif
  191. /*
  192. * DRM ioctls:
  193. */
  194. static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
  195. struct drm_file *file)
  196. {
  197. struct etnaviv_drm_private *priv = dev->dev_private;
  198. struct drm_etnaviv_param *args = data;
  199. struct etnaviv_gpu *gpu;
  200. if (args->pipe >= ETNA_MAX_PIPES)
  201. return -EINVAL;
  202. gpu = priv->gpu[args->pipe];
  203. if (!gpu)
  204. return -ENXIO;
  205. return etnaviv_gpu_get_param(gpu, args->param, &args->value);
  206. }
  207. static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
  208. struct drm_file *file)
  209. {
  210. struct drm_etnaviv_gem_new *args = data;
  211. if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
  212. ETNA_BO_FORCE_MMU))
  213. return -EINVAL;
  214. return etnaviv_gem_new_handle(dev, file, args->size,
  215. args->flags, &args->handle);
  216. }
  217. static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
  218. struct drm_file *file)
  219. {
  220. struct drm_etnaviv_gem_cpu_prep *args = data;
  221. struct drm_gem_object *obj;
  222. int ret;
  223. if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
  224. return -EINVAL;
  225. obj = drm_gem_object_lookup(file, args->handle);
  226. if (!obj)
  227. return -ENOENT;
  228. ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
  229. drm_gem_object_put(obj);
  230. return ret;
  231. }
  232. static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  233. struct drm_file *file)
  234. {
  235. struct drm_etnaviv_gem_cpu_fini *args = data;
  236. struct drm_gem_object *obj;
  237. int ret;
  238. if (args->flags)
  239. return -EINVAL;
  240. obj = drm_gem_object_lookup(file, args->handle);
  241. if (!obj)
  242. return -ENOENT;
  243. ret = etnaviv_gem_cpu_fini(obj);
  244. drm_gem_object_put(obj);
  245. return ret;
  246. }
  247. static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
  248. struct drm_file *file)
  249. {
  250. struct drm_etnaviv_gem_info *args = data;
  251. struct drm_gem_object *obj;
  252. int ret;
  253. if (args->pad)
  254. return -EINVAL;
  255. obj = drm_gem_object_lookup(file, args->handle);
  256. if (!obj)
  257. return -ENOENT;
  258. ret = etnaviv_gem_mmap_offset(obj, &args->offset);
  259. drm_gem_object_put(obj);
  260. return ret;
  261. }
  262. static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
  263. struct drm_file *file)
  264. {
  265. struct drm_etnaviv_wait_fence *args = data;
  266. struct etnaviv_drm_private *priv = dev->dev_private;
  267. struct drm_etnaviv_timespec *timeout = &args->timeout;
  268. struct etnaviv_gpu *gpu;
  269. if (args->flags & ~(ETNA_WAIT_NONBLOCK))
  270. return -EINVAL;
  271. if (args->pipe >= ETNA_MAX_PIPES)
  272. return -EINVAL;
  273. gpu = priv->gpu[args->pipe];
  274. if (!gpu)
  275. return -ENXIO;
  276. if (args->flags & ETNA_WAIT_NONBLOCK)
  277. timeout = NULL;
  278. return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
  279. timeout);
  280. }
  281. static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
  282. struct drm_file *file)
  283. {
  284. struct drm_etnaviv_gem_userptr *args = data;
  285. if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
  286. args->flags == 0)
  287. return -EINVAL;
  288. if (offset_in_page(args->user_ptr | args->user_size) ||
  289. (uintptr_t)args->user_ptr != args->user_ptr ||
  290. (u32)args->user_size != args->user_size ||
  291. args->user_ptr & ~PAGE_MASK)
  292. return -EINVAL;
  293. if (!access_ok((void __user *)(unsigned long)args->user_ptr,
  294. args->user_size))
  295. return -EFAULT;
  296. return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
  297. args->user_size, args->flags,
  298. &args->handle);
  299. }
  300. static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
  301. struct drm_file *file)
  302. {
  303. struct etnaviv_drm_private *priv = dev->dev_private;
  304. struct drm_etnaviv_gem_wait *args = data;
  305. struct drm_etnaviv_timespec *timeout = &args->timeout;
  306. struct drm_gem_object *obj;
  307. struct etnaviv_gpu *gpu;
  308. int ret;
  309. if (args->flags & ~(ETNA_WAIT_NONBLOCK))
  310. return -EINVAL;
  311. if (args->pipe >= ETNA_MAX_PIPES)
  312. return -EINVAL;
  313. gpu = priv->gpu[args->pipe];
  314. if (!gpu)
  315. return -ENXIO;
  316. obj = drm_gem_object_lookup(file, args->handle);
  317. if (!obj)
  318. return -ENOENT;
  319. if (args->flags & ETNA_WAIT_NONBLOCK)
  320. timeout = NULL;
  321. ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
  322. drm_gem_object_put(obj);
  323. return ret;
  324. }
  325. static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
  326. struct drm_file *file)
  327. {
  328. struct etnaviv_drm_private *priv = dev->dev_private;
  329. struct drm_etnaviv_pm_domain *args = data;
  330. struct etnaviv_gpu *gpu;
  331. if (args->pipe >= ETNA_MAX_PIPES)
  332. return -EINVAL;
  333. gpu = priv->gpu[args->pipe];
  334. if (!gpu)
  335. return -ENXIO;
  336. return etnaviv_pm_query_dom(gpu, args);
  337. }
  338. static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
  339. struct drm_file *file)
  340. {
  341. struct etnaviv_drm_private *priv = dev->dev_private;
  342. struct drm_etnaviv_pm_signal *args = data;
  343. struct etnaviv_gpu *gpu;
  344. if (args->pipe >= ETNA_MAX_PIPES)
  345. return -EINVAL;
  346. gpu = priv->gpu[args->pipe];
  347. if (!gpu)
  348. return -ENXIO;
  349. return etnaviv_pm_query_sig(gpu, args);
  350. }
/* ioctl dispatch table; every entry is available on render nodes. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
/* Generic GEM file_operations (mmap, poll, ioctl dispatch, ...). */
DEFINE_DRM_GEM_FOPS(fops);

/* DRM driver description: a render-only GEM driver, no modesetting. */
static const struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose           = etnaviv_postclose,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_mmap     = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 3,
};
  387. /*
  388. * Platform driver:
  389. */
/*
 * Component master bind: allocate the DRM device and driver-private
 * state, create the cmdbuf suballocator, bind all GPU core components
 * (which populates priv->gpu[]), bring the GPUs up and finally register
 * the DRM device.  Error paths unwind strictly in reverse order.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	/* allow scatter-gather segments up to 2 GiB */
	dma_set_max_seg_size(dev, SZ_2G);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	/* shmem backing allocations may fail softly: no hard retry, no warning */
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	/* bind all GPU cores; each bind callback fills a priv->gpu[] slot */
	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}
/* Component master unbind: tear down in reverse order of etnaviv_bind(). */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	/* clear the pointer before freeing so late users see NULL, not garbage */
	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}
/* Component framework callbacks, invoked once all GPU cores are available. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
  450. static int etnaviv_pdev_probe(struct platform_device *pdev)
  451. {
  452. struct device *dev = &pdev->dev;
  453. struct device_node *first_node = NULL;
  454. struct component_match *match = NULL;
  455. if (!dev->platform_data) {
  456. struct device_node *core_node;
  457. for_each_compatible_node(core_node, NULL, "vivante,gc") {
  458. if (!of_device_is_available(core_node))
  459. continue;
  460. if (!first_node)
  461. first_node = core_node;
  462. drm_of_component_match_add(&pdev->dev, &match,
  463. component_compare_of, core_node);
  464. }
  465. } else {
  466. char **names = dev->platform_data;
  467. unsigned i;
  468. for (i = 0; names[i]; i++)
  469. component_match_add(dev, &match, component_compare_dev_name, names[i]);
  470. }
  471. /*
  472. * PTA and MTLB can have 40 bit base addresses, but
  473. * unfortunately, an entry in the MTLB can only point to a
  474. * 32 bit base address of a STLB. Moreover, to initialize the
  475. * MMU we need a command buffer with a 32 bit address because
  476. * without an MMU there is only an indentity mapping between
  477. * the internal 32 bit addresses and the bus addresses.
  478. *
  479. * To make things easy, we set the dma_coherent_mask to 32
  480. * bit to make sure we are allocating the command buffers and
  481. * TLBs in the lower 4 GiB address space.
  482. */
  483. if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
  484. dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
  485. dev_dbg(&pdev->dev, "No suitable DMA available\n");
  486. return -ENODEV;
  487. }
  488. /*
  489. * Apply the same DMA configuration to the virtual etnaviv
  490. * device as the GPU we found. This assumes that all Vivante
  491. * GPUs in the system share the same DMA constraints.
  492. */
  493. if (first_node)
  494. of_dma_configure(&pdev->dev, first_node, true);
  495. return component_master_add_with_match(dev, &etnaviv_master_ops, match);
  496. }
/* Remove of the virtual master device: detach from the component framework. */
static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}
/* Platform driver for the virtual "etnaviv" master device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name = "etnaviv",
	},
};

/* The virtual master platform device instantiated by etnaviv_init(). */
static struct platform_device *etnaviv_drm;
/*
 * Module init: register the GPU core driver and the master platform
 * driver, then instantiate the virtual master device iff the DT
 * contains at least one available Vivante GPU node.
 */
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
		if (!pdev) {
			ret = -ENOMEM;
			/* balance the reference held by the iterator on early exit */
			of_node_put(np);
			goto unregister_platform_driver;
		}

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		/* only one virtual device is needed, regardless of GPU count */
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);
/* Module exit: tear down in reverse order of etnaviv_init(). */
static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
/* Module metadata. */
MODULE_AUTHOR("Christian Gmeiner <[email protected]>");
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_AUTHOR("Lucas Stach <[email protected]>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");