drm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "uapi.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
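
/* Run the core atomic helper checks, then the display-hub specific checks. */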
static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_post_commit(struct drm_device *drm,
				     struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *old_crtc_state __maybe_unused;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		tegra_crtc_atomic_post_commit(crtc, old_state);
}
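
/*
 * On SoCs with a display hub, the hub state must be committed between the
 * modeset disables and the plane/modeset enables, so the commit tail is
 * open-coded here; without a hub the runtime-PM aware helper is used.
 */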
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		bool fence_cookie = dma_fence_begin_signalling();

		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		dma_fence_end_signalling(fence_cookie);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}

	tegra_atomic_post_commit(drm, old_state);
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};
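
/* Allocate the per-file private data and initialize its context and syncpoint containers. */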
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->legacy_contexts, 1);
	xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
	xa_init(&fpriv->syncpoints);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	pm_runtime_put(context->client->base.dev);
	kfree(context);
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = tegra_gem_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}
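
/*
 * Legacy submit path: copy command buffers, relocations and the syncpoint
 * request from userspace, validate them and hand the resulting job off to
 * host1x.
 */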
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp = NULL;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We only support a single syncpt_incr struct per submit for now */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, false);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;
	job->syncpt_recovery = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;
	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = tegra_gem_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned; an
		 * unaligned offset is malformed and causes command stream
		 * corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset would cause an unaligned write
		 * while patching the relocations, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* Syncpoint ref will be dropped on job release. */
	sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt = sp;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}
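
/*
 * The IOCTL implementations below belong to the legacy staging UAPI and are
 * only built when CONFIG_DRM_TEGRA_STAGING is enabled.
 */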
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}
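
/*
 * Open a channel on the given client and track the resulting context in the
 * file's legacy context IDR. The runtime PM reference taken here is dropped
 * again when the context is freed.
 */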
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = pm_runtime_resume_and_get(client->base.dev);
	if (err)
		return err;

	err = client->ops->open_channel(client, context);
	if (err < 0) {
		pm_runtime_put(client->base.dev);
		return err;
	}

	err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		pm_runtime_put(client->base.dev);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->legacy_contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
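
/* The tiling and flags IOCTLs translate between UAPI values and tegra_bo metadata. */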
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}
#endif
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}
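
/* Release all legacy contexts and per-file UAPI state when a file is closed. */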
static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
	tegra_drm_uapi_close_file(fpriv);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->legacy_contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif
static const struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
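
/*
 * Request a shared host1x channel for the client and add it to the list of
 * clients attached to this DRM device.
 */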
int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	/*
	 * When MLOCKs are implemented, change to allocate a shared channel
	 * only when MLOCKs are disabled.
	 */
	client->shared_channel = host1x_channel_request(&client->base);
	if (!client->shared_channel)
		return -EBUSY;

	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	if (client->shared_channel)
		host1x_channel_put(client->shared_channel);

	return 0;
}
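
/*
 * Attach a host1x client to the shared IOMMU domain, unless it is already
 * attached to a different domain (in which case the IOMMU-backed DMA API is
 * used instead).
 */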
int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (client->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
				to_dma_iommu_mapping(client->dev);
		arm_iommu_detach_device(client->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(client->dev);
	}
#endif

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group)
			return -ENODEV;

		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}
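
/*
 * Allocate memory for use by the hardware: with an explicit IOMMU domain the
 * pages are mapped into the carveout IOVA range, otherwise low memory that
 * devices can address directly is used.
 */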
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}
static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
	struct iommu_domain *domain;

	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
	 * likely to be allocated beyond the 32-bit boundary if sufficient
	 * system memory is available. This is problematic on earlier Tegra
	 * generations where host1x supports a maximum of 32 address bits in
	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
	 * as well it won't be able to process buffers allocated beyond the
	 * 32-bit boundary.
	 *
	 * The DMA API will use bounce buffers in this case, so that could
	 * perhaps still be made to work, even if less efficient, but there
	 * is another catch: in order to perform cache maintenance on pages
	 * allocated for discontiguous buffers we need to map and unmap the
	 * SG table representing these buffers. This is fine for something
	 * small like a push buffer, but it exhausts the bounce buffer pool
	 * (typically on the order of a few MiB) for framebuffers (many MiB
	 * for any modern resolution).
	 *
	 * Work around this by making sure that Tegra DRM clients only use
	 * an IOMMU if the parent host1x also uses an IOMMU.
	 *
	 * Note that there's still a small gap here that we don't cover: if
	 * the DMA API is backed by an IOMMU there's no way to control which
	 * device is attached to an IOMMU and which isn't, except via wiring
	 * up the device tree appropriately. This is considered a problem
	 * of integration, so care must be taken for the DT to be consistent.
	 */
	domain = iommu_get_domain_for_dev(dev->dev.parent);

	/*
	 * Tegra20 and Tegra30 don't support addressing memory beyond the
	 * 32-bit boundary, so the regular GATHER opcodes will always be
	 * sufficient and whether or not the host1x is attached to an IOMMU
	 * doesn't matter.
	 */
	if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
		return true;

	return domain != NULL;
}
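
/*
 * Main probe: set up mode setting, the optional explicit IOMMU domain and
 * carveout, the display hub (if present), vblank support and fbdev, then
 * register the DRM device.
 */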
static int host1x_drm_probe(struct host1x_device *dev)
{
	struct tegra_drm *tegra;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra) {
		err = -ENOMEM;
		goto put;
	}

	if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	dev_set_drvdata(&dev->dev, drm);
	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 0;
	drm->mode_config.max_height = 0;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(dev);
	if (err < 0)
		goto fbdev;

	/*
	 * Now that all display controllers have been initialized, the
	 * maximum supported resolution is known and the bitmasks for the
	 * horizontal and vertical bitfields can be computed.
	 */
	tegra->hmask = drm->mode_config.max_width - 1;
	tegra->vmask = drm->mode_config.max_height - 1;

	if (tegra->use_explicit_iommu) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&dev->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG_DRIVER("IOMMU apertures:\n");
		DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
				 carveout_end);
	} else if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		tegra->domain = NULL;
		iova_cache_put();
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver);
	if (err < 0)
		goto hub;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto fb;

	return 0;

fb:
	tegra_drm_fb_exit(drm);
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}

	host1x_device_exit(dev);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
put:
	drm_dev_put(drm);
	return err;
}
static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);

	err = host1x_device_exit(dev);
	if (err < 0)
		dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
	drm_dev_put(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dc", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr2d", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra210-nvdec", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra186-nvdec", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ .compatible = "nvidia,tegra194-nvdec", },
	{ .compatible = "nvidia,tegra234-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
	&tegra_nvdec_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");