hub.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
  4. */
  5. #include <linux/clk.h>
  6. #include <linux/delay.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/host1x.h>
  9. #include <linux/module.h>
  10. #include <linux/of.h>
  11. #include <linux/of_device.h>
  12. #include <linux/of_graph.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/pm_runtime.h>
  15. #include <linux/reset.h>
  16. #include <drm/drm_atomic.h>
  17. #include <drm/drm_atomic_helper.h>
  18. #include <drm/drm_blend.h>
  19. #include <drm/drm_fourcc.h>
  20. #include <drm/drm_framebuffer.h>
  21. #include <drm/drm_probe_helper.h>
  22. #include "drm.h"
  23. #include "dc.h"
  24. #include "plane.h"
  25. #define NFB 24
/*
 * Pixel formats supported by the shared (window group) planes. The list is
 * advertised to userspace via drm_universal_plane_init() in
 * tegra_shared_plane_create().
 */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};
/*
 * Framebuffer modifiers supported by the shared planes: linear plus the
 * 16Bx2 block-linear layouts with block heights 0-5, each optionally
 * combined with the dGPU sector layout.
 */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};
/*
 * Remap a global window register offset into this plane's per-window
 * register aperture.
 *
 * Three discontiguous global ranges are packed into the window-relative
 * space (0x500.. -> 0x000.., 0x700.. -> 0x180.., 0x800.. -> 0x1c0..) and
 * then rebased on plane->offset. An offset outside the known ranges
 * triggers a WARN and is passed through rebased but unremapped.
 */
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
					      unsigned int offset)
{
	if (offset >= 0x500 && offset <= 0x581) {
		offset = 0x000 + (offset - 0x500);
		return plane->offset + offset;
	}

	if (offset >= 0x700 && offset <= 0x73c) {
		offset = 0x180 + (offset - 0x700);
		return plane->offset + offset;
	}

	if (offset >= 0x800 && offset <= 0x83e) {
		offset = 0x1c0 + (offset - 0x800);
		return plane->offset + offset;
	}

	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

	return plane->offset + offset;
}
/* Read a per-window register through the plane's remapped aperture. */
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
/* Write a per-window register through the plane's remapped aperture. */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
  99. static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
  100. {
  101. int err = 0;
  102. mutex_lock(&wgrp->lock);
  103. if (wgrp->usecount == 0) {
  104. err = host1x_client_resume(wgrp->parent);
  105. if (err < 0) {
  106. dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
  107. goto unlock;
  108. }
  109. reset_control_deassert(wgrp->rst);
  110. }
  111. wgrp->usecount++;
  112. unlock:
  113. mutex_unlock(&wgrp->lock);
  114. return err;
  115. }
  116. static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
  117. {
  118. int err;
  119. mutex_lock(&wgrp->lock);
  120. if (wgrp->usecount == 1) {
  121. err = reset_control_assert(wgrp->rst);
  122. if (err < 0) {
  123. pr_err("failed to assert reset for window group %u\n",
  124. wgrp->index);
  125. }
  126. host1x_client_suspend(wgrp->parent);
  127. }
  128. wgrp->usecount--;
  129. mutex_unlock(&wgrp->lock);
  130. }
  131. int tegra_display_hub_prepare(struct tegra_display_hub *hub)
  132. {
  133. unsigned int i;
  134. /*
  135. * XXX Enabling/disabling windowgroups needs to happen when the owner
  136. * display controller is disabled. There's currently no good point at
  137. * which this could be executed, so unconditionally enable all window
  138. * groups for now.
  139. */
  140. for (i = 0; i < hub->soc->num_wgrps; i++) {
  141. struct tegra_windowgroup *wgrp = &hub->wgrps[i];
  142. /* Skip orphaned window group whose parent DC is disabled */
  143. if (wgrp->parent)
  144. tegra_windowgroup_enable(wgrp);
  145. }
  146. return 0;
  147. }
  148. void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
  149. {
  150. unsigned int i;
  151. /*
  152. * XXX Remove this once window groups can be more fine-grainedly
  153. * enabled and disabled.
  154. */
  155. for (i = 0; i < hub->soc->num_wgrps; i++) {
  156. struct tegra_windowgroup *wgrp = &hub->wgrps[i];
  157. /* Skip orphaned window group whose parent DC is disabled */
  158. if (wgrp->parent)
  159. tegra_windowgroup_disable(wgrp);
  160. }
  161. }
/*
 * Latch pending state for this shared plane: write the UPDATE request
 * bits and poll (up to one second) until the hardware clears them.
 * A timeout is silently ignored — NOTE(review): consider logging it.
 */
static void tegra_shared_plane_update(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}
/*
 * Activate previously latched state: write the ACT_REQ bits and poll
 * (up to one second) until the hardware clears them. Mirrors
 * tegra_shared_plane_update(); a timeout is silently ignored.
 */
static void tegra_shared_plane_activate(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}
/*
 * Return the owner field of the plane's WINDOWGROUP_SET_CONTROL register
 * as read through head @dc. OWNER_MASK means the window is unowned.
 */
static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);

	return tegra_dc_readl(dc, offset) & OWNER_MASK;
}
  199. static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
  200. struct tegra_plane *plane)
  201. {
  202. struct device *dev = dc->dev;
  203. if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
  204. if (plane->dc == dc)
  205. return true;
  206. dev_WARN(dev, "head %u owns window %u but is not attached\n",
  207. dc->pipe, plane->index);
  208. }
  209. return false;
  210. }
/*
 * Assign ownership of a shared window to head @new, or release it when
 * @new is NULL (the owner field is set to OWNER_MASK, i.e. unowned).
 *
 * Returns 0 on success or -EBUSY if the window is owned by some other
 * head. Also updates the software binding (plane->dc).
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	/* access the register through the new head if given, else the old */
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* refuse to steal a window that belongs to a different head */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
/*
 * Program the input scaler coefficient RAM for a shared plane.
 *
 * The table holds 3 coefficient sets (one per tap ratio), each with 16
 * phase rows of 4 words. The index written with each word is
 * (ratio << 6) + (row << 2) + column, matching the hardware's indexed
 * coefficient register interface. The coefficient values are opaque
 * hardware magic supplied by NVIDIA.
 */
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				/* hardware index: 64 words per ratio set */
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}
/*
 * Claim a shared plane for head @dc and program its static window-group
 * configuration: line-buffer mode, fetch metering, latency/watermark
 * control, pipe meter, mempool allocation, thread group and the input
 * scaler coefficients, then latch and activate the new state.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): the read result is immediately overwritten here and
	 * for PIPE_METER/POOL_CONFIG below — presumably only the register
	 * access ordering matters; confirm against TRM.
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
/* Release the shared plane from head @dc (owner set to "unowned"). */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
/*
 * Atomic check for shared planes: validate pixel format, tiling mode and
 * sector layout against the SoC's capabilities, reject framebuffers with
 * mismatched U/V plane strides, and register the plane state with the
 * CRTC. Returns 0 on success or a negative error code.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}
/*
 * Disable a shared plane: clear WIN_ENABLE in the window options and
 * release hardware ownership back to the pool. Resumes the owning DC's
 * host1x client for the register access and suspends it again after.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
/*
 * Compute the scaler phase increment as a fixed-point ratio of input
 * size to output size: (in << NFB) / out, rounded to nearest by adding
 * out/2 before the division. NFB (24) is the number of fractional bits.
 */
static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
{
	u64 tmp, tmp1, tmp2;

	tmp = (u64)dfixed_trunc(in);
	tmp2 = (u64)out;
	tmp1 = (tmp << NFB) + (tmp2 >> 1);
	do_div(tmp1, tmp2);

	return lower_32_bits(tmp1);
}
/*
 * Program a shared plane for scanout: claim the window for the new CRTC,
 * set up blending, scaling, framebuffer addresses (including planar
 * U/V planes), surface kind and colorspace parameters. Bails out early
 * when the plane is being disabled or is not visible.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc, planes;
	bool yuv;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* depth is inverted: higher zpos means closer to the top */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	/*
	 * NOTE(review): this unconditionally overrides the tap selection
	 * computed above with 5-tap — the 2-tap fallback is effectively
	 * dead. Looks intentional-but-unfinished; confirm before changing.
	 */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	/* horizontal scaler, bypassed at 1:1; incr forced even via & ~0x1 */
	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	/* vertical scaler, same scheme */
	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planes > 1) {
		/* program U (and optionally V) plane addresses and pitches */
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		if (planes > 2) {
			base = tegra_plane_state->iova[2] + fb->offsets[2];
			base |= addr_flag;

			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
		}

		value = PITCH_U(fb->pitches[1]);

		if (planes > 2)
			value |= PITCH_V(fb->pitches[2]);

		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		/* clear stale chroma addresses for packed/single-plane formats */
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): src_y feeds OFFSET_X and src_x feeds OFFSET_Y —
	 * looks swapped; verify against the register definition before
	 * "fixing".
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
/* Atomic helper callbacks for shared planes. */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
/*
 * Create and register a shared overlay plane.
 *
 * @drm: DRM device
 * @dc: display controller the plane's window group initially parents to
 * @wgrp: index of the window group backing this plane
 * @index: hardware window index (determines the register aperture)
 *
 * Returns the new plane or an ERR_PTR() on failure.
 */
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* each window has a 0x300-byte register aperture starting at 0x0a00 */
	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}
/*
 * Duplicate the hub's private atomic state. kmemdup() copies the derived
 * tegra_display_hub_state; the helper then re-initializes the embedded
 * base state for the new commit.
 */
static struct drm_private_state *
tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
{
	struct tegra_display_hub_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}
/* Free a previously duplicated hub private state. */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	struct tegra_display_hub_state *hub_state =
		to_tegra_display_hub_state(state);

	kfree(hub_state);
}
/* Private-object state management callbacks for the display hub. */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
/*
 * Fetch (or create) the hub's private state for this atomic transaction.
 * Returns an ERR_PTR() on failure, mirroring
 * drm_atomic_get_private_obj_state().
 */
static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
			    struct drm_atomic_state *state)
{
	struct drm_private_state *priv;

	priv = drm_atomic_get_private_obj_state(state, &hub->base);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return to_tegra_display_hub_state(priv);
}
/*
 * Hub-level atomic check: pick the active CRTC with the highest pixel
 * clock and record its DC/clock/rate in the hub state so the commit can
 * feed the hub from the fastest display clock. No-op on SoCs without a
 * display hub.
 */
int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}
/*
 * Program the hub-wide (common) registers through the given head and latch
 * them with a COMMON_UPDATE/COMMON_ACTREQ sequence. Errors while resuming
 * the client are logged and the update is skipped.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/* disable latency events */
	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	/*
	 * NOTE(review): the value read here is overwritten on the next line,
	 * so the read result is unused — presumably it only serves as a
	 * register access/ordering side effect, if anything. Confirm whether
	 * it can be dropped.
	 */
	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	/* one fetch-meter slot each for cursor and window groups */
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	/*
	 * Latch the common state; each write is followed by a read back,
	 * presumably to flush the posted write — confirm.
	 */
	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
  737. void tegra_display_hub_atomic_commit(struct drm_device *drm,
  738. struct drm_atomic_state *state)
  739. {
  740. struct tegra_drm *tegra = drm->dev_private;
  741. struct tegra_display_hub *hub = tegra->hub;
  742. struct tegra_display_hub_state *hub_state;
  743. struct device *dev = hub->client.dev;
  744. int err;
  745. hub_state = to_tegra_display_hub_state(hub->base.state);
  746. if (hub_state->clk) {
  747. err = clk_set_rate(hub_state->clk, hub_state->rate);
  748. if (err < 0)
  749. dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
  750. hub_state->clk, hub_state->rate);
  751. err = clk_set_parent(hub->clk_disp, hub_state->clk);
  752. if (err < 0)
  753. dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
  754. hub->clk_disp, hub_state->clk, err);
  755. }
  756. if (hub_state->dc)
  757. tegra_display_hub_update(hub_state->dc);
  758. }
  759. static int tegra_display_hub_init(struct host1x_client *client)
  760. {
  761. struct tegra_display_hub *hub = to_tegra_display_hub(client);
  762. struct drm_device *drm = dev_get_drvdata(client->host);
  763. struct tegra_drm *tegra = drm->dev_private;
  764. struct tegra_display_hub_state *state;
  765. state = kzalloc(sizeof(*state), GFP_KERNEL);
  766. if (!state)
  767. return -ENOMEM;
  768. drm_atomic_private_obj_init(drm, &hub->base, &state->base,
  769. &tegra_display_hub_state_funcs);
  770. tegra->hub = hub;
  771. return 0;
  772. }
  773. static int tegra_display_hub_exit(struct host1x_client *client)
  774. {
  775. struct drm_device *drm = dev_get_drvdata(client->host);
  776. struct tegra_drm *tegra = drm->dev_private;
  777. drm_atomic_private_obj_fini(&tegra->hub->base);
  778. tegra->hub = NULL;
  779. return 0;
  780. }
  781. static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
  782. {
  783. struct tegra_display_hub *hub = to_tegra_display_hub(client);
  784. struct device *dev = client->dev;
  785. unsigned int i = hub->num_heads;
  786. int err;
  787. err = reset_control_assert(hub->rst);
  788. if (err < 0)
  789. return err;
  790. while (i--)
  791. clk_disable_unprepare(hub->clk_heads[i]);
  792. clk_disable_unprepare(hub->clk_hub);
  793. clk_disable_unprepare(hub->clk_dsc);
  794. clk_disable_unprepare(hub->clk_disp);
  795. pm_runtime_put_sync(dev);
  796. return 0;
  797. }
  798. static int tegra_display_hub_runtime_resume(struct host1x_client *client)
  799. {
  800. struct tegra_display_hub *hub = to_tegra_display_hub(client);
  801. struct device *dev = client->dev;
  802. unsigned int i;
  803. int err;
  804. err = pm_runtime_resume_and_get(dev);
  805. if (err < 0) {
  806. dev_err(dev, "failed to get runtime PM: %d\n", err);
  807. return err;
  808. }
  809. err = clk_prepare_enable(hub->clk_disp);
  810. if (err < 0)
  811. goto put_rpm;
  812. err = clk_prepare_enable(hub->clk_dsc);
  813. if (err < 0)
  814. goto disable_disp;
  815. err = clk_prepare_enable(hub->clk_hub);
  816. if (err < 0)
  817. goto disable_dsc;
  818. for (i = 0; i < hub->num_heads; i++) {
  819. err = clk_prepare_enable(hub->clk_heads[i]);
  820. if (err < 0)
  821. goto disable_heads;
  822. }
  823. err = reset_control_deassert(hub->rst);
  824. if (err < 0)
  825. goto disable_heads;
  826. return 0;
  827. disable_heads:
  828. while (i--)
  829. clk_disable_unprepare(hub->clk_heads[i]);
  830. clk_disable_unprepare(hub->clk_hub);
  831. disable_dsc:
  832. clk_disable_unprepare(hub->clk_dsc);
  833. disable_disp:
  834. clk_disable_unprepare(hub->clk_disp);
  835. put_rpm:
  836. pm_runtime_put_sync(dev);
  837. return err;
  838. }
/* host1x client callbacks for the display hub. */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
  845. static int tegra_display_hub_probe(struct platform_device *pdev)
  846. {
  847. u64 dma_mask = dma_get_mask(pdev->dev.parent);
  848. struct device_node *child = NULL;
  849. struct tegra_display_hub *hub;
  850. struct clk *clk;
  851. unsigned int i;
  852. int err;
  853. err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
  854. if (err < 0) {
  855. dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
  856. return err;
  857. }
  858. hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
  859. if (!hub)
  860. return -ENOMEM;
  861. hub->soc = of_device_get_match_data(&pdev->dev);
  862. hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
  863. if (IS_ERR(hub->clk_disp)) {
  864. err = PTR_ERR(hub->clk_disp);
  865. return err;
  866. }
  867. if (hub->soc->supports_dsc) {
  868. hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
  869. if (IS_ERR(hub->clk_dsc)) {
  870. err = PTR_ERR(hub->clk_dsc);
  871. return err;
  872. }
  873. }
  874. hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
  875. if (IS_ERR(hub->clk_hub)) {
  876. err = PTR_ERR(hub->clk_hub);
  877. return err;
  878. }
  879. hub->rst = devm_reset_control_get(&pdev->dev, "misc");
  880. if (IS_ERR(hub->rst)) {
  881. err = PTR_ERR(hub->rst);
  882. return err;
  883. }
  884. hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
  885. sizeof(*hub->wgrps), GFP_KERNEL);
  886. if (!hub->wgrps)
  887. return -ENOMEM;
  888. for (i = 0; i < hub->soc->num_wgrps; i++) {
  889. struct tegra_windowgroup *wgrp = &hub->wgrps[i];
  890. char id[8];
  891. snprintf(id, sizeof(id), "wgrp%u", i);
  892. mutex_init(&wgrp->lock);
  893. wgrp->usecount = 0;
  894. wgrp->index = i;
  895. wgrp->rst = devm_reset_control_get(&pdev->dev, id);
  896. if (IS_ERR(wgrp->rst))
  897. return PTR_ERR(wgrp->rst);
  898. err = reset_control_assert(wgrp->rst);
  899. if (err < 0)
  900. return err;
  901. }
  902. hub->num_heads = of_get_child_count(pdev->dev.of_node);
  903. hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
  904. GFP_KERNEL);
  905. if (!hub->clk_heads)
  906. return -ENOMEM;
  907. for (i = 0; i < hub->num_heads; i++) {
  908. child = of_get_next_child(pdev->dev.of_node, child);
  909. if (!child) {
  910. dev_err(&pdev->dev, "failed to find node for head %u\n",
  911. i);
  912. return -ENODEV;
  913. }
  914. clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
  915. if (IS_ERR(clk)) {
  916. dev_err(&pdev->dev, "failed to get clock for head %u\n",
  917. i);
  918. of_node_put(child);
  919. return PTR_ERR(clk);
  920. }
  921. hub->clk_heads[i] = clk;
  922. }
  923. of_node_put(child);
  924. /* XXX: enable clock across reset? */
  925. err = reset_control_assert(hub->rst);
  926. if (err < 0)
  927. return err;
  928. platform_set_drvdata(pdev, hub);
  929. pm_runtime_enable(&pdev->dev);
  930. INIT_LIST_HEAD(&hub->client.list);
  931. hub->client.ops = &tegra_display_hub_ops;
  932. hub->client.dev = &pdev->dev;
  933. err = host1x_client_register(&hub->client);
  934. if (err < 0)
  935. dev_err(&pdev->dev, "failed to register host1x client: %d\n",
  936. err);
  937. err = devm_of_platform_populate(&pdev->dev);
  938. if (err < 0)
  939. goto unregister;
  940. return err;
  941. unregister:
  942. host1x_client_unregister(&hub->client);
  943. pm_runtime_disable(&pdev->dev);
  944. return err;
  945. }
  946. static int tegra_display_hub_remove(struct platform_device *pdev)
  947. {
  948. struct tegra_display_hub *hub = platform_get_drvdata(pdev);
  949. unsigned int i;
  950. int err;
  951. err = host1x_client_unregister(&hub->client);
  952. if (err < 0) {
  953. dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
  954. err);
  955. }
  956. for (i = 0; i < hub->soc->num_wgrps; i++) {
  957. struct tegra_windowgroup *wgrp = &hub->wgrps[i];
  958. mutex_destroy(&wgrp->lock);
  959. }
  960. pm_runtime_disable(&pdev->dev);
  961. return err;
  962. }
/* Tegra186: six window groups, DSC clock present. */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};
/* Tegra194: six window groups, no DSC clock. */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};
/* Device-tree match table; .data selects the per-SoC configuration. */
static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
/* Platform driver glue; registered from the Tegra DRM core. */
struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};