plane.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/iommu.h>
  7. #include <linux/interconnect.h>
  8. #include <drm/drm_atomic.h>
  9. #include <drm/drm_atomic_helper.h>
  10. #include <drm/drm_fourcc.h>
  11. #include <drm/drm_framebuffer.h>
  12. #include <drm/drm_gem_atomic_helper.h>
  13. #include "dc.h"
  14. #include "plane.h"
  15. static void tegra_plane_destroy(struct drm_plane *plane)
  16. {
  17. struct tegra_plane *p = to_tegra_plane(plane);
  18. drm_plane_cleanup(plane);
  19. kfree(p);
  20. }
  21. static void tegra_plane_reset(struct drm_plane *plane)
  22. {
  23. struct tegra_plane *p = to_tegra_plane(plane);
  24. struct tegra_plane_state *state;
  25. unsigned int i;
  26. if (plane->state)
  27. __drm_atomic_helper_plane_destroy_state(plane->state);
  28. kfree(plane->state);
  29. plane->state = NULL;
  30. state = kzalloc(sizeof(*state), GFP_KERNEL);
  31. if (state) {
  32. plane->state = &state->base;
  33. plane->state->plane = plane;
  34. plane->state->zpos = p->index;
  35. plane->state->normalized_zpos = p->index;
  36. for (i = 0; i < 3; i++)
  37. state->iova[i] = DMA_MAPPING_ERROR;
  38. }
  39. }
  40. static struct drm_plane_state *
  41. tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
  42. {
  43. struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
  44. struct tegra_plane_state *copy;
  45. unsigned int i;
  46. copy = kmalloc(sizeof(*copy), GFP_KERNEL);
  47. if (!copy)
  48. return NULL;
  49. __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
  50. copy->tiling = state->tiling;
  51. copy->format = state->format;
  52. copy->swap = state->swap;
  53. copy->reflect_x = state->reflect_x;
  54. copy->reflect_y = state->reflect_y;
  55. copy->opaque = state->opaque;
  56. copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
  57. copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
  58. copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
  59. for (i = 0; i < 2; i++)
  60. copy->blending[i] = state->blending[i];
  61. for (i = 0; i < 3; i++) {
  62. copy->iova[i] = DMA_MAPPING_ERROR;
  63. copy->map[i] = NULL;
  64. }
  65. return &copy->base;
  66. }
  67. static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
  68. struct drm_plane_state *state)
  69. {
  70. __drm_atomic_helper_plane_destroy_state(state);
  71. kfree(state);
  72. }
  73. static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
  74. {
  75. struct drm_crtc *crtc;
  76. drm_for_each_crtc(crtc, plane->dev) {
  77. if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
  78. struct tegra_dc *dc = to_tegra_dc(crtc);
  79. if (!dc->soc->supports_sector_layout)
  80. return false;
  81. }
  82. }
  83. return true;
  84. }
  85. static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
  86. uint32_t format,
  87. uint64_t modifier)
  88. {
  89. const struct drm_format_info *info = drm_format_info(format);
  90. if (modifier == DRM_FORMAT_MOD_LINEAR)
  91. return true;
  92. /* check for the sector layout bit */
  93. if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
  94. if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
  95. if (!tegra_plane_supports_sector_layout(plane))
  96. return false;
  97. }
  98. }
  99. if (info->num_planes == 1)
  100. return true;
  101. return false;
  102. }
/* Plane vtable shared by all Tegra display planes. */
const struct drm_plane_funcs tegra_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = tegra_plane_destroy,
	.reset = tegra_plane_reset,
	.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
	.format_mod_supported = tegra_plane_format_mod_supported,
};
/*
 * Pin every framebuffer plane of @state into the display controller's
 * address space, recording the resulting addresses in state->iova[] and the
 * mappings in state->map[].
 *
 * Returns 0 on success or a negative error code; on failure all mappings
 * created so far are released again.
 */
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	unsigned int i;
	int err;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct host1x_bo_mapping *map;

		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto unpin;
		}

		if (!dc->client.group) {
			/*
			 * The display controller needs contiguous memory, so
			 * fail if the buffer is discontiguous and we fail to
			 * map its SG table to a single contiguous chunk of
			 * I/O virtual memory.
			 */
			if (map->chunks > 1) {
				err = -EINVAL;
				goto unpin;
			}

			state->iova[i] = map->phys;
		} else {
			/* grouped clients use the buffer's own IOVA */
			state->iova[i] = bo->iova;
		}

		state->map[i] = map;
	}

	return 0;

unpin:
	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);

	/* unwind: release the mappings pinned before the failing plane */
	while (i--) {
		host1x_bo_unpin(state->map[i]);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->map[i] = NULL;
	}

	return err;
}
  151. static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
  152. {
  153. unsigned int i;
  154. for (i = 0; i < state->base.fb->format->num_planes; i++) {
  155. host1x_bo_unpin(state->map[i]);
  156. state->iova[i] = DMA_MAPPING_ERROR;
  157. state->map[i] = NULL;
  158. }
  159. }
/*
 * ->prepare_fb() plane helper: attach GEM fences to the plane state and pin
 * the framebuffer into the display controller's address space.
 *
 * Returns 0 on success (or when the plane has no framebuffer) or a negative
 * error code.
 */
int tegra_plane_prepare_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);
	int err;

	/* nothing to pin for a disabled plane */
	if (!state->fb)
		return 0;

	err = drm_gem_plane_helper_prepare_fb(plane, state);
	if (err < 0)
		return err;

	return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
/*
 * ->cleanup_fb() plane helper: release the mappings created by
 * tegra_plane_prepare_fb().
 */
void tegra_plane_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);

	/* dc is NULL when the plane has no CRTC — presumably nothing was pinned then */
	if (dc)
		tegra_dc_unpin(dc, to_tegra_plane_state(state));
}
/*
 * Estimate the memory bandwidth (peak and average, in ICC units) required to
 * scan out this plane and store the result in the Tegra plane state.
 *
 * Returns 0 on success (including the invisible-plane case) or -EINVAL if
 * the new CRTC state cannot be found in the atomic state.
 */
static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
{
	struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
	unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
	const struct tegra_dc_soc_info *soc;
	const struct drm_format_info *fmt;
	struct drm_crtc_state *crtc_state;
	u64 avg_bandwidth, peak_bandwidth;

	/* an invisible plane fetches no data */
	if (!state->visible)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!crtc_state)
		return -EINVAL;

	/* source rectangle is in 16.16 fixed point */
	src_w = drm_rect_width(&state->src) >> 16;
	src_h = drm_rect_height(&state->src) >> 16;
	dst_w = drm_rect_width(&state->dst);
	dst_h = drm_rect_height(&state->dst);

	fmt = state->fb->format;
	soc = to_tegra_dc(state->crtc)->soc;

	/*
	 * Note that real memory bandwidth vary depending on format and
	 * memory layout, we are not taking that into account because small
	 * estimation error isn't important since bandwidth is rounded up
	 * anyway.
	 */
	for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
		unsigned int bpp_plane = fmt->cpp[i] * 8;

		/*
		 * Sub-sampling is relevant for chroma planes only and vertical
		 * readouts are not cached, hence only horizontal sub-sampling
		 * matters.
		 */
		if (i > 0)
			bpp_plane /= fmt->hsub;

		bpp += bpp_plane;
	}

	/* average bandwidth in kbytes/sec */
	avg_bandwidth = min(src_w, dst_w) * min(src_h, dst_h);
	avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
	avg_bandwidth = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
	do_div(avg_bandwidth, 1000);

	/* mode.clock in kHz, peak bandwidth in kbytes/sec */
	peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);

	/*
	 * Tegra30/114 Memory Controller can't interleave DC memory requests
	 * for the tiled windows because DC uses 16-bytes atom, while DDR3
	 * uses 32-bytes atom. Hence there is x2 memory overfetch for tiled
	 * framebuffer and DDR3 on these SoCs.
	 */
	if (soc->plane_tiled_memory_bandwidth_x2 &&
	    tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
		mul = 2;
	else
		mul = 1;

	/* ICC bandwidth in kbytes/sec */
	tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
	tegra_state->avg_memory_bandwidth = kBps_to_icc(avg_bandwidth) * mul;

	return 0;
}
/*
 * Validate a plane state against its CRTC: clip the plane, recompute its
 * memory-bandwidth demand and flag the corresponding hardware window for an
 * activation request in the CRTC's state.
 *
 * Returns 0 on success or a negative error code.
 */
int tegra_plane_state_add(struct tegra_plane *plane,
			  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct tegra_dc_state *tegra;
	int err;

	/* Propagate errors from allocation or locking failures. */
	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Check plane state for visibility and calculate clipping bounds */
	err = drm_atomic_helper_check_plane_state(state, crtc_state,
						  0, INT_MAX, true, true);
	if (err < 0)
		return err;

	err = tegra_plane_calculate_memory_bandwidth(state);
	if (err < 0)
		return err;

	tegra = to_dc_state(crtc_state);

	/* request activation of window A/B/C/... matching this plane's index */
	tegra->planes |= WIN_A_ACT_REQ << plane->index;

	return 0;
}
/*
 * Translate a DRM fourcc into the Tegra window color depth and, where
 * required, the byte-swap configuration.
 *
 * @fourcc: DRM pixel format to translate
 * @format: output, the matching WIN_COLOR_DEPTH_* value
 * @swap: output, the BYTE_SWAP_* setting; may be NULL for formats that do
 *        not require swapping
 *
 * Returns 0 on success or -EINVAL if the format is unsupported or a swap is
 * required but @swap is NULL.
 */
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
{
	/* assume no swapping of fetched data */
	if (swap)
		*swap = BYTE_SWAP_NOSWAP;

	switch (fourcc) {
	case DRM_FORMAT_ARGB4444:
		*format = WIN_COLOR_DEPTH_B4G4R4A4;
		break;

	case DRM_FORMAT_ARGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5A1;
		break;

	case DRM_FORMAT_RGB565:
		*format = WIN_COLOR_DEPTH_B5G6R5;
		break;

	case DRM_FORMAT_RGBA5551:
		*format = WIN_COLOR_DEPTH_A1B5G5R5;
		break;

	case DRM_FORMAT_ARGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8A8;
		break;

	case DRM_FORMAT_ABGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8A8;
		break;

	case DRM_FORMAT_ABGR4444:
		*format = WIN_COLOR_DEPTH_R4G4B4A4;
		break;

	case DRM_FORMAT_ABGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5A;
		break;

	case DRM_FORMAT_BGRA5551:
		*format = WIN_COLOR_DEPTH_AR5G5B5;
		break;

	case DRM_FORMAT_XRGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5X1;
		break;

	case DRM_FORMAT_RGBX5551:
		*format = WIN_COLOR_DEPTH_X1B5G5R5;
		break;

	case DRM_FORMAT_XBGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5X1;
		break;

	case DRM_FORMAT_BGRX5551:
		*format = WIN_COLOR_DEPTH_X1R5G5B5;
		break;

	case DRM_FORMAT_BGR565:
		*format = WIN_COLOR_DEPTH_R5G6B5;
		break;

	case DRM_FORMAT_BGRA8888:
		*format = WIN_COLOR_DEPTH_A8R8G8B8;
		break;

	case DRM_FORMAT_RGBA8888:
		*format = WIN_COLOR_DEPTH_A8B8G8R8;
		break;

	case DRM_FORMAT_XRGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8X8;
		break;

	case DRM_FORMAT_XBGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8X8;
		break;

	case DRM_FORMAT_UYVY:
		*format = WIN_COLOR_DEPTH_YCbCr422;
		break;

	/* the packed YUV variants below differ from UYVY only by byte swap */
	case DRM_FORMAT_YUYV:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP2;
		break;

	case DRM_FORMAT_YVYU:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP4;
		break;

	case DRM_FORMAT_VYUY:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP4HW;
		break;

	case DRM_FORMAT_YUV420:
		*format = WIN_COLOR_DEPTH_YCbCr420P;
		break;

	case DRM_FORMAT_YUV422:
		*format = WIN_COLOR_DEPTH_YCbCr422P;
		break;

	case DRM_FORMAT_YUV444:
		*format = WIN_COLOR_DEPTH_YCbCr444P;
		break;

	case DRM_FORMAT_NV12:
		*format = WIN_COLOR_DEPTH_YCbCr420SP;
		break;

	case DRM_FORMAT_NV21:
		*format = WIN_COLOR_DEPTH_YCrCb420SP;
		break;

	case DRM_FORMAT_NV16:
		*format = WIN_COLOR_DEPTH_YCbCr422SP;
		break;

	case DRM_FORMAT_NV61:
		*format = WIN_COLOR_DEPTH_YCrCb422SP;
		break;

	case DRM_FORMAT_NV24:
		*format = WIN_COLOR_DEPTH_YCbCr444SP;
		break;

	case DRM_FORMAT_NV42:
		*format = WIN_COLOR_DEPTH_YCrCb444SP;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
  373. bool tegra_plane_format_is_indexed(unsigned int format)
  374. {
  375. switch (format) {
  376. case WIN_COLOR_DEPTH_P1:
  377. case WIN_COLOR_DEPTH_P2:
  378. case WIN_COLOR_DEPTH_P4:
  379. case WIN_COLOR_DEPTH_P8:
  380. return true;
  381. }
  382. return false;
  383. }
/*
 * Report whether @format is a YUV window format. Optionally returns the
 * number of framebuffer planes (1 = packed, 2 = semi-planar, 3 = planar)
 * via @planes and the bits per component via @bpc; either pointer may be
 * NULL. For non-YUV formats *planes is set to 1 and *bpc is left untouched.
 */
bool tegra_plane_format_is_yuv(unsigned int format, unsigned int *planes, unsigned int *bpc)
{
	switch (format) {
	/* packed 4:2:2 */
	case WIN_COLOR_DEPTH_YCbCr422:
	case WIN_COLOR_DEPTH_YUV422:
		if (planes)
			*planes = 1;

		if (bpc)
			*bpc = 8;

		return true;

	/* fully planar */
	case WIN_COLOR_DEPTH_YCbCr420P:
	case WIN_COLOR_DEPTH_YUV420P:
	case WIN_COLOR_DEPTH_YCbCr422P:
	case WIN_COLOR_DEPTH_YUV422P:
	case WIN_COLOR_DEPTH_YCbCr422R:
	case WIN_COLOR_DEPTH_YUV422R:
	case WIN_COLOR_DEPTH_YCbCr422RA:
	case WIN_COLOR_DEPTH_YUV422RA:
	case WIN_COLOR_DEPTH_YCbCr444P:
		if (planes)
			*planes = 3;

		if (bpc)
			*bpc = 8;

		return true;

	/* semi-planar (luma plane + interleaved chroma plane) */
	case WIN_COLOR_DEPTH_YCrCb420SP:
	case WIN_COLOR_DEPTH_YCbCr420SP:
	case WIN_COLOR_DEPTH_YCrCb422SP:
	case WIN_COLOR_DEPTH_YCbCr422SP:
	case WIN_COLOR_DEPTH_YCrCb444SP:
	case WIN_COLOR_DEPTH_YCbCr444SP:
		if (planes)
			*planes = 2;

		if (bpc)
			*bpc = 8;

		return true;
	}

	if (planes)
		*planes = 1;

	return false;
}
  424. static bool __drm_format_has_alpha(u32 format)
  425. {
  426. switch (format) {
  427. case DRM_FORMAT_ARGB1555:
  428. case DRM_FORMAT_RGBA5551:
  429. case DRM_FORMAT_ABGR8888:
  430. case DRM_FORMAT_ARGB8888:
  431. return true;
  432. }
  433. return false;
  434. }
  435. static int tegra_plane_format_get_alpha(unsigned int opaque,
  436. unsigned int *alpha)
  437. {
  438. if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
  439. *alpha = opaque;
  440. return 0;
  441. }
  442. switch (opaque) {
  443. case WIN_COLOR_DEPTH_B5G5R5X1:
  444. *alpha = WIN_COLOR_DEPTH_B5G5R5A1;
  445. return 0;
  446. case WIN_COLOR_DEPTH_X1B5G5R5:
  447. *alpha = WIN_COLOR_DEPTH_A1B5G5R5;
  448. return 0;
  449. case WIN_COLOR_DEPTH_R8G8B8X8:
  450. *alpha = WIN_COLOR_DEPTH_R8G8B8A8;
  451. return 0;
  452. case WIN_COLOR_DEPTH_B8G8R8X8:
  453. *alpha = WIN_COLOR_DEPTH_B8G8R8A8;
  454. return 0;
  455. case WIN_COLOR_DEPTH_B5G6R5:
  456. *alpha = opaque;
  457. return 0;
  458. }
  459. return -EINVAL;
  460. }
  461. /*
  462. * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
  463. * be emulated using the alpha formats and alpha blending disabled.
  464. */
  465. static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
  466. struct tegra_plane_state *state)
  467. {
  468. unsigned int format;
  469. int err;
  470. switch (state->format) {
  471. case WIN_COLOR_DEPTH_B5G5R5A1:
  472. case WIN_COLOR_DEPTH_A1B5G5R5:
  473. case WIN_COLOR_DEPTH_R8G8B8A8:
  474. case WIN_COLOR_DEPTH_B8G8R8A8:
  475. state->opaque = false;
  476. break;
  477. default:
  478. err = tegra_plane_format_get_alpha(state->format, &format);
  479. if (err < 0)
  480. return err;
  481. state->format = format;
  482. state->opaque = true;
  483. break;
  484. }
  485. return 0;
  486. }
/*
 * Check whether this plane's zpos or opacity changed relative to the old
 * state. If so, pull every sibling plane on the same display controller
 * into the atomic state so their blending can be recomputed.
 *
 * Returns 0 when nothing changed, 1 when siblings were added to the commit,
 * or a negative error code.
 */
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct drm_plane_state *old, *plane_state;
	struct drm_plane *plane;

	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);

	/* check if zpos / transparency changed */
	if (old->normalized_zpos == state->base.normalized_zpos &&
	    to_tegra_plane_state(old)->opaque == state->opaque)
		return 0;

	/* include all sibling planes into this commit */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		plane_state = drm_atomic_get_plane_state(state->base.state,
							 plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 1;
}
  510. static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
  511. struct tegra_plane *other)
  512. {
  513. unsigned int index = 0, i;
  514. WARN_ON(plane == other);
  515. for (i = 0; i < 3; i++) {
  516. if (i == plane->index)
  517. continue;
  518. if (i == other->index)
  519. break;
  520. index++;
  521. }
  522. return index;
  523. }
/*
 * Recompute @tegra's per-overlap blending state against every other plane in
 * the atomic state that sits on the same display controller.
 */
static void tegra_plane_update_transparency(struct tegra_plane *tegra,
					    struct tegra_plane_state *state)
{
	struct drm_plane_state *new;
	struct drm_plane *plane;
	unsigned int i;

	for_each_new_plane_in_state(state->base.state, plane, new, i) {
		struct tegra_plane *p = to_tegra_plane(plane);
		unsigned index;

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		index = tegra_plane_get_overlap_index(tegra, p);

		/* remember whether the overlapping plane uses an alpha format */
		if (new->fb && __drm_format_has_alpha(new->fb->format->format))
			state->blending[index].alpha = true;
		else
			state->blending[index].alpha = false;

		/* a higher normalized zpos means the sibling is stacked above us */
		if (new->normalized_zpos > state->base.normalized_zpos)
			state->blending[index].top = true;
		else
			state->blending[index].top = false;

		/*
		 * Missing framebuffer means that plane is disabled, in this
		 * case mark B / C window as top to be able to differentiate
		 * windows indices order in regards to zPos for the middle
		 * window X / Y registers programming.
		 */
		if (!new->fb)
			state->blending[index].top = (index == 1);
	}
}
/*
 * Update blending (transparency) state for this plane and, when its zpos or
 * opacity changed, for every sibling plane on the same display controller.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct tegra_plane_state *tegra_state;
	struct drm_plane_state *new;
	struct drm_plane *plane;
	int err;

	/*
	 * If planes zpos / transparency changed, sibling planes blending
	 * state may require adjustment and in this case they will be included
	 * into this atom commit, otherwise blending state is unchanged.
	 */
	err = tegra_plane_check_transparency(tegra, state);
	if (err <= 0)
		return err;

	/*
	 * All planes are now in the atomic state, walk them up and update
	 * transparency state for each plane.
	 */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip planes on different CRTCs */
		if (p->dc != tegra->dc)
			continue;

		new = drm_atomic_get_new_plane_state(state->base.state, plane);
		tegra_state = to_tegra_plane_state(new);

		/*
		 * There is no need to update blending state for the disabled
		 * plane.
		 */
		if (new->fb)
			tegra_plane_update_transparency(p, tegra_state);
	}

	return 0;
}
  590. int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
  591. struct tegra_plane_state *state)
  592. {
  593. int err;
  594. err = tegra_plane_setup_opacity(tegra, state);
  595. if (err < 0)
  596. return err;
  597. err = tegra_plane_setup_transparency(tegra, state);
  598. if (err < 0)
  599. return err;
  600. return 0;
  601. }
/*
 * Interconnect path names for the legacy planes, indexed by window index;
 * NULL entries are windows without a dedicated memory client.
 */
static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
	"wina", "winb", "winc", NULL, NULL, NULL, "cursor",
};
  605. int tegra_plane_interconnect_init(struct tegra_plane *plane)
  606. {
  607. const char *icc_name = tegra_plane_icc_names[plane->index];
  608. struct device *dev = plane->dc->dev;
  609. struct tegra_dc *dc = plane->dc;
  610. int err;
  611. if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
  612. WARN_ON(!tegra_plane_icc_names[plane->index]))
  613. return -EINVAL;
  614. plane->icc_mem = devm_of_icc_get(dev, icc_name);
  615. err = PTR_ERR_OR_ZERO(plane->icc_mem);
  616. if (err) {
  617. dev_err_probe(dev, err, "failed to get %s interconnect\n",
  618. icc_name);
  619. return err;
  620. }
  621. /* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
  622. if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
  623. plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
  624. err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
  625. if (err) {
  626. dev_err_probe(dev, err, "failed to get %s interconnect\n",
  627. "winb-vfilter");
  628. return err;
  629. }
  630. }
  631. return 0;
  632. }