vc4_plane.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2015 Broadcom
  4. */
  5. /**
  6. * DOC: VC4 plane module
  7. *
  8. * Each DRM plane is a layer of pixels being scanned out by the HVS.
  9. *
  10. * At atomic modeset check time, we compute the HVS display element
  11. * state that would be necessary for displaying the plane (giving us a
  12. * chance to figure out if a plane configuration is invalid), then at
  13. * atomic flush time the CRTC will ask us to write our element state
  14. * into the region of the HVS that it has allocated for us.
  15. */
  16. #include <drm/drm_atomic.h>
  17. #include <drm/drm_atomic_helper.h>
  18. #include <drm/drm_atomic_uapi.h>
  19. #include <drm/drm_blend.h>
  20. #include <drm/drm_drv.h>
  21. #include <drm/drm_fb_dma_helper.h>
  22. #include <drm/drm_fourcc.h>
  23. #include <drm/drm_framebuffer.h>
  24. #include <drm/drm_gem_atomic_helper.h>
  25. #include "uapi/drm/vc4_drm.h"
  26. #include "vc4_drv.h"
  27. #include "vc4_regs.h"
/* Table mapping DRM fourcc codes onto the HVS hardware pixel-format
 * encoding.  For each supported format we record the HVS format code,
 * the component ordering for HVS4, and (where it differs) the ordering
 * for HVS5.  Formats flagged hvs5_only are rejected on older hardware.
 */
static const struct hvs_format {
        u32 drm; /* DRM_FORMAT_* */
        u32 hvs; /* HVS_PIXEL_FORMAT_* */
        u32 pixel_order;
        u32 pixel_order_hvs5;
        bool hvs5_only;
} hvs_formats[] = {
        /* 32bpp RGB/alpha variants all use the RGBA8888 HVS format with
         * differing component orders; HVS4 and HVS5 disagree on order.
         */
        {
                .drm = DRM_FORMAT_XRGB8888,
                .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ABGR,
                .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
        },
        {
                .drm = DRM_FORMAT_ARGB8888,
                .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ABGR,
                .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
        },
        {
                .drm = DRM_FORMAT_ABGR8888,
                .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ARGB,
                .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
        },
        {
                .drm = DRM_FORMAT_XBGR8888,
                .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ARGB,
                .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
        },
        /* 16bpp formats. */
        {
                .drm = DRM_FORMAT_RGB565,
                .hvs = HVS_PIXEL_FORMAT_RGB565,
                .pixel_order = HVS_PIXEL_ORDER_XRGB,
        },
        {
                .drm = DRM_FORMAT_BGR565,
                .hvs = HVS_PIXEL_FORMAT_RGB565,
                .pixel_order = HVS_PIXEL_ORDER_XBGR,
        },
        {
                .drm = DRM_FORMAT_ARGB1555,
                .hvs = HVS_PIXEL_FORMAT_RGBA5551,
                .pixel_order = HVS_PIXEL_ORDER_ABGR,
                .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
        },
        {
                .drm = DRM_FORMAT_XRGB1555,
                .hvs = HVS_PIXEL_FORMAT_RGBA5551,
                .pixel_order = HVS_PIXEL_ORDER_ABGR,
                .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
        },
        /* 24bpp formats. */
        {
                .drm = DRM_FORMAT_RGB888,
                .hvs = HVS_PIXEL_FORMAT_RGB888,
                .pixel_order = HVS_PIXEL_ORDER_XRGB,
        },
        {
                .drm = DRM_FORMAT_BGR888,
                .hvs = HVS_PIXEL_FORMAT_RGB888,
                .pixel_order = HVS_PIXEL_ORDER_XBGR,
        },
        /* Planar and semi-planar YUV formats; Cb/Cr swaps are expressed
         * through the pixel order field.
         */
        {
                .drm = DRM_FORMAT_YUV422,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
        },
        {
                .drm = DRM_FORMAT_YVU422,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
        },
        {
                .drm = DRM_FORMAT_YUV420,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
        },
        {
                .drm = DRM_FORMAT_YVU420,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
        },
        {
                .drm = DRM_FORMAT_NV12,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
        },
        {
                .drm = DRM_FORMAT_NV21,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
        },
        {
                .drm = DRM_FORMAT_NV16,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
        },
        {
                .drm = DRM_FORMAT_NV61,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
                .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
        },
        /* 10-bit YUV is only supported by the HVS5 scaler. */
        {
                .drm = DRM_FORMAT_P030,
                .hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
                .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
                .hvs5_only = true,
        },
};
  138. static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
  139. {
  140. unsigned i;
  141. for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
  142. if (hvs_formats[i].drm == drm_format)
  143. return &hvs_formats[i];
  144. }
  145. return NULL;
  146. }
  147. static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
  148. {
  149. if (dst == src)
  150. return VC4_SCALING_NONE;
  151. if (3 * dst >= 2 * src)
  152. return VC4_SCALING_PPF;
  153. else
  154. return VC4_SCALING_TPZ;
  155. }
/* A plane state is considered enabled when it has a framebuffer; an fb
 * without a CRTC would be an atomic-core bug, hence the WARN_ON.
 */
static bool plane_enabled(struct drm_plane_state *state)
{
        return state->fb && !WARN_ON(!state->crtc);
}
/* Duplicate the plane's atomic state for a new commit.
 *
 * The whole vc4_plane_state is copied with kmemdup(), then the fields
 * that must not be shared between the old and new state are fixed up:
 * the LBM allocation is cleared (the new state gets its own in
 * vc4_plane_allocate_lbm()), the dlist is marked uninitialized, and the
 * dlist array itself is deep-copied so each state owns its buffer.
 *
 * Returns the new base state, or NULL on allocation failure.
 */
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
        struct vc4_plane_state *vc4_state;

        if (WARN_ON(!plane->state))
                return NULL;

        vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
        if (!vc4_state)
                return NULL;

        /* The copied lbm node would otherwise alias the old state's
         * allocation; reset it so this state starts with none.
         */
        memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
        vc4_state->dlist_initialized = 0;

        __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

        if (vc4_state->dlist) {
                /* Deep-copy the dlist (entries are 4-byte words) so the
                 * new state can be modified independently.
                 */
                vc4_state->dlist = kmemdup(vc4_state->dlist,
                                           vc4_state->dlist_count * 4,
                                           GFP_KERNEL);
                if (!vc4_state->dlist) {
                        kfree(vc4_state);
                        return NULL;
                }
                /* The copy is exactly dlist_count words, so size == count. */
                vc4_state->dlist_size = vc4_state->dlist_count;
        }

        return &vc4_state->base;
}
/* Free a plane state: release its LBM allocation back to the HVS
 * allocator (under the mm spinlock, since the allocator is shared and
 * may be touched from interrupt context), free the private dlist
 * buffer, then tear down the common DRM plane state.
 */
static void vc4_plane_destroy_state(struct drm_plane *plane,
                                    struct drm_plane_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        if (drm_mm_node_allocated(&vc4_state->lbm)) {
                unsigned long irqflags;

                spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
                drm_mm_remove_node(&vc4_state->lbm);
                spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
        }

        kfree(vc4_state->dlist);
        __drm_atomic_helper_plane_destroy_state(&vc4_state->base);
        kfree(state);
}
/* Called during init to allocate the plane's atomic state.
 *
 * Allocates a zeroed vc4_plane_state and installs it as the plane's
 * initial state via the DRM helper.  On allocation failure the plane is
 * simply left without a state (reset has no error return).
 */
static void vc4_plane_reset(struct drm_plane *plane)
{
        struct vc4_plane_state *vc4_state;

        WARN_ON(plane->state);

        vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
        if (!vc4_state)
                return;

        __drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
/* Account for one more dlist word, growing the backing array (by
 * doubling, with a minimum of 4 entries) when it is full.
 *
 * NOTE(review): if kmalloc_array() fails, this returns without bumping
 * dlist_count, leaving count == size; the caller (vc4_dlist_write)
 * will then store at an index just past the current buffer.  Presumably
 * OOM here is treated as unrecoverable — confirm against upstream
 * intent before relying on this path.
 */
static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
        if (vc4_state->dlist_count == vc4_state->dlist_size) {
                u32 new_size = max(4u, vc4_state->dlist_count * 2);
                u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);

                if (!new_dlist)
                        return;

                /* Preserve the existing entries in the larger buffer. */
                memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
                kfree(vc4_state->dlist);
                vc4_state->dlist = new_dlist;
                vc4_state->dlist_size = new_size;
        }

        vc4_state->dlist_count++;
}
/* Append one word to the plane's dlist.  The index is captured before
 * the counter increment, which may also grow the array.
 */
static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
        unsigned int idx = vc4_state->dlist_count;

        vc4_dlist_counter_increment(vc4_state);
        vc4_state->dlist[idx] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.  The switch key packs
 * the horizontal mode into bits [3:2] and the vertical mode into bits
 * [1:0].
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
        case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
                return SCALER_CTL0_SCL_H_PPF_V_PPF;
        case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
                return SCALER_CTL0_SCL_H_TPZ_V_PPF;
        case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
                return SCALER_CTL0_SCL_H_PPF_V_TPZ;
        case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
                return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
        case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
                return SCALER_CTL0_SCL_H_PPF_V_NONE;
        case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
                return SCALER_CTL0_SCL_H_NONE_V_PPF;
        case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
                return SCALER_CTL0_SCL_H_NONE_V_TPZ;
        case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
                return SCALER_CTL0_SCL_H_TPZ_V_NONE;
        default:
        case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
                /* The unity case is independently handled by
                 * SCALER_CTL0_UNITY.
                 */
                return 0;
        }
}
/* Rescale and shift the plane's CRTC rectangle to fit inside the
 * overscan margins configured on the CRTC.  Position and size are
 * scaled proportionally from the full mode dimensions to the
 * margin-adjusted active area, then clamped against the right/bottom
 * margins.
 *
 * Returns 0 on success, -EINVAL if the margins consume the whole
 * display or the plane scales down to a zero-sized rectangle.
 */
static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
{
        struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate);
        unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay;
        struct drm_crtc_state *crtc_state;

        crtc_state = drm_atomic_get_new_crtc_state(pstate->state,
                                                   pstate->crtc);

        vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom);
        if (!left && !right && !top && !bottom)
                return 0;

        if (left + right >= crtc_state->mode.hdisplay ||
            top + bottom >= crtc_state->mode.vdisplay)
                return -EINVAL;

        /* Horizontal: scale x into the reduced width, offset by the
         * left margin, and clamp to the right margin.
         */
        adjhdisplay = crtc_state->mode.hdisplay - (left + right);
        vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x *
                                               adjhdisplay,
                                               crtc_state->mode.hdisplay);
        vc4_pstate->crtc_x += left;
        if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
                vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;

        /* Vertical: same treatment with the top/bottom margins. */
        adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
        vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
                                               adjvdisplay,
                                               crtc_state->mode.vdisplay);
        vc4_pstate->crtc_y += top;
        if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
                vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;

        vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
                                               adjhdisplay,
                                               crtc_state->mode.hdisplay);
        vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h *
                                               adjvdisplay,
                                               crtc_state->mode.vdisplay);

        if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h)
                return -EINVAL;

        return 0;
}
/* Validate the plane state against its CRTC, compute the per-plane
 * framebuffer addresses, convert the 16.16 source coordinates into
 * integers, apply the overscan-margin adjustment, and work out the
 * scaling mode for each axis of each plane (RGB in channel 0, Cb/Cr of
 * planar YUV in channel 1).
 *
 * Returns 0 on success or a negative error code if the configuration
 * cannot be displayed.
 */
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
        struct drm_framebuffer *fb = state->fb;
        struct drm_gem_dma_object *bo;
        int num_planes = fb->format->num_planes;
        struct drm_crtc_state *crtc_state;
        u32 h_subsample = fb->format->hsub;
        u32 v_subsample = fb->format->vsub;
        int i, ret;

        crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                        state->crtc);
        if (!crtc_state) {
                DRM_DEBUG_KMS("Invalid crtc state\n");
                return -EINVAL;
        }

        /* Allow scaling from 1x down to unbounded up-scaling, with
         * clipping to the CRTC viewport permitted.
         */
        ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
                                                  INT_MAX, true, true);
        if (ret)
                return ret;

        /* Record the DMA address of each FB plane's first byte. */
        for (i = 0; i < num_planes; i++) {
                bo = drm_fb_dma_get_gem_obj(fb, i);
                vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
        }

        /*
         * We don't support subpixel source positioning for scaling,
         * but fractional coordinates can be generated by clipping
         * so just round for now
         */
        vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
        vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
        vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
        vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;

        vc4_state->crtc_x = state->dst.x1;
        vc4_state->crtc_y = state->dst.y1;
        vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
        vc4_state->crtc_h = state->dst.y2 - state->dst.y1;

        ret = vc4_plane_margins_adj(state);
        if (ret)
                return ret;

        vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
                                                       vc4_state->crtc_w);
        vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
                                                       vc4_state->crtc_h);

        vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
                               vc4_state->y_scaling[0] == VC4_SCALING_NONE);

        if (num_planes > 1) {
                vc4_state->is_yuv = true;

                /* Chroma dimensions follow the format's subsampling. */
                vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
                vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;

                vc4_state->x_scaling[1] =
                        vc4_get_scaling_mode(vc4_state->src_w[1],
                                             vc4_state->crtc_w);
                vc4_state->y_scaling[1] =
                        vc4_get_scaling_mode(vc4_state->src_h[1],
                                             vc4_state->crtc_h);

                /* YUV conversion requires that horizontal scaling be enabled
                 * on the UV plane even if vc4_get_scaling_mode() returned
                 * VC4_SCALING_NONE (which can happen when the down-scaling
                 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
                 * case.
                 */
                if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
                        vc4_state->x_scaling[1] = VC4_SCALING_PPF;
        } else {
                vc4_state->is_yuv = false;
                vc4_state->x_scaling[1] = VC4_SCALING_NONE;
                vc4_state->y_scaling[1] = VC4_SCALING_NONE;
        }

        return 0;
}
/* Emit the two trapezoidal-scaler dlist words: the 16.16 scale factor
 * and its reciprocal.
 */
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
        u32 scale, recip;

        /* 16.16 fixed-point src/dst ratio. */
        scale = (1 << 16) * src / dst;

        /* The specs note that while the reciprocal would be defined
         * as (1<<32)/scale, ~0 is close enough.
         */
        recip = ~0 / scale;

        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
                        VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}
/* Emit the polyphase-filter dlist word: AGC enable plus the 16.16
 * src/dst scale factor, with a zero initial phase.
 */
static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
        u32 scale = (1 << 16) * src / dst;

        vc4_dlist_write(vc4_state,
                        SCALER_PPF_AGC |
                        VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
                        VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}
/* Compute the line-buffer-memory (LBM) size, in LBM words, that the
 * HVS needs for this plane's vertical scaling.  Returns 0 when no LBM
 * is required (no vertical scaling on either channel).
 */
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
        struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
        u32 pix_per_line;
        u32 lbm;

        /* LBM is not needed when there's no vertical scaling. */
        if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
            vc4_state->y_scaling[1] == VC4_SCALING_NONE)
                return 0;

        /*
         * This can be further optimized in the RGB/YUV444 case if the PPF
         * decimation factor is between 0.5 and 1.0 by using crtc_w.
         *
         * It's not an issue though, since in that case src_w[0] is going
         * to be greater than or equal to crtc_w.
         */
        if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
                pix_per_line = vc4_state->crtc_w;
        else
                pix_per_line = vc4_state->src_w[0];

        if (!vc4_state->is_yuv) {
                if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
                        lbm = pix_per_line * 8;
                else {
                        /* In special cases, this multiplier might be 12. */
                        lbm = pix_per_line * 16;
                }
        } else {
                /* There are cases for this going down to a multiplier
                 * of 2, but according to the firmware source, the
                 * table in the docs is somewhat wrong.
                 */
                lbm = pix_per_line * 16;
        }

        /* Align it to 64 or 128 (hvs5) bytes */
        lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);

        /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
        lbm /= vc4->is_vc5 ? 4 : 2;

        return lbm;
}
/* Emit the per-channel scaling dlist words in the order the hardware
 * expects: H-PPF, V-PPF (+context), H-TPZ, V-TPZ (+context).  Only the
 * words for the modes actually in use are written.
 */
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
                                         int channel)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        /* Ch0 H-PPF Word 0: Scaling Parameters */
        if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
                vc4_write_ppf(vc4_state,
                              vc4_state->src_w[channel], vc4_state->crtc_w);
        }

        /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
        if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
                vc4_write_ppf(vc4_state,
                              vc4_state->src_h[channel], vc4_state->crtc_h);
                vc4_dlist_write(vc4_state, 0xc0c0c0c0);
        }

        /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
        if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
                vc4_write_tpz(vc4_state,
                              vc4_state->src_w[channel], vc4_state->crtc_w);
        }

        /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
        if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
                vc4_write_tpz(vc4_state,
                              vc4_state->src_h[channel], vc4_state->crtc_h);
                vc4_dlist_write(vc4_state, 0xc0c0c0c0);
        }
}
/* Estimate the HVS and memory-bus load this plane will generate, for
 * use by the core-clock / bandwidth checks.  hvs_load is in
 * clk-cycles/sec; membus_load is bytes/sec.
 */
static void vc4_plane_calc_load(struct drm_plane_state *state)
{
        unsigned int hvs_load_shift, vrefresh, i;
        struct drm_framebuffer *fb = state->fb;
        struct vc4_plane_state *vc4_state;
        struct drm_crtc_state *crtc_state;
        unsigned int vscale_factor;

        vc4_state = to_vc4_plane_state(state);
        crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                        state->crtc);
        vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);

        /* The HVS is able to process 2 pixels/cycle when scaling the source,
         * 4 pixels/cycle otherwise.
         * Alpha blending step seems to be pipelined and it's always operating
         * at 4 pixels/cycle, so the limiting aspect here seems to be the
         * scaler block.
         * HVS load is expressed in clk-cycles/sec (AKA Hz).
         */
        if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
            vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
            vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
            vc4_state->y_scaling[1] != VC4_SCALING_NONE)
                hvs_load_shift = 1;
        else
                hvs_load_shift = 2;

        vc4_state->membus_load = 0;
        vc4_state->hvs_load = 0;
        for (i = 0; i < fb->format->num_planes; i++) {
                /* Even if the bandwidth/plane required for a single frame is
                 *
                 * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
                 *
                 * when downscaling, we have to read more pixels per line in
                 * the time frame reserved for a single line, so the bandwidth
                 * demand can be punctually higher. To account for that, we
                 * calculate the down-scaling factor and multiply the plane
                 * load by this number. We're likely over-estimating the read
                 * demand, but that's better than under-estimating it.
                 */
                vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
                                             vc4_state->crtc_h);
                vc4_state->membus_load += vc4_state->src_w[i] *
                                          vc4_state->src_h[i] * vscale_factor *
                                          fb->format->cpp[i];
                vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
        }

        vc4_state->hvs_load *= vrefresh;
        vc4_state->hvs_load >>= hvs_load_shift;
        vc4_state->membus_load *= vrefresh;
}
/* Allocate this state's LBM region from the shared HVS allocator and
 * patch its start address into the already-built dlist at lbm_offset.
 *
 * Returns 0 on success (including the no-LBM-needed case), -EINVAL if
 * the dlist was built without reserving an LBM slot, or the drm_mm
 * insertion error.
 */
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
        unsigned long irqflags;
        u32 lbm_size;

        lbm_size = vc4_lbm_size(state);
        if (!lbm_size)
                return 0;

        /* The dlist builder must have reserved a word for the LBM
         * address; offset 0 means it did not.
         */
        if (WARN_ON(!vc4_state->lbm_offset))
                return -EINVAL;

        /* Allocate the LBM memory that the HVS will use for temporary
         * storage due to our scaling/format conversion.
         */
        if (!drm_mm_node_allocated(&vc4_state->lbm)) {
                int ret;

                /* The allocator is shared with interrupt-context users,
                 * hence the irqsave lock.  Alignment is 64 on hvs5, 32
                 * otherwise.
                 */
                spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
                ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
                                                 &vc4_state->lbm,
                                                 lbm_size,
                                                 vc4->is_vc5 ? 64 : 32,
                                                 0, 0);
                spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);

                if (ret)
                        return ret;
        } else {
                WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
        }

        vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;

        return 0;
}
/*
 * The colorspace conversion matrices are held in 3 entries in the dlist.
 * Create an array of them, with entries for each full and limited mode, and
 * each supported colorspace.
 *
 * Indexing is [range][encoding][word]: range 0 = limited, 1 = full;
 * encoding follows the DRM_COLOR_YCBCR_BT601/709/2020 enum order.
 */
static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
        {
                /* Limited range */
                {
                        /* BT601 */
                        SCALER_CSC0_ITR_R_601_5,
                        SCALER_CSC1_ITR_R_601_5,
                        SCALER_CSC2_ITR_R_601_5,
                }, {
                        /* BT709 */
                        SCALER_CSC0_ITR_R_709_3,
                        SCALER_CSC1_ITR_R_709_3,
                        SCALER_CSC2_ITR_R_709_3,
                }, {
                        /* BT2020 */
                        SCALER_CSC0_ITR_R_2020,
                        SCALER_CSC1_ITR_R_2020,
                        SCALER_CSC2_ITR_R_2020,
                }
        }, {
                /* Full range */
                {
                        /* JFIF */
                        SCALER_CSC0_JPEG_JFIF,
                        SCALER_CSC1_JPEG_JFIF,
                        SCALER_CSC2_JPEG_JFIF,
                }, {
                        /* BT709 */
                        SCALER_CSC0_ITR_R_709_3_FR,
                        SCALER_CSC1_ITR_R_709_3_FR,
                        SCALER_CSC2_ITR_R_709_3_FR,
                }, {
                        /* BT2020 */
                        SCALER_CSC0_ITR_R_2020_FR,
                        SCALER_CSC1_ITR_R_2020_FR,
                        SCALER_CSC2_ITR_R_2020_FR,
                }
        }
};
/* Translate the plane's pixel-blend mode into the HVS4 SCALER_POS2
 * alpha-mode field.  Formats without alpha, and BLEND_PIXEL_NONE, use
 * fixed alpha; PREMULTI (also the fallback for unknown modes) adds the
 * premultiplied flag on top of pipeline alpha; COVERAGE is pipeline
 * alpha without it.
 */
static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
{
        if (!state->fb->format->has_alpha)
                return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
                                     SCALER_POS2_ALPHA_MODE);

        switch (state->pixel_blend_mode) {
        case DRM_MODE_BLEND_PIXEL_NONE:
                return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
                                     SCALER_POS2_ALPHA_MODE);
        default:
        case DRM_MODE_BLEND_PREMULTI:
                return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
                                     SCALER_POS2_ALPHA_MODE) |
                       SCALER_POS2_ALPHA_PREMULT;
        case DRM_MODE_BLEND_COVERAGE:
                return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
                                     SCALER_POS2_ALPHA_MODE);
        }
}
  603. static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
  604. {
  605. if (!state->fb->format->has_alpha)
  606. return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
  607. SCALER5_CTL2_ALPHA_MODE);
  608. switch (state->pixel_blend_mode) {
  609. case DRM_MODE_BLEND_PIXEL_NONE:
  610. return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
  611. SCALER5_CTL2_ALPHA_MODE);
  612. default:
  613. case DRM_MODE_BLEND_PREMULTI:
  614. return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
  615. SCALER5_CTL2_ALPHA_MODE) |
  616. SCALER5_CTL2_ALPHA_PREMULT;
  617. case DRM_MODE_BLEND_COVERAGE:
  618. return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
  619. SCALER5_CTL2_ALPHA_MODE);
  620. }
  621. }
  622. /* Writes out a full display list for an active plane to the plane's
  623. * private dlist state.
  624. */
  625. static int vc4_plane_mode_set(struct drm_plane *plane,
  626. struct drm_plane_state *state)
  627. {
  628. struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
  629. struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
  630. struct drm_framebuffer *fb = state->fb;
  631. u32 ctl0_offset = vc4_state->dlist_count;
  632. const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
  633. u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
  634. int num_planes = fb->format->num_planes;
  635. u32 h_subsample = fb->format->hsub;
  636. u32 v_subsample = fb->format->vsub;
  637. bool mix_plane_alpha;
  638. bool covers_screen;
  639. u32 scl0, scl1, pitch0;
  640. u32 tiling, src_y;
  641. u32 hvs_format = format->hvs;
  642. unsigned int rotation;
  643. int ret, i;
  644. if (vc4_state->dlist_initialized)
  645. return 0;
  646. ret = vc4_plane_setup_clipping_and_scaling(state);
  647. if (ret)
  648. return ret;
  649. /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
  650. * and 4:4:4, scl1 should be set to scl0 so both channels of
  651. * the scaler do the same thing. For YUV, the Y plane needs
  652. * to be put in channel 1 and Cb/Cr in channel 0, so we swap
  653. * the scl fields here.
  654. */
  655. if (num_planes == 1) {
  656. scl0 = vc4_get_scl_field(state, 0);
  657. scl1 = scl0;
  658. } else {
  659. scl0 = vc4_get_scl_field(state, 1);
  660. scl1 = vc4_get_scl_field(state, 0);
  661. }
  662. rotation = drm_rotation_simplify(state->rotation,
  663. DRM_MODE_ROTATE_0 |
  664. DRM_MODE_REFLECT_X |
  665. DRM_MODE_REFLECT_Y);
  666. /* We must point to the last line when Y reflection is enabled. */
  667. src_y = vc4_state->src_y;
  668. if (rotation & DRM_MODE_REFLECT_Y)
  669. src_y += vc4_state->src_h[0] - 1;
  670. switch (base_format_mod) {
  671. case DRM_FORMAT_MOD_LINEAR:
  672. tiling = SCALER_CTL0_TILING_LINEAR;
  673. pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
  674. /* Adjust the base pointer to the first pixel to be scanned
  675. * out.
  676. */
  677. for (i = 0; i < num_planes; i++) {
  678. vc4_state->offsets[i] += src_y /
  679. (i ? v_subsample : 1) *
  680. fb->pitches[i];
  681. vc4_state->offsets[i] += vc4_state->src_x /
  682. (i ? h_subsample : 1) *
  683. fb->format->cpp[i];
  684. }
  685. break;
  686. case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
  687. u32 tile_size_shift = 12; /* T tiles are 4kb */
  688. /* Whole-tile offsets, mostly for setting the pitch. */
  689. u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
  690. u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
  691. u32 tile_w_mask = (1 << tile_w_shift) - 1;
  692. /* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
  693. * the height (in pixels) of a 4k tile.
  694. */
  695. u32 tile_h_mask = (2 << tile_h_shift) - 1;
  696. /* For T-tiled, the FB pitch is "how many bytes from one row to
  697. * the next, such that
  698. *
  699. * pitch * tile_h == tile_size * tiles_per_row
  700. */
  701. u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
  702. u32 tiles_l = vc4_state->src_x >> tile_w_shift;
  703. u32 tiles_r = tiles_w - tiles_l;
  704. u32 tiles_t = src_y >> tile_h_shift;
  705. /* Intra-tile offsets, which modify the base address (the
  706. * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
  707. * base address).
  708. */
  709. u32 tile_y = (src_y >> 4) & 1;
  710. u32 subtile_y = (src_y >> 2) & 3;
  711. u32 utile_y = src_y & 3;
  712. u32 x_off = vc4_state->src_x & tile_w_mask;
  713. u32 y_off = src_y & tile_h_mask;
  714. /* When Y reflection is requested we must set the
  715. * SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines
  716. * after the initial one should be fetched in descending order,
  717. * which makes sense since we start from the last line and go
  718. * backward.
  719. * Don't know why we need y_off = max_y_off - y_off, but it's
  720. * definitely required (I guess it's also related to the "going
  721. * backward" situation).
  722. */
  723. if (rotation & DRM_MODE_REFLECT_Y) {
  724. y_off = tile_h_mask - y_off;
  725. pitch0 = SCALER_PITCH0_TILE_LINE_DIR;
  726. } else {
  727. pitch0 = 0;
  728. }
  729. tiling = SCALER_CTL0_TILING_256B_OR_T;
  730. pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
  731. VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
  732. VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
  733. VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
  734. vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
  735. vc4_state->offsets[0] += subtile_y << 8;
  736. vc4_state->offsets[0] += utile_y << 4;
  737. /* Rows of tiles alternate left-to-right and right-to-left. */
  738. if (tiles_t & 1) {
  739. pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
  740. vc4_state->offsets[0] += (tiles_w - tiles_l) <<
  741. tile_size_shift;
  742. vc4_state->offsets[0] -= (1 + !tile_y) << 10;
  743. } else {
  744. vc4_state->offsets[0] += tiles_l << tile_size_shift;
  745. vc4_state->offsets[0] += tile_y << 10;
  746. }
  747. break;
  748. }
  749. case DRM_FORMAT_MOD_BROADCOM_SAND64:
  750. case DRM_FORMAT_MOD_BROADCOM_SAND128:
  751. case DRM_FORMAT_MOD_BROADCOM_SAND256: {
  752. uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
  753. if (param > SCALER_TILE_HEIGHT_MASK) {
  754. DRM_DEBUG_KMS("SAND height too large (%d)\n",
  755. param);
  756. return -EINVAL;
  757. }
  758. if (fb->format->format == DRM_FORMAT_P030) {
  759. hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
  760. tiling = SCALER_CTL0_TILING_128B;
  761. } else {
  762. hvs_format = HVS_PIXEL_FORMAT_H264;
  763. switch (base_format_mod) {
  764. case DRM_FORMAT_MOD_BROADCOM_SAND64:
  765. tiling = SCALER_CTL0_TILING_64B;
  766. break;
  767. case DRM_FORMAT_MOD_BROADCOM_SAND128:
  768. tiling = SCALER_CTL0_TILING_128B;
  769. break;
  770. case DRM_FORMAT_MOD_BROADCOM_SAND256:
  771. tiling = SCALER_CTL0_TILING_256B_OR_T;
  772. break;
  773. default:
  774. return -EINVAL;
  775. }
  776. }
  777. /* Adjust the base pointer to the first pixel to be scanned
  778. * out.
  779. *
  780. * For P030, y_ptr [31:4] is the 128bit word for the start pixel
  781. * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
  782. * word that should be taken as the first pixel.
  783. * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
  784. * element within the 128bit word, eg for pixel 3 the value
  785. * should be 6.
  786. */
  787. for (i = 0; i < num_planes; i++) {
  788. u32 tile_w, tile, x_off, pix_per_tile;
  789. if (fb->format->format == DRM_FORMAT_P030) {
  790. /*
  791. * Spec says: bits [31:4] of the given address
  792. * should point to the 128-bit word containing
  793. * the desired starting pixel, and bits[3:0]
  794. * should be between 0 and 11, indicating which
  795. * of the 12-pixels in that 128-bit word is the
  796. * first pixel to be used
  797. */
  798. u32 remaining_pixels = vc4_state->src_x % 96;
  799. u32 aligned = remaining_pixels / 12;
  800. u32 last_bits = remaining_pixels % 12;
  801. x_off = aligned * 16 + last_bits;
  802. tile_w = 128;
  803. pix_per_tile = 96;
  804. } else {
  805. switch (base_format_mod) {
  806. case DRM_FORMAT_MOD_BROADCOM_SAND64:
  807. tile_w = 64;
  808. break;
  809. case DRM_FORMAT_MOD_BROADCOM_SAND128:
  810. tile_w = 128;
  811. break;
  812. case DRM_FORMAT_MOD_BROADCOM_SAND256:
  813. tile_w = 256;
  814. break;
  815. default:
  816. return -EINVAL;
  817. }
  818. pix_per_tile = tile_w / fb->format->cpp[0];
  819. x_off = (vc4_state->src_x % pix_per_tile) /
  820. (i ? h_subsample : 1) *
  821. fb->format->cpp[i];
  822. }
  823. tile = vc4_state->src_x / pix_per_tile;
  824. vc4_state->offsets[i] += param * tile_w * tile;
  825. vc4_state->offsets[i] += src_y /
  826. (i ? v_subsample : 1) *
  827. tile_w;
  828. vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
  829. }
  830. pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
  831. break;
  832. }
  833. default:
  834. DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
  835. (long long)fb->modifier);
  836. return -EINVAL;
  837. }
  838. /* Don't waste cycles mixing with plane alpha if the set alpha
  839. * is opaque or there is no per-pixel alpha information.
  840. * In any case we use the alpha property value as the fixed alpha.
  841. */
  842. mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
  843. fb->format->has_alpha;
  844. if (!vc4->is_vc5) {
  845. /* Control word */
  846. vc4_dlist_write(vc4_state,
  847. SCALER_CTL0_VALID |
  848. (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
  849. (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
  850. VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
  851. (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
  852. (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
  853. VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
  854. (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
  855. VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
  856. VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
  857. /* Position Word 0: Image Positions and Alpha Value */
  858. vc4_state->pos0_offset = vc4_state->dlist_count;
  859. vc4_dlist_write(vc4_state,
  860. VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
  861. VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
  862. VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
  863. /* Position Word 1: Scaled Image Dimensions. */
  864. if (!vc4_state->is_unity) {
  865. vc4_dlist_write(vc4_state,
  866. VC4_SET_FIELD(vc4_state->crtc_w,
  867. SCALER_POS1_SCL_WIDTH) |
  868. VC4_SET_FIELD(vc4_state->crtc_h,
  869. SCALER_POS1_SCL_HEIGHT));
  870. }
  871. /* Position Word 2: Source Image Size, Alpha */
  872. vc4_state->pos2_offset = vc4_state->dlist_count;
  873. vc4_dlist_write(vc4_state,
  874. (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
  875. vc4_hvs4_get_alpha_blend_mode(state) |
  876. VC4_SET_FIELD(vc4_state->src_w[0],
  877. SCALER_POS2_WIDTH) |
  878. VC4_SET_FIELD(vc4_state->src_h[0],
  879. SCALER_POS2_HEIGHT));
  880. /* Position Word 3: Context. Written by the HVS. */
  881. vc4_dlist_write(vc4_state, 0xc0c0c0c0);
  882. } else {
  883. u32 hvs_pixel_order = format->pixel_order;
  884. if (format->pixel_order_hvs5)
  885. hvs_pixel_order = format->pixel_order_hvs5;
  886. /* Control word */
  887. vc4_dlist_write(vc4_state,
  888. SCALER_CTL0_VALID |
  889. (hvs_pixel_order << SCALER_CTL0_ORDER_SHIFT) |
  890. (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
  891. VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
  892. (vc4_state->is_unity ?
  893. SCALER5_CTL0_UNITY : 0) |
  894. VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
  895. VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) |
  896. SCALER5_CTL0_ALPHA_EXPAND |
  897. SCALER5_CTL0_RGB_EXPAND);
  898. /* Position Word 0: Image Positions and Alpha Value */
  899. vc4_state->pos0_offset = vc4_state->dlist_count;
  900. vc4_dlist_write(vc4_state,
  901. (rotation & DRM_MODE_REFLECT_Y ?
  902. SCALER5_POS0_VFLIP : 0) |
  903. VC4_SET_FIELD(vc4_state->crtc_x,
  904. SCALER_POS0_START_X) |
  905. (rotation & DRM_MODE_REFLECT_X ?
  906. SCALER5_POS0_HFLIP : 0) |
  907. VC4_SET_FIELD(vc4_state->crtc_y,
  908. SCALER5_POS0_START_Y)
  909. );
  910. /* Control Word 2 */
  911. vc4_dlist_write(vc4_state,
  912. VC4_SET_FIELD(state->alpha >> 4,
  913. SCALER5_CTL2_ALPHA) |
  914. vc4_hvs5_get_alpha_blend_mode(state) |
  915. (mix_plane_alpha ?
  916. SCALER5_CTL2_ALPHA_MIX : 0)
  917. );
  918. /* Position Word 1: Scaled Image Dimensions. */
  919. if (!vc4_state->is_unity) {
  920. vc4_dlist_write(vc4_state,
  921. VC4_SET_FIELD(vc4_state->crtc_w,
  922. SCALER5_POS1_SCL_WIDTH) |
  923. VC4_SET_FIELD(vc4_state->crtc_h,
  924. SCALER5_POS1_SCL_HEIGHT));
  925. }
  926. /* Position Word 2: Source Image Size */
  927. vc4_state->pos2_offset = vc4_state->dlist_count;
  928. vc4_dlist_write(vc4_state,
  929. VC4_SET_FIELD(vc4_state->src_w[0],
  930. SCALER5_POS2_WIDTH) |
  931. VC4_SET_FIELD(vc4_state->src_h[0],
  932. SCALER5_POS2_HEIGHT));
  933. /* Position Word 3: Context. Written by the HVS. */
  934. vc4_dlist_write(vc4_state, 0xc0c0c0c0);
  935. }
  936. /* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
  937. *
  938. * The pointers may be any byte address.
  939. */
  940. vc4_state->ptr0_offset = vc4_state->dlist_count;
  941. for (i = 0; i < num_planes; i++)
  942. vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
  943. /* Pointer Context Word 0/1/2: Written by the HVS */
  944. for (i = 0; i < num_planes; i++)
  945. vc4_dlist_write(vc4_state, 0xc0c0c0c0);
  946. /* Pitch word 0 */
  947. vc4_dlist_write(vc4_state, pitch0);
  948. /* Pitch word 1/2 */
  949. for (i = 1; i < num_planes; i++) {
  950. if (hvs_format != HVS_PIXEL_FORMAT_H264 &&
  951. hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) {
  952. vc4_dlist_write(vc4_state,
  953. VC4_SET_FIELD(fb->pitches[i],
  954. SCALER_SRC_PITCH));
  955. } else {
  956. vc4_dlist_write(vc4_state, pitch0);
  957. }
  958. }
  959. /* Colorspace conversion words */
  960. if (vc4_state->is_yuv) {
  961. enum drm_color_encoding color_encoding = state->color_encoding;
  962. enum drm_color_range color_range = state->color_range;
  963. const u32 *ccm;
  964. if (color_encoding >= DRM_COLOR_ENCODING_MAX)
  965. color_encoding = DRM_COLOR_YCBCR_BT601;
  966. if (color_range >= DRM_COLOR_RANGE_MAX)
  967. color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
  968. ccm = colorspace_coeffs[color_range][color_encoding];
  969. vc4_dlist_write(vc4_state, ccm[0]);
  970. vc4_dlist_write(vc4_state, ccm[1]);
  971. vc4_dlist_write(vc4_state, ccm[2]);
  972. }
  973. vc4_state->lbm_offset = 0;
  974. if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
  975. vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
  976. vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
  977. vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
  978. /* Reserve a slot for the LBM Base Address. The real value will
  979. * be set when calling vc4_plane_allocate_lbm().
  980. */
  981. if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
  982. vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
  983. vc4_state->lbm_offset = vc4_state->dlist_count;
  984. vc4_dlist_counter_increment(vc4_state);
  985. }
  986. if (num_planes > 1) {
  987. /* Emit Cb/Cr as channel 0 and Y as channel
  988. * 1. This matches how we set up scl0/scl1
  989. * above.
  990. */
  991. vc4_write_scaling_parameters(state, 1);
  992. }
  993. vc4_write_scaling_parameters(state, 0);
  994. /* If any PPF setup was done, then all the kernel
  995. * pointers get uploaded.
  996. */
  997. if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
  998. vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
  999. vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
  1000. vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
  1001. u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
  1002. SCALER_PPF_KERNEL_OFFSET);
  1003. /* HPPF plane 0 */
  1004. vc4_dlist_write(vc4_state, kernel);
  1005. /* VPPF plane 0 */
  1006. vc4_dlist_write(vc4_state, kernel);
  1007. /* HPPF plane 1 */
  1008. vc4_dlist_write(vc4_state, kernel);
  1009. /* VPPF plane 1 */
  1010. vc4_dlist_write(vc4_state, kernel);
  1011. }
  1012. }
  1013. vc4_state->dlist[ctl0_offset] |=
  1014. VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
  1015. /* crtc_* are already clipped coordinates. */
  1016. covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
  1017. vc4_state->crtc_w == state->crtc->mode.hdisplay &&
  1018. vc4_state->crtc_h == state->crtc->mode.vdisplay;
  1019. /* Background fill might be necessary when the plane has per-pixel
  1020. * alpha content or a non-opaque plane alpha and could blend from the
  1021. * background or does not cover the entire screen.
  1022. */
  1023. vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
  1024. state->alpha != DRM_BLEND_ALPHA_OPAQUE;
  1025. /* Flag the dlist as initialized to avoid checking it twice in case
  1026. * the async update check already called vc4_plane_mode_set() and
  1027. * decided to fallback to sync update because async update was not
  1028. * possible.
  1029. */
  1030. vc4_state->dlist_initialized = 1;
  1031. vc4_plane_calc_load(state);
  1032. return 0;
  1033. }
  1034. /* If a modeset involves changing the setup of a plane, the atomic
  1035. * infrastructure will call this to validate a proposed plane setup.
  1036. * However, if a plane isn't getting updated, this (and the
  1037. * corresponding vc4_plane_atomic_update) won't get called. Thus, we
  1038. * compute the dlist here and have all active plane dlists get updated
  1039. * in the CRTC's flush.
  1040. */
  1041. static int vc4_plane_atomic_check(struct drm_plane *plane,
  1042. struct drm_atomic_state *state)
  1043. {
  1044. struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
  1045. plane);
  1046. struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
  1047. int ret;
  1048. vc4_state->dlist_count = 0;
  1049. if (!plane_enabled(new_plane_state))
  1050. return 0;
  1051. ret = vc4_plane_mode_set(plane, new_plane_state);
  1052. if (ret)
  1053. return ret;
  1054. return vc4_plane_allocate_lbm(new_plane_state);
  1055. }
/* Intentionally empty atomic_update hook.
 *
 * A plane doesn't know its position within the CRTC's dlist, so the
 * actual upload of this plane's dlist to the hardware is deferred to
 * vc4_plane_write_dlist(), called from the CRTC's atomic_flush.
 */
static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	/* No contents here. Since we don't know where in the CRTC's
	 * dlist we should be stored, our dlist is uploaded to the
	 * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
	 * time.
	 */
}
  1065. u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
  1066. {
  1067. struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
  1068. int i;
  1069. int idx;
  1070. if (!drm_dev_enter(plane->dev, &idx))
  1071. goto out;
  1072. vc4_state->hw_dlist = dlist;
  1073. /* Can't memcpy_toio() because it needs to be 32-bit writes. */
  1074. for (i = 0; i < vc4_state->dlist_count; i++)
  1075. writel(vc4_state->dlist[i], &dlist[i]);
  1076. drm_dev_exit(idx);
  1077. out:
  1078. return vc4_state->dlist_count;
  1079. }
  1080. u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
  1081. {
  1082. const struct vc4_plane_state *vc4_state =
  1083. container_of(state, typeof(*vc4_state), base);
  1084. return vc4_state->dlist_count;
  1085. }
/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from at a new framebuffer.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
	uint32_t addr;
	int idx;

	if (!drm_dev_enter(plane->dev, &idx))
		return;

	/* We're skipping the address adjustment for negative origin,
	 * because this is only called on the primary plane.
	 */
	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);

	/* Scanout address of plane 0 only; assumes a single-plane
	 * (RGB) framebuffer, consistent with primary-plane-only use.
	 */
	addr = bo->dma_addr + fb->offsets[0];

	/* Write the new address into the hardware immediately. The
	 * scanout will start from this address as soon as the FIFO
	 * needs to refill with pixels.
	 */
	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	/* Also update the CPU-side dlist copy, so that any later
	 * atomic updates that don't do a new modeset on our plane
	 * also use our updated address.
	 */
	vc4_state->dlist[vc4_state->ptr0_offset] = addr;

	drm_dev_exit(idx);
}
/* Apply an async (cursor-style) plane update in place.
 *
 * Instead of swapping in the whole new state (which would require a
 * full dlist upload), copy the new state's fields into the current
 * state and then patch only the pos0/pos2/ptr0 dlist words in the
 * hardware dlist. vc4_plane_atomic_async_check() has already verified
 * that those are the only words that differ.
 */
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct vc4_plane_state *vc4_state, *new_vc4_state;
	int idx;

	if (!drm_dev_enter(plane->dev, &idx))
		return;

	/* Take ownership of the new FB by swapping, so the old one is
	 * released when the caller cleans up new_plane_state.
	 */
	swap(plane->state->fb, new_plane_state->fb);
	plane->state->crtc_x = new_plane_state->crtc_x;
	plane->state->crtc_y = new_plane_state->crtc_y;
	plane->state->crtc_w = new_plane_state->crtc_w;
	plane->state->crtc_h = new_plane_state->crtc_h;
	plane->state->src_x = new_plane_state->src_x;
	plane->state->src_y = new_plane_state->src_y;
	plane->state->src_w = new_plane_state->src_w;
	plane->state->src_h = new_plane_state->src_h;
	plane->state->alpha = new_plane_state->alpha;
	plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
	plane->state->rotation = new_plane_state->rotation;
	plane->state->zpos = new_plane_state->zpos;
	plane->state->normalized_zpos = new_plane_state->normalized_zpos;
	plane->state->color_encoding = new_plane_state->color_encoding;
	plane->state->color_range = new_plane_state->color_range;
	plane->state->src = new_plane_state->src;
	plane->state->dst = new_plane_state->dst;
	plane->state->visible = new_plane_state->visible;

	/* Mirror the vc4-specific (clipped/derived) state as well. */
	new_vc4_state = to_vc4_plane_state(new_plane_state);
	vc4_state = to_vc4_plane_state(plane->state);

	vc4_state->crtc_x = new_vc4_state->crtc_x;
	vc4_state->crtc_y = new_vc4_state->crtc_y;
	vc4_state->crtc_h = new_vc4_state->crtc_h;
	vc4_state->crtc_w = new_vc4_state->crtc_w;
	vc4_state->src_x = new_vc4_state->src_x;
	vc4_state->src_y = new_vc4_state->src_y;
	memcpy(vc4_state->src_w, new_vc4_state->src_w,
	       sizeof(vc4_state->src_w));
	memcpy(vc4_state->src_h, new_vc4_state->src_h,
	       sizeof(vc4_state->src_h));
	memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
	       sizeof(vc4_state->x_scaling));
	memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
	       sizeof(vc4_state->y_scaling));
	vc4_state->is_unity = new_vc4_state->is_unity;
	vc4_state->is_yuv = new_vc4_state->is_yuv;
	memcpy(vc4_state->offsets, new_vc4_state->offsets,
	       sizeof(vc4_state->offsets));
	vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;

	/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
	vc4_state->dlist[vc4_state->pos0_offset] =
		new_vc4_state->dlist[vc4_state->pos0_offset];
	vc4_state->dlist[vc4_state->pos2_offset] =
		new_vc4_state->dlist[vc4_state->pos2_offset];
	vc4_state->dlist[vc4_state->ptr0_offset] =
		new_vc4_state->dlist[vc4_state->ptr0_offset];

	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	drm_dev_exit(idx);
}
/* Decide whether a plane update can be applied asynchronously.
 *
 * An async update may only patch the pos0, pos2, ptr0 (and LBM) dlist
 * words of a dlist that's already live in the hardware. Returns 0 if
 * that's sufficient, or -EINVAL to force the caller to fall back to a
 * synchronous update.
 */
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct vc4_plane_state *old_vc4_state, *new_vc4_state;
	int ret;
	u32 i;

	/* Build the candidate dlist so it can be compared below. */
	ret = vc4_plane_mode_set(plane, new_plane_state);
	if (ret)
		return ret;

	old_vc4_state = to_vc4_plane_state(plane->state);
	new_vc4_state = to_vc4_plane_state(new_plane_state);

	/* No hardware dlist yet means there's nothing to patch in place. */
	if (!new_vc4_state->hw_dlist)
		return -EINVAL;

	/* The dlist layout (size and word positions) must be unchanged,
	 * and the LBM allocation must still fit.
	 */
	if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
	    old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
	    old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
	    old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
	    vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state))
		return -EINVAL;

	/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update
	 * if anything else has changed, fallback to a sync update.
	 */
	for (i = 0; i < new_vc4_state->dlist_count; i++) {
		/* lbm_offset == 0 means "no LBM word reserved", so only
		 * skip the LBM word when one actually exists.
		 */
		if (i == new_vc4_state->pos0_offset ||
		    i == new_vc4_state->pos2_offset ||
		    i == new_vc4_state->ptr0_offset ||
		    (new_vc4_state->lbm_offset &&
		     i == new_vc4_state->lbm_offset))
			continue;

		if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
			return -EINVAL;
	}

	return 0;
}
  1218. static int vc4_prepare_fb(struct drm_plane *plane,
  1219. struct drm_plane_state *state)
  1220. {
  1221. struct vc4_bo *bo;
  1222. if (!state->fb)
  1223. return 0;
  1224. bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
  1225. drm_gem_plane_helper_prepare_fb(plane, state);
  1226. if (plane->state->fb == state->fb)
  1227. return 0;
  1228. return vc4_bo_inc_usecnt(bo);
  1229. }
  1230. static void vc4_cleanup_fb(struct drm_plane *plane,
  1231. struct drm_plane_state *state)
  1232. {
  1233. struct vc4_bo *bo;
  1234. if (plane->state->fb == state->fb || !state->fb)
  1235. return;
  1236. bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
  1237. vc4_bo_dec_usecnt(bo);
  1238. }
/* Plane helper hooks for VC4 (HVS4). Unlike the VC5 table below, this
 * one installs prepare_fb/cleanup_fb to manage the vc4_bo usecnt.
 */
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
	.cleanup_fb = vc4_cleanup_fb,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};
/* Plane helper hooks for VC5 (HVS5); no prepare_fb/cleanup_fb here, so
 * no vc4_bo usecnt tracking is done on this path.
 */
static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};
  1253. static bool vc4_format_mod_supported(struct drm_plane *plane,
  1254. uint32_t format,
  1255. uint64_t modifier)
  1256. {
  1257. /* Support T_TILING for RGB formats only. */
  1258. switch (format) {
  1259. case DRM_FORMAT_XRGB8888:
  1260. case DRM_FORMAT_ARGB8888:
  1261. case DRM_FORMAT_ABGR8888:
  1262. case DRM_FORMAT_XBGR8888:
  1263. case DRM_FORMAT_RGB565:
  1264. case DRM_FORMAT_BGR565:
  1265. case DRM_FORMAT_ARGB1555:
  1266. case DRM_FORMAT_XRGB1555:
  1267. switch (fourcc_mod_broadcom_mod(modifier)) {
  1268. case DRM_FORMAT_MOD_LINEAR:
  1269. case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
  1270. return true;
  1271. default:
  1272. return false;
  1273. }
  1274. case DRM_FORMAT_NV12:
  1275. case DRM_FORMAT_NV21:
  1276. switch (fourcc_mod_broadcom_mod(modifier)) {
  1277. case DRM_FORMAT_MOD_LINEAR:
  1278. case DRM_FORMAT_MOD_BROADCOM_SAND64:
  1279. case DRM_FORMAT_MOD_BROADCOM_SAND128:
  1280. case DRM_FORMAT_MOD_BROADCOM_SAND256:
  1281. return true;
  1282. default:
  1283. return false;
  1284. }
  1285. case DRM_FORMAT_P030:
  1286. switch (fourcc_mod_broadcom_mod(modifier)) {
  1287. case DRM_FORMAT_MOD_BROADCOM_SAND128:
  1288. return true;
  1289. default:
  1290. return false;
  1291. }
  1292. case DRM_FORMAT_RGBX1010102:
  1293. case DRM_FORMAT_BGRX1010102:
  1294. case DRM_FORMAT_RGBA1010102:
  1295. case DRM_FORMAT_BGRA1010102:
  1296. case DRM_FORMAT_YUV422:
  1297. case DRM_FORMAT_YVU422:
  1298. case DRM_FORMAT_YUV420:
  1299. case DRM_FORMAT_YVU420:
  1300. case DRM_FORMAT_NV16:
  1301. case DRM_FORMAT_NV61:
  1302. default:
  1303. return (modifier == DRM_FORMAT_MOD_LINEAR);
  1304. }
  1305. }
/* Core plane vtable shared by VC4 and VC5; legacy plane ioctls are
 * routed through the atomic helpers.
 */
static const struct drm_plane_funcs vc4_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = vc4_plane_reset,
	.atomic_duplicate_state = vc4_plane_duplicate_state,
	.atomic_destroy_state = vc4_plane_destroy_state,
	.format_mod_supported = vc4_format_mod_supported,
};
  1314. struct drm_plane *vc4_plane_init(struct drm_device *dev,
  1315. enum drm_plane_type type,
  1316. uint32_t possible_crtcs)
  1317. {
  1318. struct vc4_dev *vc4 = to_vc4_dev(dev);
  1319. struct drm_plane *plane;
  1320. struct vc4_plane *vc4_plane;
  1321. u32 formats[ARRAY_SIZE(hvs_formats)];
  1322. int num_formats = 0;
  1323. unsigned i;
  1324. static const uint64_t modifiers[] = {
  1325. DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
  1326. DRM_FORMAT_MOD_BROADCOM_SAND128,
  1327. DRM_FORMAT_MOD_BROADCOM_SAND64,
  1328. DRM_FORMAT_MOD_BROADCOM_SAND256,
  1329. DRM_FORMAT_MOD_LINEAR,
  1330. DRM_FORMAT_MOD_INVALID
  1331. };
  1332. for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
  1333. if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
  1334. formats[num_formats] = hvs_formats[i].drm;
  1335. num_formats++;
  1336. }
  1337. }
  1338. vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
  1339. possible_crtcs,
  1340. &vc4_plane_funcs,
  1341. formats, num_formats,
  1342. modifiers, type, NULL);
  1343. if (IS_ERR(vc4_plane))
  1344. return ERR_CAST(vc4_plane);
  1345. plane = &vc4_plane->base;
  1346. if (vc4->is_vc5)
  1347. drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
  1348. else
  1349. drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
  1350. drm_plane_create_alpha_property(plane);
  1351. drm_plane_create_blend_mode_property(plane,
  1352. BIT(DRM_MODE_BLEND_PIXEL_NONE) |
  1353. BIT(DRM_MODE_BLEND_PREMULTI) |
  1354. BIT(DRM_MODE_BLEND_COVERAGE));
  1355. drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
  1356. DRM_MODE_ROTATE_0 |
  1357. DRM_MODE_ROTATE_180 |
  1358. DRM_MODE_REFLECT_X |
  1359. DRM_MODE_REFLECT_Y);
  1360. drm_plane_create_color_properties(plane,
  1361. BIT(DRM_COLOR_YCBCR_BT601) |
  1362. BIT(DRM_COLOR_YCBCR_BT709) |
  1363. BIT(DRM_COLOR_YCBCR_BT2020),
  1364. BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
  1365. BIT(DRM_COLOR_YCBCR_FULL_RANGE),
  1366. DRM_COLOR_YCBCR_BT709,
  1367. DRM_COLOR_YCBCR_LIMITED_RANGE);
  1368. return plane;
  1369. }
  1370. int vc4_plane_create_additional_planes(struct drm_device *drm)
  1371. {
  1372. struct drm_plane *cursor_plane;
  1373. struct drm_crtc *crtc;
  1374. unsigned int i;
  1375. /* Set up some arbitrary number of planes. We're not limited
  1376. * by a set number of physical registers, just the space in
  1377. * the HVS (16k) and how small an plane can be (28 bytes).
  1378. * However, each plane we set up takes up some memory, and
  1379. * increases the cost of looping over planes, which atomic
  1380. * modesetting does quite a bit. As a result, we pick a
  1381. * modest number of planes to expose, that should hopefully
  1382. * still cover any sane usecase.
  1383. */
  1384. for (i = 0; i < 16; i++) {
  1385. struct drm_plane *plane =
  1386. vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
  1387. GENMASK(drm->mode_config.num_crtc - 1, 0));
  1388. if (IS_ERR(plane))
  1389. continue;
  1390. }
  1391. drm_for_each_crtc(crtc, drm) {
  1392. /* Set up the legacy cursor after overlay initialization,
  1393. * since we overlay planes on the CRTC in the order they were
  1394. * initialized.
  1395. */
  1396. cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
  1397. drm_crtc_mask(crtc));
  1398. if (!IS_ERR(cursor_plane)) {
  1399. crtc->cursor = cursor_plane;
  1400. }
  1401. }
  1402. return 0;
  1403. }