sde_encoder_phys_vid.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include "sde_encoder_phys.h"
  8. #include "sde_hw_interrupts.h"
  9. #include "sde_core_irq.h"
  10. #include "sde_formats.h"
  11. #include "dsi_display.h"
  12. #include "sde_trace.h"
  13. #define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
  14. (e) && (e)->base.parent ? \
  15. (e)->base.parent->base.id : -1, \
  16. (e) && (e)->base.hw_intf ? \
  17. (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
  18. #define SDE_ERROR_VIDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
  19. (e) && (e)->base.parent ? \
  20. (e)->base.parent->base.id : -1, \
  21. (e) && (e)->base.hw_intf ? \
  22. (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
  23. #define to_sde_encoder_phys_vid(x) \
  24. container_of(x, struct sde_encoder_phys_vid, base)
  25. /* Poll time to do recovery during active region */
  26. #define POLL_TIME_USEC_FOR_LN_CNT 500
  27. #define MAX_POLL_CNT 10
  28. static bool sde_encoder_phys_vid_is_master(
  29. struct sde_encoder_phys *phys_enc)
  30. {
  31. bool ret = false;
  32. if (phys_enc->split_role != ENC_ROLE_SLAVE)
  33. ret = true;
  34. return ret;
  35. }
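/*
 * drm_mode_to_intf_timing_params - translate a DRM display mode into the
 * intf_timing_params consumed by the INTF timing engine. Porch and sync
 * pulse widths are derived from the mode's sync positions, then adjusted
 * per interface type (DSI polarity, DP/eDP porch shifting, widebus and
 * compression scaling) further below.
 */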
  36. static void drm_mode_to_intf_timing_params(
  37. const struct sde_encoder_phys_vid *vid_enc,
  38. const struct drm_display_mode *mode,
  39. struct intf_timing_params *timing)
  40. {
  41. const struct sde_encoder_phys *phys_enc = &vid_enc->base;
  42. memset(timing, 0, sizeof(*timing));
  43. if ((mode->htotal < mode->hsync_end)
  44. || (mode->hsync_start < mode->hdisplay)
  45. || (mode->vtotal < mode->vsync_end)
  46. || (mode->vsync_start < mode->vdisplay)
  47. || (mode->hsync_end < mode->hsync_start)
  48. || (mode->vsync_end < mode->vsync_start)) {
  49. SDE_ERROR(
  50. "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
  51. mode->hsync_start, mode->hsync_end,
  52. mode->htotal, mode->hdisplay);
  53. SDE_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
  54. mode->vsync_start, mode->vsync_end,
  55. mode->vtotal, mode->vdisplay);
  56. return;
  57. }
  58. /*
  59. * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
  60. * Active Region Front Porch Sync Back Porch
  61. * <-----------------><------------><-----><----------->
  62. * <- [hv]display --->
  63. * <--------- [hv]sync_start ------>
  64. * <----------------- [hv]sync_end ------->
  65. * <---------------------------- [hv]total ------------->
  66. */
  67. timing->poms_align_vsync = phys_enc->poms_align_vsync;
  68. timing->width = mode->hdisplay; /* active width */
  69. timing->height = mode->vdisplay; /* active height */
  70. timing->xres = timing->width;
  71. timing->yres = timing->height;
  72. timing->h_back_porch = mode->htotal - mode->hsync_end;
  73. timing->h_front_porch = mode->hsync_start - mode->hdisplay;
  74. timing->v_back_porch = mode->vtotal - mode->vsync_end;
  75. timing->v_front_porch = mode->vsync_start - mode->vdisplay;
  76. timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
  77. timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
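/*
 * Illustrative porch math (hypothetical mode): hdisplay=1080,
 * hsync_start=1100, hsync_end=1110, htotal=1130 gives
 * h_front_porch=20, hsync_pulse_width=10 and h_back_porch=20.
 */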
  78. timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
  79. timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
  80. timing->border_clr = 0;
  81. timing->underflow_clr = 0xff;
  82. timing->hsync_skew = mode->hskew;
  83. timing->v_front_porch_fixed = vid_enc->base.vfp_cached;
  84. timing->vrefresh = drm_mode_vrefresh(&phys_enc->cached_mode);
  85. if (vid_enc->base.comp_type != MSM_DISPLAY_COMPRESSION_NONE) {
  86. timing->compression_en = true;
  87. timing->dce_bytes_per_line = vid_enc->base.dce_bytes_per_line;
  88. }
  89. /* DSI controller cannot handle active-low sync signals. */
  90. if (phys_enc->hw_intf->cap->type == INTF_DSI) {
  91. timing->hsync_polarity = 0;
  92. timing->vsync_polarity = 0;
  93. }
  94. /* for DP/EDP, shift timings to align them to the bottom right */
  95. if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
  96. (phys_enc->hw_intf->cap->type == INTF_EDP)) {
  97. timing->h_back_porch += timing->h_front_porch;
  98. timing->h_front_porch = 0;
  99. timing->v_back_porch += timing->v_front_porch;
  100. timing->v_front_porch = 0;
  101. }
  102. timing->wide_bus_en = sde_encoder_is_widebus_enabled(phys_enc->parent);
  103. /*
  104. * for DP, divide the horizontal parameters by 2 when
  105. * widebus or compression is enabled, irrespective of
  106. * compression ratio
  107. */
  108. if (phys_enc->hw_intf->cap->type == INTF_DP &&
  109. (timing->wide_bus_en ||
  110. (vid_enc->base.comp_ratio > 1))) {
  111. timing->width = timing->width >> 1;
  112. timing->xres = timing->xres >> 1;
  113. timing->h_back_porch = timing->h_back_porch >> 1;
  114. timing->h_front_porch = timing->h_front_porch >> 1;
  115. timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
  116. if (vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
  117. (vid_enc->base.comp_ratio > 1)) {
  118. timing->extra_dto_cycles =
  119. vid_enc->base.dsc_extra_pclk_cycle_cnt;
  120. timing->width += vid_enc->base.dsc_extra_disp_width;
  121. timing->h_back_porch +=
  122. vid_enc->base.dsc_extra_disp_width;
  123. }
  124. }
  125. /*
  126. * for DSI, if compression is enabled, then divide the horizontal active
  127. * timing parameters by compression ratio.
  128. */
  129. if ((phys_enc->hw_intf->cap->type != INTF_DP) &&
  130. ((vid_enc->base.comp_type ==
  131. MSM_DISPLAY_COMPRESSION_DSC) ||
  132. (vid_enc->base.comp_type ==
  133. MSM_DISPLAY_COMPRESSION_VDC))) {
  134. // adjust active dimensions
  135. timing->width = DIV_ROUND_UP(timing->width,
  136. vid_enc->base.comp_ratio);
  137. timing->xres = DIV_ROUND_UP(timing->xres,
  138. vid_enc->base.comp_ratio);
  139. }
  140. /*
  141. * For edp only:
  142. * DISPLAY_V_START = (VBP * HCYCLE) + HBP
  143. * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
  144. */
  145. /*
  146. * if (vid_enc->hw->cap->type == INTF_EDP) {
  147. * display_v_start += mode->htotal - mode->hsync_start;
  148. * display_v_end -= mode->hsync_start - mode->hdisplay;
  149. * }
  150. */
  151. }
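/*
 * The totals below are active plus blanking (front porch + sync pulse +
 * back porch); programmable_fetch_config() uses them to convert the fetch
 * line offset into a vsync-counter (pixel clock) value.
 */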
  152. static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
  153. {
  154. u32 active = timing->xres;
  155. u32 inactive =
  156. timing->h_back_porch + timing->h_front_porch +
  157. timing->hsync_pulse_width;
  158. return active + inactive;
  159. }
  160. static inline u32 get_vertical_total(const struct intf_timing_params *timing)
  161. {
  162. u32 active = timing->yres;
  163. u32 inactive = timing->v_back_porch + timing->v_front_porch +
  164. timing->vsync_pulse_width;
  165. return active + inactive;
  166. }
  167. /*
  168. * programmable_fetch_get_num_lines:
  169. * Number of fetch lines in vertical front porch
  170. * @timing: Pointer to the intf timing information for the requested mode
  171. *
  172. * Returns the number of fetch lines in vertical front porch at which mdp
  173. * can start fetching the next frame.
  174. *
  175. * Number of needed prefetch lines is anything that cannot be absorbed in the
  176. * start of frame time (back porch + vsync pulse width).
  177. *
  178. * Some panels have a very large VFP; however, we only need enough lines to
  179. * cover the chip's worst-case latencies.
  180. */
  181. static u32 programmable_fetch_get_num_lines(
  182. struct sde_encoder_phys_vid *vid_enc,
  183. const struct intf_timing_params *timing)
  184. {
  185. struct sde_encoder_phys *phys_enc = &vid_enc->base;
  186. struct sde_mdss_cfg *m;
  187. u32 needed_prefill_lines, needed_vfp_lines, actual_vfp_lines;
  188. const u32 fixed_prefill_fps = DEFAULT_FPS;
  189. u32 default_prefill_lines =
  190. phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
  191. u32 start_of_frame_lines =
  192. timing->v_back_porch + timing->vsync_pulse_width;
  193. u32 v_front_porch = timing->v_front_porch;
  194. u32 vrefresh, max_fps;
  195. m = phys_enc->sde_kms->catalog;
  196. max_fps = sde_encoder_get_dfps_maxfps(phys_enc->parent);
  197. vrefresh = (max_fps > timing->vrefresh) ? max_fps : timing->vrefresh;
  198. /* minimum prefill lines are defined based on 60fps */
  199. needed_prefill_lines = (vrefresh > fixed_prefill_fps) ?
  200. ((default_prefill_lines * vrefresh) /
  201. fixed_prefill_fps) : default_prefill_lines;
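/*
 * Example with hypothetical numbers: if prog_fetch_lines_worst_case is 25
 * and the panel refreshes at 120 fps, needed_prefill_lines scales to
 * (25 * 120) / 60 = 50 lines.
 */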
  202. needed_vfp_lines = needed_prefill_lines - start_of_frame_lines;
  203. /* Fetch must be outside active lines, otherwise undefined. */
  204. if (start_of_frame_lines >= needed_prefill_lines) {
  205. SDE_DEBUG_VIDENC(vid_enc,
  206. "prog fetch always enabled case\n");
  207. actual_vfp_lines = (test_bit(SDE_FEATURE_DELAY_PRG_FETCH, m->features)) ? 2 : 1;
  208. } else if (v_front_porch < needed_vfp_lines) {
  209. /* Warn: fetch is needed, but the panel config does not have enough porch */
  210. pr_warn_once
  211. ("low vbp+vfp may lead to perf issues in some cases\n");
  212. SDE_DEBUG_VIDENC(vid_enc,
  213. "less vfp than fetch req, using entire vfp\n");
  214. actual_vfp_lines = v_front_porch;
  215. } else {
  216. SDE_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
  217. actual_vfp_lines = needed_vfp_lines;
  218. }
  219. SDE_DEBUG_VIDENC(vid_enc,
  220. "vrefresh:%u v_front_porch:%u v_back_porch:%u vsync_pulse_width:%u\n",
  221. vrefresh, v_front_porch, timing->v_back_porch,
  222. timing->vsync_pulse_width);
  223. SDE_DEBUG_VIDENC(vid_enc,
  224. "prefill_lines:%u needed_vfp_lines:%u actual_vfp_lines:%u\n",
  225. needed_prefill_lines, needed_vfp_lines, actual_vfp_lines);
  226. return actual_vfp_lines;
  227. }
  228. /*
  229. * programmable_fetch_config: Programs HW to prefetch lines by offsetting
  230. * the start of fetch into the vertical front porch for cases where the
  231. * vsync pulse width and vertical back porch time is insufficient
  232. *
  233. * Gets # of lines to pre-fetch, then calculates the VSYNC counter value.
  234. * HW layer requires VSYNC counter of first pixel of tgt VFP line.
  235. *
  236. * @timing: Pointer to the intf timing information for the requested mode
  237. */
  238. static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
  239. const struct intf_timing_params *timing)
  240. {
  241. struct sde_encoder_phys_vid *vid_enc =
  242. to_sde_encoder_phys_vid(phys_enc);
  243. struct intf_prog_fetch f = { 0 };
  244. u32 vfp_fetch_lines = 0;
  245. u32 horiz_total = 0;
  246. u32 vert_total = 0;
  247. u32 vfp_fetch_start_vsync_counter = 0;
  248. unsigned long lock_flags;
  249. struct sde_mdss_cfg *m;
  250. if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
  251. return;
  252. m = phys_enc->sde_kms->catalog;
  253. vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
  254. if (vfp_fetch_lines) {
  255. vert_total = get_vertical_total(timing);
  256. horiz_total = get_horizontal_total(timing);
  257. vfp_fetch_start_vsync_counter =
  258. (vert_total - vfp_fetch_lines) * horiz_total + 1;
  259. /**
  260. * Check if we need to throttle the fetch to start
  261. * from second line after the active region.
  262. */
  263. if (test_bit(SDE_FEATURE_DELAY_PRG_FETCH, m->features))
  264. vfp_fetch_start_vsync_counter += horiz_total;
  265. f.enable = 1;
  266. f.fetch_start = vfp_fetch_start_vsync_counter;
  267. }
  268. SDE_DEBUG_VIDENC(vid_enc,
  269. "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
  270. vfp_fetch_lines, vfp_fetch_start_vsync_counter);
  271. spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
  272. phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
  273. spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
  274. }
  275. static bool sde_encoder_phys_vid_mode_fixup(
  276. struct sde_encoder_phys *phys_enc,
  277. const struct drm_display_mode *mode,
  278. struct drm_display_mode *adj_mode)
  279. {
  280. if (phys_enc)
  281. SDE_DEBUG_VIDENC(to_sde_encoder_phys_vid(phys_enc), "\n");
  282. /*
  283. * Modifying mode has consequences when the mode comes back to us
  284. */
  285. return true;
  286. }
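/*
 * Qsync/AVR setup: default_fps is taken from the cached DRM mode and
 * qsync_min_fps from the connector. Both are handed to the hw_intf
 * avr_setup op so the refresh rate may drop as low as the qsync floor;
 * qsync_min_fps must therefore not exceed default_fps.
 */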
  287. /* vid_enc timing_params must be configured before calling this function */
  288. static void _sde_encoder_phys_vid_setup_avr(
  289. struct sde_encoder_phys *phys_enc, u32 qsync_min_fps)
  290. {
  291. struct sde_encoder_phys_vid *vid_enc;
  292. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  293. if (vid_enc->base.hw_intf->ops.avr_setup) {
  294. struct intf_avr_params avr_params = {0};
  295. u32 default_fps = drm_mode_vrefresh(&phys_enc->cached_mode);
  296. int ret;
  297. if (!default_fps) {
  298. SDE_ERROR_VIDENC(vid_enc,
  299. "invalid default fps %d\n",
  300. default_fps);
  301. return;
  302. }
  303. if (qsync_min_fps > default_fps) {
  304. SDE_ERROR_VIDENC(vid_enc,
  305. "qsync fps %d must be less than default %d\n",
  306. qsync_min_fps, default_fps);
  307. return;
  308. }
  309. avr_params.default_fps = default_fps;
  310. avr_params.min_fps = qsync_min_fps;
  311. ret = vid_enc->base.hw_intf->ops.avr_setup(
  312. vid_enc->base.hw_intf,
  313. &vid_enc->timing_params, &avr_params);
  314. if (ret)
  315. SDE_ERROR_VIDENC(vid_enc,
  316. "bad settings, can't configure AVR\n");
  317. SDE_EVT32(DRMID(phys_enc->parent), default_fps,
  318. qsync_min_fps, ret);
  319. }
  320. }
  321. static void _sde_encoder_phys_vid_avr_ctrl(struct sde_encoder_phys *phys_enc)
  322. {
  323. struct intf_avr_params avr_params;
  324. struct sde_encoder_phys_vid *vid_enc = to_sde_encoder_phys_vid(phys_enc);
  325. u32 avr_step_fps = sde_connector_get_avr_step(phys_enc->connector);
  326. memset(&avr_params, 0, sizeof(avr_params));
  327. avr_params.avr_mode = sde_connector_get_qsync_mode(phys_enc->connector);
  328. if (avr_step_fps)
  329. avr_params.avr_step_lines = mult_frac(phys_enc->cached_mode.vtotal,
  330. vid_enc->timing_params.vrefresh, avr_step_fps);
  331. if (vid_enc->base.hw_intf->ops.avr_ctrl)
  332. vid_enc->base.hw_intf->ops.avr_ctrl(vid_enc->base.hw_intf, &avr_params);
  333. SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0,
  334. avr_params.avr_mode, avr_params.avr_step_lines, avr_step_fps);
  335. }
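/*
 * sde_encoder_phys_vid_setup_timing_engine - program the INTF timing
 * generator for the cached mode: halve the horizontal timings for split
 * roles or split-link, cache the panel VFP, program the CTL interface
 * config, and (for DSI) the programmable fetch. AVR is configured at the
 * end when the panel reports a non-zero qsync min fps.
 */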
  336. static void sde_encoder_phys_vid_setup_timing_engine(
  337. struct sde_encoder_phys *phys_enc)
  338. {
  339. struct sde_encoder_phys_vid *vid_enc;
  340. struct drm_display_mode mode;
  341. struct intf_timing_params timing_params = { 0 };
  342. const struct sde_format *fmt = NULL;
  343. u32 fmt_fourcc = DRM_FORMAT_RGB888;
  344. u32 qsync_min_fps = 0;
  345. unsigned long lock_flags;
  346. struct sde_hw_intf_cfg intf_cfg = { 0 };
  347. bool is_split_link = false;
  348. if (!phys_enc || !phys_enc->sde_kms || !phys_enc->hw_ctl ||
  349. !phys_enc->hw_intf || !phys_enc->connector) {
  350. SDE_ERROR("invalid encoder %d\n", !phys_enc);
  351. return;
  352. }
  353. mode = phys_enc->cached_mode;
  354. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  355. if (!phys_enc->hw_intf->ops.setup_timing_gen) {
  356. SDE_ERROR("timing engine setup is not supported\n");
  357. return;
  358. }
  359. SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
  360. drm_mode_debug_printmodeline(&mode);
  361. is_split_link = phys_enc->hw_intf->cfg.split_link_en;
  362. if (phys_enc->split_role != ENC_ROLE_SOLO || is_split_link) {
  363. mode.hdisplay >>= 1;
  364. mode.htotal >>= 1;
  365. mode.hsync_start >>= 1;
  366. mode.hsync_end >>= 1;
  367. SDE_DEBUG_VIDENC(vid_enc,
  368. "split_role %d, halve horizontal %d %d %d %d\n",
  369. phys_enc->split_role,
  370. mode.hdisplay, mode.htotal,
  371. mode.hsync_start, mode.hsync_end);
  372. }
  373. if (!phys_enc->vfp_cached) {
  374. phys_enc->vfp_cached =
  375. sde_connector_get_panel_vfp(phys_enc->connector, &mode);
  376. if (phys_enc->vfp_cached <= 0)
  377. phys_enc->vfp_cached = mode.vsync_start - mode.vdisplay;
  378. }
  379. drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
  380. vid_enc->timing_params = timing_params;
  381. if (phys_enc->cont_splash_enabled) {
  382. SDE_DEBUG_VIDENC(vid_enc,
  383. "skipping intf programming since cont splash is enabled\n");
  384. goto exit;
  385. }
  386. fmt = sde_get_sde_format(fmt_fourcc);
  387. SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
  388. spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
  389. phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
  390. &timing_params, fmt);
  391. if (test_bit(SDE_CTL_ACTIVE_CFG,
  392. &phys_enc->hw_ctl->caps->features)) {
  393. sde_encoder_helper_update_intf_cfg(phys_enc);
  394. } else if (phys_enc->hw_ctl->ops.setup_intf_cfg) {
  395. intf_cfg.intf = phys_enc->hw_intf->idx;
  396. intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_VID;
  397. intf_cfg.stream_sel = 0; /* Don't care value for video mode */
  398. intf_cfg.mode_3d =
  399. sde_encoder_helper_get_3d_blend_mode(phys_enc);
  400. phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
  401. &intf_cfg);
  402. }
  403. spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
  404. if (phys_enc->hw_intf->cap->type == INTF_DSI)
  405. programmable_fetch_config(phys_enc, &timing_params);
  406. exit:
  407. if (phys_enc->parent_ops.get_qsync_fps)
  408. phys_enc->parent_ops.get_qsync_fps(
  409. phys_enc->parent, &qsync_min_fps, phys_enc->connector->state);
  410. /* only panels which support qsync will have a non-zero min fps */
  411. if (qsync_min_fps) {
  412. _sde_encoder_phys_vid_setup_avr(phys_enc, qsync_min_fps);
  413. _sde_encoder_phys_vid_avr_ctrl(phys_enc);
  414. }
  415. }
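/*
 * VSYNC interrupt handler: the pending kickoff count is decremented only
 * when the CTL flush register reads back as consumed, and frame-done /
 * retire-fence events are signalled from the master encoder only.
 */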
  416. static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
  417. {
  418. struct sde_encoder_phys *phys_enc = arg;
  419. struct sde_hw_ctl *hw_ctl;
  420. struct intf_status intf_status = {0};
  421. unsigned long lock_flags;
  422. u32 flush_register = ~0;
  423. u32 reset_status = 0;
  424. int new_cnt = -1, old_cnt = -1;
  425. u32 event = 0;
  426. int pend_ret_fence_cnt = 0;
  427. u32 fence_ready = -1;
  428. if (!phys_enc)
  429. return;
  430. hw_ctl = phys_enc->hw_ctl;
  431. if (!hw_ctl)
  432. return;
  433. SDE_ATRACE_BEGIN("vblank_irq");
  434. /*
  435. * only decrement the pending flush count if we've actually flushed
  436. * hardware. due to sw irq latency, vblank may have already happened
  437. * so we need to double-check with hw that it accepted the flush bits
  438. */
  439. spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
  440. old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
  441. if (hw_ctl->ops.get_flush_register)
  442. flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
  443. if (flush_register)
  444. goto not_flushed;
  445. new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
  446. pend_ret_fence_cnt = atomic_read(&phys_enc->pending_retire_fence_cnt);
  447. /* signal only for master, where there is a pending kickoff */
  448. if (sde_encoder_phys_vid_is_master(phys_enc) &&
  449. atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
  450. event = SDE_ENCODER_FRAME_EVENT_DONE |
  451. SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE |
  452. SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
  453. }
  454. not_flushed:
  455. if (hw_ctl->ops.get_reset)
  456. reset_status = hw_ctl->ops.get_reset(hw_ctl);
  457. spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
  458. if (event && phys_enc->parent_ops.handle_frame_done)
  459. phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
  460. phys_enc, event);
  461. if (phys_enc->parent_ops.handle_vblank_virt)
  462. phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
  463. phys_enc);
  464. if (phys_enc->hw_intf->ops.get_status)
  465. phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf,
  466. &intf_status);
  467. if (flush_register && hw_ctl->ops.get_hw_fence_status)
  468. fence_ready = hw_ctl->ops.get_hw_fence_status(hw_ctl);
  469. SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0,
  470. old_cnt, atomic_read(&phys_enc->pending_kickoff_cnt),
  471. reset_status ? SDE_EVTLOG_ERROR : 0,
  472. flush_register, event,
  473. atomic_read(&phys_enc->pending_retire_fence_cnt),
  474. intf_status.frame_count, intf_status.line_count,
  475. fence_ready);
  476. /* Signal any waiting atomic commit thread */
  477. wake_up_all(&phys_enc->pending_kickoff_wq);
  478. SDE_ATRACE_END("vblank_irq");
  479. }
  480. static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
  481. {
  482. struct sde_encoder_phys *phys_enc = arg;
  483. if (!phys_enc)
  484. return;
  485. if (phys_enc->parent_ops.handle_underrun_virt)
  486. phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
  487. phys_enc);
  488. }
  489. static void _sde_encoder_phys_vid_setup_irq_hw_idx(
  490. struct sde_encoder_phys *phys_enc)
  491. {
  492. struct sde_encoder_irq *irq;
  493. /*
  494. * Initialize irq->hw_idx only when irq is not registered.
  495. * Prevent invalidating irq->irq_idx as modeset may be
  496. * called many times during dfps.
  497. */
  498. irq = &phys_enc->irq[INTR_IDX_VSYNC];
  499. if (irq->irq_idx < 0)
  500. irq->hw_idx = phys_enc->intf_idx;
  501. irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
  502. if (irq->irq_idx < 0)
  503. irq->hw_idx = phys_enc->intf_idx;
  504. }
  505. static void sde_encoder_phys_vid_cont_splash_mode_set(
  506. struct sde_encoder_phys *phys_enc,
  507. struct drm_display_mode *adj_mode)
  508. {
  509. if (!phys_enc || !adj_mode) {
  510. SDE_ERROR("invalid args\n");
  511. return;
  512. }
  513. phys_enc->cached_mode = *adj_mode;
  514. phys_enc->enable_state = SDE_ENC_ENABLED;
  515. _sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
  516. }
  517. static void sde_encoder_phys_vid_mode_set(
  518. struct sde_encoder_phys *phys_enc,
  519. struct drm_display_mode *mode,
  520. struct drm_display_mode *adj_mode, bool *reinit_mixers)
  521. {
  522. struct sde_rm *rm;
  523. struct sde_rm_hw_iter iter;
  524. int i, instance;
  525. struct sde_encoder_phys_vid *vid_enc;
  526. if (!phys_enc || !phys_enc->sde_kms) {
  527. SDE_ERROR("invalid encoder/kms\n");
  528. return;
  529. }
  530. rm = &phys_enc->sde_kms->rm;
  531. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  532. if (adj_mode) {
  533. phys_enc->cached_mode = *adj_mode;
  534. drm_mode_debug_printmodeline(adj_mode);
  535. SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
  536. }
  537. instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
  538. /* Retrieve previously allocated HW Resources. Shouldn't fail */
  539. sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
  540. for (i = 0; i <= instance; i++) {
  541. if (sde_rm_get_hw(rm, &iter)) {
  542. if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
  543. *reinit_mixers = true;
  544. SDE_EVT32(phys_enc->hw_ctl->idx,
  545. to_sde_hw_ctl(iter.hw)->idx);
  546. }
  547. phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
  548. }
  549. }
  550. if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
  551. SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
  552. PTR_ERR(phys_enc->hw_ctl));
  553. phys_enc->hw_ctl = NULL;
  554. return;
  555. }
  556. sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_INTF);
  557. for (i = 0; i <= instance; i++) {
  558. if (sde_rm_get_hw(rm, &iter))
  559. phys_enc->hw_intf = to_sde_hw_intf(iter.hw);
  560. }
  561. if (IS_ERR_OR_NULL(phys_enc->hw_intf)) {
  562. SDE_ERROR_VIDENC(vid_enc, "failed to init intf: %ld\n",
  563. PTR_ERR(phys_enc->hw_intf));
  564. phys_enc->hw_intf = NULL;
  565. return;
  566. }
  567. _sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
  568. phys_enc->kickoff_timeout_ms =
  569. sde_encoder_helper_get_kickoff_timeout_ms(phys_enc->parent);
  570. }
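/*
 * Vblank IRQ enable/disable is refcounted and only the master encoder
 * registers the VSYNC interrupt; slave encoders do not report vblank.
 */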
  571. static int sde_encoder_phys_vid_control_vblank_irq(
  572. struct sde_encoder_phys *phys_enc,
  573. bool enable)
  574. {
  575. int ret = 0;
  576. struct sde_encoder_phys_vid *vid_enc;
  577. int refcount;
  578. if (!phys_enc) {
  579. SDE_ERROR("invalid encoder\n");
  580. return -EINVAL;
  581. }
  582. mutex_lock(phys_enc->vblank_ctl_lock);
  583. refcount = atomic_read(&phys_enc->vblank_refcount);
  584. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  585. /* Slave encoders don't report vblank */
  586. if (!sde_encoder_phys_vid_is_master(phys_enc))
  587. goto end;
  588. /* protect against negative */
  589. if (!enable && refcount == 0) {
  590. ret = -EINVAL;
  591. goto end;
  592. }
  593. SDE_DEBUG_VIDENC(vid_enc, "[%pS] enable=%d/%d\n",
  594. __builtin_return_address(0),
  595. enable, atomic_read(&phys_enc->vblank_refcount));
  596. SDE_EVT32(DRMID(phys_enc->parent), enable,
  597. atomic_read(&phys_enc->vblank_refcount));
  598. if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) {
  599. ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
  600. if (ret)
  601. atomic_dec_return(&phys_enc->vblank_refcount);
  602. } else if (!enable &&
  603. atomic_dec_return(&phys_enc->vblank_refcount) == 0) {
  604. ret = sde_encoder_helper_unregister_irq(phys_enc,
  605. INTR_IDX_VSYNC);
  606. if (ret)
  607. atomic_inc_return(&phys_enc->vblank_refcount);
  608. }
  609. end:
  610. if (ret) {
  611. SDE_ERROR_VIDENC(vid_enc,
  612. "control vblank irq error %d, enable %d\n",
  613. ret, enable);
  614. SDE_EVT32(DRMID(phys_enc->parent),
  615. phys_enc->hw_intf->idx - INTF_0,
  616. enable, refcount, SDE_EVTLOG_ERROR);
  617. }
  618. mutex_unlock(phys_enc->vblank_ctl_lock);
  619. return ret;
  620. }
  621. static bool sde_encoder_phys_vid_wait_dma_trigger(
  622. struct sde_encoder_phys *phys_enc)
  623. {
  624. struct sde_encoder_phys_vid *vid_enc;
  625. struct sde_hw_intf *intf;
  626. struct sde_hw_ctl *ctl;
  627. struct intf_status status;
  628. if (!phys_enc) {
  629. SDE_ERROR("invalid encoder\n");
  630. return false;
  631. }
  632. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  633. intf = phys_enc->hw_intf;
  634. ctl = phys_enc->hw_ctl;
  635. if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
  636. SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
  637. phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
  638. return false;
  639. }
  640. if (!intf->ops.get_status)
  641. return false;
  642. intf->ops.get_status(intf, &status);
  643. /* if interface is not enabled, return true to wait for dma trigger */
  644. return status.is_en ? false : true;
  645. }
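/*
 * Enable path: apply split config, set up the timing engine and stage the
 * INTF (plus merge-3d / periph where applicable) flush bits. The actual
 * CTL flush and timing-engine enable are triggered later by the kickoff
 * framework (see sde_encoder_phys_vid_handle_post_kickoff()).
 */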
  646. static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
  647. {
  648. struct msm_drm_private *priv;
  649. struct sde_encoder_phys_vid *vid_enc;
  650. struct sde_hw_intf *intf;
  651. struct sde_hw_ctl *ctl;
  652. if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
  653. !phys_enc->parent->dev->dev_private ||
  654. !phys_enc->sde_kms) {
  655. SDE_ERROR("invalid encoder/device\n");
  656. return;
  657. }
  658. priv = phys_enc->parent->dev->dev_private;
  659. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  660. intf = phys_enc->hw_intf;
  661. ctl = phys_enc->hw_ctl;
  662. if (!phys_enc->hw_intf || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
  663. SDE_ERROR("invalid hw_intf %d hw_ctl %d hw_pp %d\n",
  664. !phys_enc->hw_intf, !phys_enc->hw_ctl,
  665. !phys_enc->hw_pp);
  666. return;
  667. }
  668. if (!ctl->ops.update_bitmask) {
  669. SDE_ERROR("invalid hw_ctl ops %d\n", ctl->idx);
  670. return;
  671. }
  672. SDE_DEBUG_VIDENC(vid_enc, "\n");
  673. if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
  674. return;
  675. if (!phys_enc->cont_splash_enabled)
  676. sde_encoder_helper_split_config(phys_enc,
  677. phys_enc->hw_intf->idx);
  678. sde_encoder_phys_vid_setup_timing_engine(phys_enc);
  679. /*
  680. * For cases where both the interfaces are connected to same ctl,
  681. * set the flush bit for both master and slave.
  682. * For single flush cases (dual-ctl or pp-split), skip setting the
  683. * flush bit for the slave intf, since both intfs use same ctl
  684. * and HW will only flush the master.
  685. */
  686. if (!test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
  687. sde_encoder_phys_needs_single_flush(phys_enc) &&
  688. !sde_encoder_phys_vid_is_master(phys_enc))
  689. goto skip_flush;
  690. /**
  691. * skip flushing intf during cont. splash handoff since bootloader
  692. * has already enabled the hardware and is single buffered.
  693. */
  694. if (phys_enc->cont_splash_enabled) {
  695. SDE_DEBUG_VIDENC(vid_enc,
  696. "skipping intf flush bit set as cont. splash is enabled\n");
  697. goto skip_flush;
  698. }
  699. ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_INTF, intf->idx, 1);
  700. if (phys_enc->hw_pp->merge_3d)
  701. ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_MERGE_3D,
  702. phys_enc->hw_pp->merge_3d->idx, 1);
  703. if (phys_enc->hw_intf->cap->type == INTF_DP &&
  704. phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
  705. phys_enc->comp_ratio)
  706. ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_PERIPH, intf->idx, 1);
  707. skip_flush:
  708. SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d intf %d\n",
  709. ctl->idx - CTL_0, intf->idx);
  710. SDE_EVT32(DRMID(phys_enc->parent),
  711. atomic_read(&phys_enc->pending_retire_fence_cnt));
  712. /* ctl_flush & timing engine enable will be triggered by framework */
  713. if (phys_enc->enable_state == SDE_ENC_DISABLED)
  714. phys_enc->enable_state = SDE_ENC_ENABLING;
  715. }
  716. static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
  717. {
  718. struct sde_encoder_phys_vid *vid_enc;
  719. if (!phys_enc) {
  720. SDE_ERROR("invalid encoder\n");
  721. return;
  722. }
  723. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  724. SDE_DEBUG_VIDENC(vid_enc, "\n");
  725. kfree(vid_enc);
  726. }
  727. static void sde_encoder_phys_vid_get_hw_resources(
  728. struct sde_encoder_phys *phys_enc,
  729. struct sde_encoder_hw_resources *hw_res,
  730. struct drm_connector_state *conn_state)
  731. {
  732. struct sde_encoder_phys_vid *vid_enc;
  733. if (!phys_enc || !hw_res) {
  734. SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
  735. !phys_enc, !hw_res, !conn_state);
  736. return;
  737. }
  738. if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
  739. SDE_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
  740. return;
  741. }
  742. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  743. SDE_DEBUG_VIDENC(vid_enc, "\n");
  744. hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
  745. }
  746. static int _sde_encoder_phys_vid_wait_for_vblank(
  747. struct sde_encoder_phys *phys_enc, bool notify)
  748. {
  749. struct sde_encoder_wait_info wait_info = {0};
  750. int ret = 0, new_cnt;
  751. u32 event = SDE_ENCODER_FRAME_EVENT_ERROR |
  752. SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE |
  753. SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
  754. struct drm_connector *conn;
  755. struct sde_hw_ctl *hw_ctl;
  756. u32 flush_register = 0xebad;
  757. bool timeout = false;
  758. if (!phys_enc || !phys_enc->hw_ctl) {
  759. pr_err("invalid encoder\n");
  760. return -EINVAL;
  761. }
  762. hw_ctl = phys_enc->hw_ctl;
  763. conn = phys_enc->connector;
  764. wait_info.wq = &phys_enc->pending_kickoff_wq;
  765. wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
  766. wait_info.timeout_ms = phys_enc->kickoff_timeout_ms;
  767. /* Wait for kickoff to complete */
  768. ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
  769. &wait_info);
  770. /*
  771. * if hwfencing enabled, try again to wait for up to the extended timeout time in
  772. * increments as long as fence has not been signaled.
  773. */
  774. if (ret == -ETIMEDOUT && phys_enc->sde_kms->catalog->hw_fence_rev)
  775. ret = sde_encoder_helper_hw_fence_extended_wait(phys_enc, phys_enc->hw_ctl,
  776. &wait_info, INTR_IDX_VSYNC);
  777. if (ret == -ETIMEDOUT) {
  778. new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
  779. timeout = true;
  780. /*
  781. * Reset ret when flush register is consumed. This handles a race condition between
  782. * irq wait timeout handler reading the register status and the actual IRQ handler
  783. */
  784. if (hw_ctl->ops.get_flush_register)
  785. flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
  786. if (!flush_register)
  787. ret = 0;
  788. /* if we timeout after the extended wait, reset mixers and do sw override */
  789. if (ret && phys_enc->sde_kms->catalog->hw_fence_rev)
  790. sde_encoder_helper_hw_fence_sw_override(phys_enc, hw_ctl);
  791. SDE_EVT32(DRMID(phys_enc->parent), new_cnt, flush_register, ret,
  792. SDE_EVTLOG_FUNC_CASE1);
  793. }
  794. if (notify && timeout && atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)
  795. && phys_enc->parent_ops.handle_frame_done) {
  796. phys_enc->parent_ops.handle_frame_done(phys_enc->parent, phys_enc, event);
  797. /* notify only on actual timeout cases */
  798. if ((ret == -ETIMEDOUT) && sde_encoder_recovery_events_enabled(phys_enc->parent))
  799. sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY,
  800. sizeof(uint8_t), SDE_RECOVERY_HARD_RESET);
  801. }
  802. SDE_EVT32(DRMID(phys_enc->parent), event, notify, timeout, ret,
  803. ret ? SDE_EVTLOG_FATAL : 0, SDE_EVTLOG_FUNC_EXIT);
  804. return ret;
  805. }
  806. static int sde_encoder_phys_vid_wait_for_vblank(
  807. struct sde_encoder_phys *phys_enc)
  808. {
  809. return _sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
  810. }
  811. static void sde_encoder_phys_vid_update_txq(struct sde_encoder_phys *phys_enc)
  812. {
  813. struct sde_encoder_virt *sde_enc;
  814. if (!phys_enc)
  815. return;
  816. sde_enc = to_sde_encoder_virt(phys_enc->parent);
  817. if (!sde_enc)
  818. return;
  819. sde_encoder_helper_update_out_fence_txq(sde_enc, true);
  820. }
  821. static int sde_encoder_phys_vid_wait_for_commit_done(
  822. struct sde_encoder_phys *phys_enc)
  823. {
  824. int rc;
  825. rc = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
  826. if (rc)
  827. sde_encoder_helper_phys_reset(phys_enc);
  828. /* Update TxQ for the incoming frame */
  829. sde_encoder_phys_vid_update_txq(phys_enc);
  830. return rc;
  831. }
  832. static int sde_encoder_phys_vid_wait_for_vblank_no_notify(
  833. struct sde_encoder_phys *phys_enc)
  834. {
  835. return _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
  836. }
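/*
 * Before a new kickoff, wait for any hardware-initiated CTL reset to
 * finish. On failure: dump state once, notify the registered recovery
 * listener (or panic when none is registered) and flag the encoder with
 * SDE_ENC_ERR_NEEDS_HW_RESET so a CTL reset precedes the next flush.
 */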
  837. static int sde_encoder_phys_vid_prepare_for_kickoff(
  838. struct sde_encoder_phys *phys_enc,
  839. struct sde_encoder_kickoff_params *params)
  840. {
  841. struct sde_encoder_phys_vid *vid_enc;
  842. struct sde_hw_ctl *ctl;
  843. bool recovery_events;
  844. struct drm_connector *conn;
  845. int rc;
  846. int irq_enable;
  847. if (!phys_enc || !params || !phys_enc->hw_ctl) {
  848. SDE_ERROR("invalid encoder/parameters\n");
  849. return -EINVAL;
  850. }
  851. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  852. ctl = phys_enc->hw_ctl;
  853. if (!ctl->ops.wait_reset_status)
  854. return 0;
  855. conn = phys_enc->connector;
  856. recovery_events = sde_encoder_recovery_events_enabled(
  857. phys_enc->parent);
  858. /*
  859. * hw supports hardware initiated ctl reset, so before we kickoff a new
  860. * frame, need to check and wait for hw initiated ctl reset completion
  861. */
  862. rc = ctl->ops.wait_reset_status(ctl);
  863. if (rc) {
  864. SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
  865. ctl->idx, rc);
  866. ++vid_enc->error_count;
  867. /* to avoid flooding, only log first time, and "dead" time */
  868. if (vid_enc->error_count == 1) {
  869. SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
  870. mutex_lock(phys_enc->vblank_ctl_lock);
  871. irq_enable = atomic_read(&phys_enc->vblank_refcount);
  872. if (irq_enable)
  873. sde_encoder_helper_unregister_irq(
  874. phys_enc, INTR_IDX_VSYNC);
  875. SDE_DBG_DUMP(SDE_DBG_BUILT_IN_ALL);
  876. if (irq_enable)
  877. sde_encoder_helper_register_irq(
  878. phys_enc, INTR_IDX_VSYNC);
  879. mutex_unlock(phys_enc->vblank_ctl_lock);
  880. }
  881. /*
  882. * if the recovery event is registered by the user, don't panic;
  883. * trigger a panic on the first timeout only if no listener is registered
  884. */
  885. if (recovery_events)
  886. sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY,
  887. sizeof(uint8_t), SDE_RECOVERY_CAPTURE);
  888. else
  889. SDE_DBG_DUMP(0x0, "panic");
  890. /* request a ctl reset before the next flush */
  891. phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;
  892. } else {
  893. if (recovery_events && vid_enc->error_count)
  894. sde_connector_event_notify(conn,
  895. DRM_EVENT_SDE_HW_RECOVERY,
  896. sizeof(uint8_t),
  897. SDE_RECOVERY_SUCCESS);
  898. vid_enc->error_count = 0;
  899. }
  900. return rc;
  901. }
  902. static void sde_encoder_phys_vid_single_vblank_wait(
  903. struct sde_encoder_phys *phys_enc)
  904. {
  905. int ret;
  906. struct sde_encoder_phys_vid *vid_enc
  907. = to_sde_encoder_phys_vid(phys_enc);
  908. /*
  909. * Wait for a vsync so we know that ENABLE=0 has latched before
  910. * the (connector) source of the vsyncs gets disabled;
  911. * otherwise we end up in a funny state if we re-enable
  912. * before the disable latches, where some of the settings
  913. * changes for the new modeset (like the new scanout buffer)
  914. * don't latch properly.
  915. */
  916. ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
  917. if (ret) {
  918. SDE_ERROR_VIDENC(vid_enc,
  919. "failed to enable vblank irq: %d\n",
  920. ret);
  921. SDE_EVT32(DRMID(phys_enc->parent),
  922. phys_enc->hw_intf->idx - INTF_0, ret,
  923. SDE_EVTLOG_FUNC_CASE1,
  924. SDE_EVTLOG_ERROR);
  925. } else {
  926. ret = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
  927. if (ret) {
  928. atomic_set(&phys_enc->pending_kickoff_cnt, 0);
  929. SDE_ERROR_VIDENC(vid_enc,
  930. "failure waiting for disable: %d\n",
  931. ret);
  932. SDE_EVT32(DRMID(phys_enc->parent),
  933. phys_enc->hw_intf->idx - INTF_0, ret,
  934. SDE_EVTLOG_FUNC_CASE2,
  935. SDE_EVTLOG_ERROR);
  936. }
  937. sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
  938. }
  939. }
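/*
 * Disable path: turn the timing engine off under the encoder spinlock,
 * then wait for a vblank so ENABLE=0 latches; if the interface still
 * reports enabled, wait for one more vblank before the helper disable.
 * Skipped (except for state bookkeeping) when running in a trusted VM.
 */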
  940. static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
  941. {
  942. struct msm_drm_private *priv;
  943. struct sde_encoder_phys_vid *vid_enc;
  944. unsigned long lock_flags;
  945. struct intf_status intf_status = {0};
  946. if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
  947. !phys_enc->parent->dev->dev_private) {
  948. SDE_ERROR("invalid encoder/device\n");
  949. return;
  950. }
  951. priv = phys_enc->parent->dev->dev_private;
  952. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  953. if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
  954. SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
  955. !phys_enc->hw_intf, !phys_enc->hw_ctl);
  956. return;
  957. }
  958. SDE_DEBUG_VIDENC(vid_enc, "\n");
  959. if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
  960. return;
  961. else if (!sde_encoder_phys_vid_is_master(phys_enc))
  962. goto exit;
  963. if (phys_enc->enable_state == SDE_ENC_DISABLED) {
  964. SDE_ERROR("already disabled\n");
  965. return;
  966. }
  967. if (sde_in_trusted_vm(phys_enc->sde_kms))
  968. goto exit;
  969. spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
  970. phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
  971. sde_encoder_phys_inc_pending(phys_enc);
  972. spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
  973. if (phys_enc->hw_intf->ops.reset_counter)
  974. phys_enc->hw_intf->ops.reset_counter(phys_enc->hw_intf);
  975. sde_encoder_phys_vid_single_vblank_wait(phys_enc);
  976. if (phys_enc->hw_intf->ops.get_status)
  977. phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf,
  978. &intf_status);
  979. if (intf_status.is_en) {
  980. spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
  981. sde_encoder_phys_inc_pending(phys_enc);
  982. spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
  983. sde_encoder_phys_vid_single_vblank_wait(phys_enc);
  984. }
  985. sde_encoder_helper_phys_disable(phys_enc, NULL);
  986. exit:
  987. SDE_EVT32(DRMID(phys_enc->parent),
  988. atomic_read(&phys_enc->pending_retire_fence_cnt));
  989. phys_enc->vfp_cached = 0;
  990. phys_enc->enable_state = SDE_ENC_DISABLED;
  991. }
  992. static int sde_encoder_phys_vid_poll_for_active_region(struct sde_encoder_phys *phys_enc)
  993. {
  994. struct sde_encoder_phys_vid *vid_enc;
  995. struct intf_timing_params *timing;
  996. u32 line_cnt, v_inactive, poll_time_us, trial = 0;
  997. if (!phys_enc || !phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
  998. return -EINVAL;
  999. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  1000. timing = &vid_enc->timing_params;
  1001. /* return early if programmable fetch is not enabled or if this is not a DSI interface */
  1002. if (!programmable_fetch_get_num_lines(vid_enc, timing) ||
  1003. phys_enc->hw_intf->cap->type != INTF_DSI)
  1004. return 0;
  1005. poll_time_us = DIV_ROUND_UP(1000000, timing->vrefresh) / MAX_POLL_CNT;
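/*
 * e.g. at 60 fps: DIV_ROUND_UP(1000000, 60) = 16667 us per frame, so each
 * of the MAX_POLL_CNT (10) polls sleeps roughly 1666 us.
 */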
  1006. v_inactive = timing->v_front_porch + timing->v_back_porch + timing->vsync_pulse_width;
  1007. do {
  1008. usleep_range(poll_time_us, poll_time_us + 5);
  1009. line_cnt = phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
  1010. trial++;
  1011. } while ((trial < MAX_POLL_CNT) && (line_cnt < v_inactive));
  1012. return (trial >= MAX_POLL_CNT) ? -ETIMEDOUT : 0;
  1013. }
  1014. static void sde_encoder_phys_vid_handle_post_kickoff(
  1015. struct sde_encoder_phys *phys_enc)
  1016. {
  1017. unsigned long lock_flags;
  1018. struct sde_encoder_phys_vid *vid_enc;
  1019. u32 avr_mode;
  1020. u32 ret;
  1021. if (!phys_enc) {
  1022. SDE_ERROR("invalid encoder\n");
  1023. return;
  1024. }
  1025. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  1026. SDE_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
  1027. /*
  1028. * Video mode must flush CTL before enabling timing engine
  1029. * Video encoders need to turn on their interfaces now
  1030. */
  1031. if (phys_enc->enable_state == SDE_ENC_ENABLING) {
  1032. if (sde_encoder_phys_vid_is_master(phys_enc)) {
  1033. SDE_EVT32(DRMID(phys_enc->parent),
  1034. phys_enc->hw_intf->idx - INTF_0);
  1035. spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
  1036. phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf,
  1037. 1);
  1038. spin_unlock_irqrestore(phys_enc->enc_spinlock,
  1039. lock_flags);
  1040. ret = sde_encoder_phys_vid_poll_for_active_region(phys_enc);
  1041. if (ret)
  1042. SDE_DEBUG_VIDENC(vid_enc, "poll for active failed ret:%d\n", ret);
  1043. }
  1044. phys_enc->enable_state = SDE_ENC_ENABLED;
  1045. }
  1046. avr_mode = sde_connector_get_qsync_mode(phys_enc->connector);
  1047. if (avr_mode && vid_enc->base.hw_intf->ops.avr_trigger) {
  1048. vid_enc->base.hw_intf->ops.avr_trigger(vid_enc->base.hw_intf);
  1049. SDE_EVT32(DRMID(phys_enc->parent),
  1050. phys_enc->hw_intf->idx - INTF_0,
  1051. SDE_EVTLOG_FUNC_CASE9);
  1052. }
  1053. }
  1054. static void sde_encoder_phys_vid_prepare_for_commit(
  1055. struct sde_encoder_phys *phys_enc)
  1056. {
  1057. struct sde_connector_state *c_state;
  1058. if (!phys_enc || !phys_enc->parent) {
  1059. SDE_ERROR("invalid encoder parameters\n");
  1060. return;
  1061. }
  1062. if (phys_enc->connector && phys_enc->connector->state) {
  1063. c_state = to_sde_connector_state(phys_enc->connector->state);
  1064. if (!c_state) {
  1065. SDE_ERROR("invalid connector state\n");
  1066. return;
  1067. }
  1068. if (!msm_is_mode_seamless_vrr(&c_state->msm_mode)
  1069. && sde_connector_is_qsync_updated(phys_enc->connector))
  1070. _sde_encoder_phys_vid_avr_ctrl(phys_enc);
  1071. }
  1072. }
  1073. static void sde_encoder_phys_vid_irq_control(struct sde_encoder_phys *phys_enc,
  1074. bool enable)
  1075. {
  1076. struct sde_encoder_phys_vid *vid_enc;
  1077. int ret;
  1078. if (!phys_enc)
  1079. return;
  1080. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  1081. SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0,
  1082. enable, atomic_read(&phys_enc->vblank_refcount));
  1083. if (enable) {
  1084. ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
  1085. if (ret)
  1086. return;
  1087. sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
  1088. } else {
  1089. sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
  1090. sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
  1091. }
  1092. }
  1093. static int sde_encoder_phys_vid_get_line_count(
  1094. struct sde_encoder_phys *phys_enc)
  1095. {
  1096. if (!phys_enc)
  1097. return -EINVAL;
  1098. if (!sde_encoder_phys_vid_is_master(phys_enc))
  1099. return -EINVAL;
  1100. if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
  1101. return -EINVAL;
  1102. return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
  1103. }
  1104. static u32 sde_encoder_phys_vid_get_underrun_line_count(
  1105. struct sde_encoder_phys *phys_enc)
  1106. {
  1107. u32 underrun_linecount = 0xebadebad;
  1108. u32 intf_intr_status = 0xebadebad;
  1109. struct intf_status intf_status = {0};
  1110. if (!phys_enc)
  1111. return -EINVAL;
  1112. if (!sde_encoder_phys_vid_is_master(phys_enc) || !phys_enc->hw_intf)
  1113. return -EINVAL;
  1114. if (phys_enc->hw_intf->ops.get_status)
  1115. phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf,
  1116. &intf_status);
  1117. if (phys_enc->hw_intf->ops.get_underrun_line_count)
  1118. underrun_linecount =
  1119. phys_enc->hw_intf->ops.get_underrun_line_count(
  1120. phys_enc->hw_intf);
  1121. if (phys_enc->hw_intf->ops.get_intr_status)
  1122. intf_intr_status = phys_enc->hw_intf->ops.get_intr_status(
  1123. phys_enc->hw_intf);
  1124. SDE_EVT32(DRMID(phys_enc->parent), underrun_linecount,
  1125. intf_status.frame_count, intf_status.line_count,
  1126. intf_intr_status);
  1127. return underrun_linecount;
  1128. }
  1129. static int sde_encoder_phys_vid_wait_for_active(
  1130. struct sde_encoder_phys *phys_enc)
  1131. {
  1132. struct drm_display_mode mode;
  1133. struct sde_encoder_phys_vid *vid_enc;
  1134. u32 ln_cnt, min_ln_cnt, active_lns_cnt;
  1135. u32 retry = MAX_POLL_CNT;
  1136. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  1137. if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count) {
  1138. SDE_ERROR_VIDENC(vid_enc, "invalid vid_enc params\n");
  1139. return -EINVAL;
  1140. }
  1141. mode = phys_enc->cached_mode;
  1142. min_ln_cnt = (mode.vtotal - mode.vsync_start) +
  1143. (mode.vsync_end - mode.vsync_start);
  1144. active_lns_cnt = mode.vdisplay;
  1145. while (retry) {
  1146. ln_cnt = phys_enc->hw_intf->ops.get_line_count(
  1147. phys_enc->hw_intf);
  1148. if ((ln_cnt >= min_ln_cnt) &&
  1149. (ln_cnt < (active_lns_cnt + min_ln_cnt))) {
  1150. SDE_DEBUG_VIDENC(vid_enc,
  1151. "Needed lines left line_cnt=%d\n",
  1152. ln_cnt);
  1153. return 0;
  1154. }
  1155. SDE_ERROR_VIDENC(vid_enc, "line count is less. line_cnt = %d\n", ln_cnt);
  1156. udelay(POLL_TIME_USEC_FOR_LN_CNT);
  1157. retry--;
  1158. }
  1159. return -EINVAL;
  1160. }
  1161. void sde_encoder_phys_vid_add_enc_to_minidump(struct sde_encoder_phys *phys_enc)
  1162. {
  1163. struct sde_encoder_phys_vid *vid_enc;
  1164. vid_enc = to_sde_encoder_phys_vid(phys_enc);
  1165. sde_mini_dump_add_va_region("sde_enc_phys_vid", sizeof(*vid_enc), vid_enc);
  1166. }
  1167. static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
  1168. {
  1169. ops->is_master = sde_encoder_phys_vid_is_master;
  1170. ops->mode_set = sde_encoder_phys_vid_mode_set;
  1171. ops->cont_splash_mode_set = sde_encoder_phys_vid_cont_splash_mode_set;
  1172. ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
  1173. ops->enable = sde_encoder_phys_vid_enable;
  1174. ops->disable = sde_encoder_phys_vid_disable;
  1175. ops->destroy = sde_encoder_phys_vid_destroy;
  1176. ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
  1177. ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq;
  1178. ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done;
  1179. ops->wait_for_vblank = sde_encoder_phys_vid_wait_for_vblank_no_notify;
  1180. ops->wait_for_tx_complete = sde_encoder_phys_vid_wait_for_vblank;
  1181. ops->irq_control = sde_encoder_phys_vid_irq_control;
  1182. ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff;
  1183. ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
  1184. ops->needs_single_flush = sde_encoder_phys_needs_single_flush;
  1185. ops->setup_misr = sde_encoder_helper_setup_misr;
  1186. ops->collect_misr = sde_encoder_helper_collect_misr;
  1187. ops->trigger_flush = sde_encoder_helper_trigger_flush;
  1188. ops->hw_reset = sde_encoder_helper_hw_reset;
  1189. ops->get_line_count = sde_encoder_phys_vid_get_line_count;
  1190. ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
  1191. ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
  1192. ops->prepare_commit = sde_encoder_phys_vid_prepare_for_commit;
  1193. ops->get_underrun_line_count =
  1194. sde_encoder_phys_vid_get_underrun_line_count;
  1195. ops->add_to_minidump = sde_encoder_phys_vid_add_enc_to_minidump;
  1196. }
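/*
 * sde_encoder_phys_vid_init - allocate the video-mode physical encoder,
 * wire up its ops and the VSYNC/underrun IRQ callback tables, and
 * initialise the kickoff/retire-fence counters and wait queue.
 */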
  1197. struct sde_encoder_phys *sde_encoder_phys_vid_init(
  1198. struct sde_enc_phys_init_params *p)
  1199. {
  1200. struct sde_encoder_phys *phys_enc = NULL;
  1201. struct sde_encoder_phys_vid *vid_enc = NULL;
  1202. struct sde_hw_mdp *hw_mdp;
  1203. struct sde_encoder_irq *irq;
  1204. int i, ret = 0;
  1205. if (!p) {
  1206. ret = -EINVAL;
  1207. goto fail;
  1208. }
  1209. vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
  1210. if (!vid_enc) {
  1211. ret = -ENOMEM;
  1212. goto fail;
  1213. }
  1214. phys_enc = &vid_enc->base;
  1215. hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
  1216. if (IS_ERR_OR_NULL(hw_mdp)) {
  1217. ret = PTR_ERR(hw_mdp);
  1218. SDE_ERROR("failed to get mdptop\n");
  1219. goto fail;
  1220. }
  1221. phys_enc->hw_mdptop = hw_mdp;
  1222. phys_enc->intf_idx = p->intf_idx;
  1223. SDE_DEBUG_VIDENC(vid_enc, "\n");
  1224. sde_encoder_phys_vid_init_ops(&phys_enc->ops);
  1225. phys_enc->parent = p->parent;
  1226. phys_enc->parent_ops = p->parent_ops;
  1227. phys_enc->sde_kms = p->sde_kms;
  1228. phys_enc->split_role = p->split_role;
  1229. phys_enc->intf_mode = INTF_MODE_VIDEO;
  1230. phys_enc->enc_spinlock = p->enc_spinlock;
  1231. phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
  1232. phys_enc->comp_type = p->comp_type;
  1233. phys_enc->kickoff_timeout_ms = DEFAULT_KICKOFF_TIMEOUT_MS;
  1234. for (i = 0; i < INTR_IDX_MAX; i++) {
  1235. irq = &phys_enc->irq[i];
  1236. INIT_LIST_HEAD(&irq->cb.list);
  1237. irq->irq_idx = -EINVAL;
  1238. irq->hw_idx = -EINVAL;
  1239. irq->cb.arg = phys_enc;
  1240. }
  1241. irq = &phys_enc->irq[INTR_IDX_VSYNC];
  1242. irq->name = "vsync_irq";
  1243. irq->intr_type = SDE_IRQ_TYPE_INTF_VSYNC;
  1244. irq->intr_idx = INTR_IDX_VSYNC;
  1245. irq->cb.func = sde_encoder_phys_vid_vblank_irq;
  1246. irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
  1247. irq->name = "underrun";
  1248. irq->intr_type = SDE_IRQ_TYPE_INTF_UNDER_RUN;
  1249. irq->intr_idx = INTR_IDX_UNDERRUN;
  1250. irq->cb.func = sde_encoder_phys_vid_underrun_irq;
  1251. atomic_set(&phys_enc->vblank_refcount, 0);
  1252. atomic_set(&phys_enc->pending_kickoff_cnt, 0);
  1253. atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
  1254. init_waitqueue_head(&phys_enc->pending_kickoff_wq);
  1255. phys_enc->enable_state = SDE_ENC_DISABLED;
  1256. SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
  1257. return phys_enc;
  1258. fail:
  1259. SDE_ERROR("failed to create encoder\n");
  1260. if (vid_enc)
  1261. sde_encoder_phys_vid_destroy(phys_enc);
  1262. return ERR_PTR(ret);
  1263. }