sde_vbif.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include <linux/debugfs.h>
  8. #include "sde_vbif.h"
  9. #include "sde_hw_vbif.h"
  10. #include "sde_trace.h"
  11. #include "sde_rotator_vbif.h"
  12. #define MAX_XIN_CLIENT 16
  13. #define VBIF_CLK_CLIENT(x) sde_kms->vbif_clk_clients[x]
  14. #define VBIF_CLK_CLIENT_NAME(x) sde_clk_ctrl_type_s[x]
  15. int sde_vbif_clk_register(struct sde_kms *sde_kms, struct sde_vbif_clk_client *client)
  16. {
  17. enum sde_clk_ctrl_type clk_ctrl;
  18. if (!sde_kms || !client)
  19. return -EINVAL;
  20. clk_ctrl = client->clk_ctrl;
  21. if (!SDE_CLK_CTRL_VALID(clk_ctrl))
  22. return -EINVAL;
  23. VBIF_CLK_CLIENT(clk_ctrl).hw = client->hw;
  24. VBIF_CLK_CLIENT(clk_ctrl).clk_ctrl = clk_ctrl;
  25. memcpy(&VBIF_CLK_CLIENT(clk_ctrl).ops, &client->ops, sizeof(struct sde_vbif_clk_ops));
  26. SDE_DEBUG("registering hw:%pK clk_ctrl:%s\n", client->hw, VBIF_CLK_CLIENT_NAME(clk_ctrl));
  27. return 0;
  28. }
  29. /**
  30. * _sde_vbif_setup_clk_supported - check if VBIF setup_clk_force_ctrl API is supported
  31. * @sde_kms: Pointer to sde_kms object
  32. * @clk_ctrl: clock to be controlled
  33. * @return: true if client is supported, otherwise false
  34. */
  35. static bool _sde_vbif_setup_clk_supported(struct sde_kms *sde_kms, enum sde_clk_ctrl_type clk_ctrl)
  36. {
  37. bool supported = false;
  38. bool has_split_vbif = test_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_kms->catalog->features);
  39. if (!SDE_CLK_CTRL_VALID(clk_ctrl))
  40. return false;
  41. if ((has_split_vbif && VBIF_CLK_CLIENT(clk_ctrl).ops.setup_clk_force_ctrl) ||
  42. (!has_split_vbif && sde_kms->hw_mdp->ops.setup_clk_force_ctrl))
  43. supported = true;
  44. SDE_DEBUG("split_vbif:%d type:%s supported:%d\n", has_split_vbif,
  45. VBIF_CLK_CLIENT_NAME(clk_ctrl), supported);
  46. return supported;
  47. }
  48. /**
  49. * _sde_vbif_get_clk_supported - check if VBIF get_clk_ctrl_status API is supported
  50. * @sde_kms: Pointer to sde_kms object
  51. * @clk_ctrl: clock to be controlled
  52. * @return: true if client is supported, otherwise false
  53. */
  54. static bool _sde_vbif_get_clk_supported(struct sde_kms *sde_kms, enum sde_clk_ctrl_type clk_ctrl)
  55. {
  56. bool supported = false;
  57. bool has_split_vbif = test_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_kms->catalog->features);
  58. if ((has_split_vbif && VBIF_CLK_CLIENT(clk_ctrl).ops.get_clk_ctrl_status) ||
  59. (!has_split_vbif && sde_kms->hw_mdp->ops.get_clk_ctrl_status))
  60. supported = true;
  61. SDE_DEBUG("split_vbif:%d type:%s supported:%d\n", has_split_vbif,
  62. VBIF_CLK_CLIENT_NAME(clk_ctrl), supported);
  63. return supported;
  64. }
  65. /**
  66. * _sde_vbif_setup_clk_force_ctrl - set clock force control
  67. * @sde_kms: Pointer to sde_kms object
  68. * @clk_ctrl: clock to be controlled
  69. * @enable: force on enable
  70. * @return: if the clock is forced-on by this function
  71. */
  72. static int _sde_vbif_setup_clk_force_ctrl(struct sde_kms *sde_kms, enum sde_clk_ctrl_type clk_ctrl,
  73. bool enable)
  74. {
  75. int rc = 0;
  76. struct sde_hw_blk_reg_map *hw = VBIF_CLK_CLIENT(clk_ctrl).hw;
  77. bool has_split_vbif = test_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_kms->catalog->features);
  78. if (has_split_vbif)
  79. rc = VBIF_CLK_CLIENT(clk_ctrl).ops.setup_clk_force_ctrl(hw, clk_ctrl, enable);
  80. else
  81. rc = sde_kms->hw_mdp->ops.setup_clk_force_ctrl(sde_kms->hw_mdp, clk_ctrl, enable);
  82. SDE_DEBUG("split_vbif:%d type:%s en:%d rc:%d\n", has_split_vbif,
  83. VBIF_CLK_CLIENT_NAME(clk_ctrl), enable, rc);
  84. return rc;
  85. }
  86. /**
  87. * _sde_vbif_get_clk_ctrl_status - get clock control status
  88. * @sde_kms: Pointer to sde_kms object
  89. * @clk_ctrl: clock to be controlled
  90. * @status: returns true if clock is on
  91. * @return: 0 if success, otherwise return error code
  92. */
  93. static int _sde_vbif_get_clk_ctrl_status(struct sde_kms *sde_kms, enum sde_clk_ctrl_type clk_ctrl,
  94. bool *status)
  95. {
  96. int rc = 0;
  97. struct sde_hw_blk_reg_map *hw = VBIF_CLK_CLIENT(clk_ctrl).hw;
  98. bool has_split_vbif = test_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_kms->catalog->features);
  99. if (has_split_vbif)
  100. rc = VBIF_CLK_CLIENT(clk_ctrl).ops.get_clk_ctrl_status(hw, clk_ctrl, status);
  101. else
  102. rc = sde_kms->hw_mdp->ops.get_clk_ctrl_status(sde_kms->hw_mdp, clk_ctrl, status);
  103. SDE_DEBUG("split_vbif:%d type:%s status:%d rc:%d\n", has_split_vbif,
  104. VBIF_CLK_CLIENT_NAME(clk_ctrl), *status, rc);
  105. return rc;
  106. }
/**
 * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
 * @vbif: Pointer to hardware vbif driver
 * @xin_id: Client interface identifier
 * @return: 0 if success; error code otherwise
 */
static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
{
	ktime_t timeout;
	bool status;
	int rc;

	if (!vbif || !vbif->cap || !vbif->ops.get_xin_halt_status) {
		SDE_ERROR("invalid arguments vbif %d\n", !vbif);
		return -EINVAL;
	}

	/* xin_halt_timeout is added to "now" in microseconds */
	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
	for (;;) {
		status = vbif->ops.get_xin_halt_status(vbif, xin_id);
		if (status)
			break;
		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
			/*
			 * Re-sample once after expiry so a halt that landed
			 * during the final sleep is not reported as a timeout.
			 */
			status = vbif->ops.get_xin_halt_status(vbif, xin_id);
			break;
		}
		/* back off between polls to avoid hammering the register */
		usleep_range(501, 1000);
	}

	if (!status) {
		rc = -ETIMEDOUT;
		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
				vbif->idx - VBIF_0, xin_id);
	} else {
		rc = 0;
		SDE_DEBUG("VBIF %d client %d is halted\n",
				vbif->idx - VBIF_0, xin_id);
	}

	return rc;
}
  144. static int _sde_vbif_wait_for_axi_halt(struct sde_hw_vbif *vbif)
  145. {
  146. int rc;
  147. if (!vbif || !vbif->cap || !vbif->ops.get_axi_halt_status) {
  148. SDE_ERROR("invalid arguments vbif %d\n", !vbif);
  149. return -EINVAL;
  150. }
  151. rc = vbif->ops.get_axi_halt_status(vbif);
  152. if (rc)
  153. SDE_ERROR("VBIF %d AXI port(s) not halting. TIMEDOUT.\n",
  154. vbif->idx - VBIF_0);
  155. else
  156. SDE_DEBUG("VBIF %d AXI port(s) halted\n",
  157. vbif->idx - VBIF_0);
  158. return rc;
  159. }
/**
 * sde_vbif_halt_plane_xin - halt then release an unused plane's xin client
 * @sde_kms: Pointer to sde_kms object
 * @xin_id: Client interface identifier
 * @clk_ctrl: clock type of the client, used to keep its clock on while halting
 * @return: 0 on success or if the client is already halted; error code otherwise
 */
int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id, u32 clk_ctrl)
{
	struct sde_hw_vbif *vbif = NULL;
	struct sde_hw_mdp *mdp;
	bool forced_on = false;
	bool status;
	int rc = 0;

	if (!sde_kms) {
		SDE_ERROR("invalid argument\n");
		return -EINVAL;
	}

	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
		SDE_DEBUG("vbif operations not permitted\n");
		return 0;
	}

	/* only the realtime VBIF is operated on here */
	vbif = sde_kms->hw_vbif[VBIF_RT];
	mdp = sde_kms->hw_mdp;
	if (!vbif || !mdp || !vbif->ops.get_xin_halt_status ||
			!vbif->ops.set_xin_halt ||
			!_sde_vbif_setup_clk_supported(sde_kms, clk_ctrl)) {
		SDE_ERROR("invalid vbif or mdp arguments\n");
		return -EINVAL;
	}

	mutex_lock(&vbif->mutex);

	SDE_EVT32_VERBOSE(vbif->idx, xin_id);

	/*
	 * If status is 0, then make sure client clock is not gated
	 * while halting by forcing it ON only if it was not previously
	 * forced on. If status is 1 then its already halted.
	 */
	status = vbif->ops.get_xin_halt_status(vbif, xin_id);
	if (status) {
		mutex_unlock(&vbif->mutex);
		return 0;
	}

	forced_on = _sde_vbif_setup_clk_force_ctrl(sde_kms, clk_ctrl, true);

	/* send halt request for unused plane's xin client */
	vbif->ops.set_xin_halt(vbif, xin_id, true);

	rc = _sde_vbif_wait_for_xin_halt(vbif, xin_id);
	if (rc) {
		SDE_ERROR(
		"wait failed for pipe halt:xin_id %u, clk_ctrl %u, rc %u\n",
			xin_id, clk_ctrl, rc);
		SDE_EVT32(xin_id, clk_ctrl, rc, SDE_EVTLOG_ERROR);
	}

	/* open xin client to enable transactions */
	vbif->ops.set_xin_halt(vbif, xin_id, false);
	/* undo the clock force-on only if this function applied it */
	if (forced_on)
		_sde_vbif_setup_clk_force_ctrl(sde_kms, clk_ctrl, false);

	mutex_unlock(&vbif->mutex);

	return rc;
}
  212. /**
  213. * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
  214. * @vbif: Pointer to hardware vbif driver
  215. * @ot_lim: Pointer to OT limit to be modified
  216. * @params: Pointer to usecase parameters
  217. */
  218. static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
  219. u32 *ot_lim, struct sde_vbif_set_ot_params *params)
  220. {
  221. u64 pps;
  222. const struct sde_vbif_dynamic_ot_tbl *tbl;
  223. u32 i;
  224. if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
  225. return;
  226. /* Dynamic OT setting done only for WFD */
  227. if (!params->is_wfd)
  228. return;
  229. pps = params->frame_rate;
  230. pps *= params->width;
  231. pps *= params->height;
  232. tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
  233. &vbif->cap->dynamic_ot_wr_tbl;
  234. for (i = 0; i < tbl->count; i++) {
  235. if (pps <= tbl->cfg[i].pps) {
  236. *ot_lim = tbl->cfg[i].ot_limit;
  237. break;
  238. }
  239. }
  240. SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
  241. vbif->idx - VBIF_0, params->xin_id,
  242. params->width, params->height, params->frame_rate,
  243. pps, *ot_lim);
  244. }
  245. /**
  246. * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
  247. * @vbif: Pointer to hardware vbif driver
  248. * @params: Pointer to usecase parameters
  249. * @return: OT limit
  250. */
  251. static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
  252. struct sde_vbif_set_ot_params *params)
  253. {
  254. u32 ot_lim = 0;
  255. u32 val;
  256. if (!vbif || !vbif->cap) {
  257. SDE_ERROR("invalid arguments vbif %d\n", !vbif);
  258. return -EINVAL;
  259. }
  260. if (vbif->cap->default_ot_wr_limit && !params->rd)
  261. ot_lim = vbif->cap->default_ot_wr_limit;
  262. else if (vbif->cap->default_ot_rd_limit && params->rd)
  263. ot_lim = vbif->cap->default_ot_rd_limit;
  264. /*
  265. * If default ot is not set from dt/catalog,
  266. * then do not configure it.
  267. */
  268. if (ot_lim == 0)
  269. goto exit;
  270. /* Modify the limits if the target and the use case requires it */
  271. _sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
  272. if (vbif && vbif->ops.get_limit_conf) {
  273. val = vbif->ops.get_limit_conf(vbif,
  274. params->xin_id, params->rd);
  275. if (val == ot_lim)
  276. ot_lim = 0;
  277. }
  278. exit:
  279. SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
  280. vbif->idx - VBIF_0, params->xin_id, ot_lim);
  281. return ot_lim;
  282. }
/**
 * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
 * @sde_kms: Pointer to sde_kms object
 * @params: Pointer to usecase parameters
 *
 * Note this function would block waiting for bus halt.
 */
void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
		struct sde_vbif_set_ot_params *params)
{
	struct sde_hw_vbif *vbif = NULL;
	struct sde_hw_mdp *mdp;
	bool forced_on = false;
	u32 ot_lim;
	int ret, i;

	if (!sde_kms) {
		SDE_ERROR("invalid arguments\n");
		return;
	}

	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
		SDE_DEBUG("vbif operations not permitted\n");
		return;
	}

	mdp = sde_kms->hw_mdp;

	/* find the vbif instance matching params->vbif_idx */
	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
		if (sde_kms->hw_vbif[i] &&
				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
			vbif = sde_kms->hw_vbif[i];
			break;
		}
	}

	if (!vbif || !mdp) {
		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
				vbif != NULL, mdp != NULL);
		return;
	}

	if (!_sde_vbif_setup_clk_supported(sde_kms, params->clk_ctrl) ||
			!vbif->ops.set_limit_conf ||
			!vbif->ops.set_xin_halt)
		return;

	mutex_lock(&vbif->mutex);

	SDE_EVT32_VERBOSE(vbif->idx, params->xin_id);

	/* set write_gather_en for all write clients */
	if (vbif->ops.set_write_gather_en && !params->rd)
		vbif->ops.set_write_gather_en(vbif, params->xin_id);

	/* OT limit is an 8-bit field; 0 means nothing to (re)configure */
	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;

	if (ot_lim == 0)
		goto exit;

	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
			params->vbif_idx);

	/* keep the client clock ungated while programming and halting */
	forced_on = _sde_vbif_setup_clk_force_ctrl(sde_kms, params->clk_ctrl, true);

	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);

	/* halt/unhalt cycle around the new limit; timeout is only logged */
	vbif->ops.set_xin_halt(vbif, params->xin_id, true);

	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
	if (ret)
		SDE_EVT32(vbif->idx, params->xin_id);

	vbif->ops.set_xin_halt(vbif, params->xin_id, false);

	/* undo the clock force-on only if this function applied it */
	if (forced_on)
		_sde_vbif_setup_clk_force_ctrl(sde_kms, params->clk_ctrl, false);
exit:
	mutex_unlock(&vbif->mutex);
}
  345. void mdp_vbif_lock(struct platform_device *parent_pdev, bool enable)
  346. {
  347. struct drm_device *ddev;
  348. struct sde_kms *sde_kms;
  349. struct sde_hw_vbif *vbif = NULL;
  350. int i;
  351. ddev = platform_get_drvdata(parent_pdev);
  352. if (!ddev || !ddev_to_msm_kms(ddev)) {
  353. SDE_ERROR("invalid drm device\n");
  354. return;
  355. }
  356. sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
  357. for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
  358. if (sde_kms->hw_vbif[i] &&
  359. sde_kms->hw_vbif[i]->idx == VBIF_RT) {
  360. vbif = sde_kms->hw_vbif[i];
  361. break;
  362. }
  363. }
  364. if (!vbif) {
  365. SDE_DEBUG("invalid vbif structure\n");
  366. return;
  367. }
  368. if (enable)
  369. mutex_lock(&vbif->mutex);
  370. else
  371. mutex_unlock(&vbif->mutex);
  372. }
/**
 * sde_vbif_set_xin_halt - halt or release a xin client on the given VBIF
 * @sde_kms: Pointer to sde_kms object
 * @params: Pointer to halt parameters (vbif index, xin id, clk ctrl, enable
 *          flag, and the prior forced_on state used on the release path)
 * @return: whether this call forced the client clock on; the caller is
 *          expected to pass that value back via params->forced_on when
 *          releasing the halt
 */
bool sde_vbif_set_xin_halt(struct sde_kms *sde_kms,
		struct sde_vbif_set_xin_halt_params *params)
{
	struct sde_hw_vbif *vbif = NULL;
	struct sde_hw_mdp *mdp;
	bool forced_on = false;
	int ret, i;

	if (!sde_kms || !params) {
		SDE_ERROR("invalid arguments\n");
		return false;
	}

	/* treated as a successful no-op when vbif access is not permitted */
	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
		SDE_DEBUG("vbif operations not permitted\n");
		return true;
	}

	mdp = sde_kms->hw_mdp;

	/* find the vbif instance matching params->vbif_idx */
	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
		if (sde_kms->hw_vbif[i] &&
				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
			vbif = sde_kms->hw_vbif[i];
			break;
		}
	}

	if (!vbif || !mdp) {
		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
				vbif != NULL, mdp != NULL);
		return false;
	}

	if (!_sde_vbif_setup_clk_supported(sde_kms, params->clk_ctrl) ||
			!vbif->ops.set_xin_halt)
		return false;

	mutex_lock(&vbif->mutex);

	SDE_EVT32_VERBOSE(vbif->idx, params->xin_id);

	if (params->enable) {
		/* force the client clock on, request halt, wait for ack */
		forced_on = _sde_vbif_setup_clk_force_ctrl(sde_kms, params->clk_ctrl, true);

		vbif->ops.set_xin_halt(vbif, params->xin_id, true);

		ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
		if (ret)
			SDE_EVT32(vbif->idx, params->xin_id, SDE_EVTLOG_ERROR);
	} else {
		/*
		 * Release the halt; undo the clock force-on only if the
		 * matching halt call had applied it (params->forced_on).
		 */
		vbif->ops.set_xin_halt(vbif, params->xin_id, false);

		if (params->forced_on)
			_sde_vbif_setup_clk_force_ctrl(sde_kms, params->clk_ctrl, false);
	}

	mutex_unlock(&vbif->mutex);

	return forced_on;
}
/**
 * sde_vbif_get_xin_status - query whether a xin client is fully idle
 * @sde_kms: Pointer to sde_kms object
 * @params: Pointer to status parameters (vbif index, xin id, clk ctrl)
 * @return: true if the client is halted AND its clock is gated; also true
 *          when vbif operations are not permitted; false otherwise
 */
bool sde_vbif_get_xin_status(struct sde_kms *sde_kms,
		struct sde_vbif_get_xin_status_params *params)
{
	struct sde_hw_vbif *vbif = NULL;
	struct sde_hw_mdp *mdp;
	int i, rc;
	bool status;

	if (!sde_kms || !params) {
		SDE_ERROR("invalid arguments\n");
		return false;
	}

	/* report idle when vbif access is not permitted */
	if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
		SDE_DEBUG("vbif operations not permitted\n");
		return true;
	}

	mdp = sde_kms->hw_mdp;

	/* find the vbif instance matching params->vbif_idx */
	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
		if (sde_kms->hw_vbif[i] &&
				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
			vbif = sde_kms->hw_vbif[i];
			break;
		}
	}

	if (!vbif || !mdp) {
		SDE_DEBUG("invalid arguments vbif:%d mdp:%d vbif idx:%d\n",
				vbif != NULL, mdp != NULL, params->vbif_idx);
		return false;
	}

	if (!_sde_vbif_get_clk_supported(sde_kms, params->clk_ctrl) ||
			!vbif->ops.get_xin_halt_status)
		return false;

	mutex_lock(&vbif->mutex);

	SDE_EVT32_VERBOSE(vbif->idx, params->xin_id);

	/* check xin client halt status - true if vbif is idle */
	status = vbif->ops.get_xin_halt_status(vbif, params->xin_id);
	if (status) {
		/* check if client's clk is active - true if clk is active */
		rc = _sde_vbif_get_clk_ctrl_status(sde_kms, params->clk_ctrl, &status);

		/* idle means halted with the clock OFF, hence the inversion */
		status = (rc < 0) ? false : !status;
	}

	mutex_unlock(&vbif->mutex);

	return status;
}
  463. void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
  464. struct sde_vbif_set_qos_params *params)
  465. {
  466. struct sde_hw_vbif *vbif = NULL;
  467. struct sde_hw_mdp *mdp;
  468. bool forced_on = false;
  469. const struct sde_vbif_qos_tbl *qos_tbl;
  470. int i;
  471. u32 nlvl;
  472. if (!sde_kms || !params || !sde_kms->hw_mdp) {
  473. SDE_ERROR("invalid arguments\n");
  474. return;
  475. }
  476. if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
  477. SDE_DEBUG("vbif operations not permitted\n");
  478. return;
  479. }
  480. mdp = sde_kms->hw_mdp;
  481. for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
  482. if (sde_kms->hw_vbif[i] &&
  483. sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
  484. vbif = sde_kms->hw_vbif[i];
  485. break;
  486. }
  487. }
  488. if (!vbif || !vbif->cap) {
  489. SDE_ERROR("invalid vbif %d\n", params->vbif_idx);
  490. return;
  491. }
  492. if (!vbif->ops.set_qos_remap || !_sde_vbif_setup_clk_supported(sde_kms, params->clk_ctrl)) {
  493. SDE_DEBUG("qos remap not supported\n");
  494. return;
  495. }
  496. if (params->client_type > VBIF_MAX_CLIENT) {
  497. SDE_ERROR("invalid client type:%d\n", params->client_type);
  498. return;
  499. }
  500. qos_tbl = &vbif->cap->qos_tbl[params->client_type];
  501. if (!qos_tbl->count || !qos_tbl->priority_lvl) {
  502. SDE_DEBUG("qos tbl not defined\n");
  503. return;
  504. }
  505. mutex_lock(&vbif->mutex);
  506. forced_on = _sde_vbif_setup_clk_force_ctrl(sde_kms, params->clk_ctrl, true);
  507. nlvl = qos_tbl->count / 2;
  508. for (i = 0; i < nlvl; i++) {
  509. SDE_DEBUG("vbif:%d xin:%d rp_remap:%d/%d, lv_remap:%d/%d\n",
  510. params->vbif_idx, params->xin_id, i, qos_tbl->priority_lvl[i],
  511. i + nlvl, qos_tbl->priority_lvl[i + nlvl]);
  512. vbif->ops.set_qos_remap(vbif, params->xin_id, i,
  513. qos_tbl->priority_lvl[i], qos_tbl->priority_lvl[i + nlvl]);
  514. }
  515. if (forced_on)
  516. _sde_vbif_setup_clk_force_ctrl(sde_kms, params->clk_ctrl, false);
  517. mutex_unlock(&vbif->mutex);
  518. }
  519. void sde_vbif_clear_errors(struct sde_kms *sde_kms)
  520. {
  521. struct sde_hw_vbif *vbif;
  522. u32 i, pnd, src;
  523. if (!sde_kms) {
  524. SDE_ERROR("invalid argument\n");
  525. return;
  526. }
  527. if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
  528. SDE_DEBUG("vbif operations not permitted\n");
  529. return;
  530. }
  531. for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
  532. vbif = sde_kms->hw_vbif[i];
  533. if (vbif && vbif->ops.clear_errors) {
  534. mutex_lock(&vbif->mutex);
  535. vbif->ops.clear_errors(vbif, &pnd, &src);
  536. if (pnd || src) {
  537. SDE_EVT32(i, pnd, src);
  538. SDE_DEBUG("VBIF %d: pnd 0x%X, src 0x%X\n",
  539. vbif->idx - VBIF_0, pnd, src);
  540. }
  541. mutex_unlock(&vbif->mutex);
  542. }
  543. }
  544. }
  545. void sde_vbif_init_memtypes(struct sde_kms *sde_kms)
  546. {
  547. struct sde_hw_vbif *vbif;
  548. int i, j;
  549. if (!sde_kms) {
  550. SDE_ERROR("invalid argument\n");
  551. return;
  552. }
  553. if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
  554. SDE_DEBUG("vbif operations not permitted\n");
  555. return;
  556. }
  557. for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
  558. vbif = sde_kms->hw_vbif[i];
  559. if (vbif && vbif->cap && vbif->ops.set_mem_type) {
  560. mutex_lock(&vbif->mutex);
  561. for (j = 0; j < vbif->cap->memtype_count; j++)
  562. vbif->ops.set_mem_type(
  563. vbif, j, vbif->cap->memtype[j]);
  564. mutex_unlock(&vbif->mutex);
  565. }
  566. }
  567. }
  568. void sde_vbif_axi_halt_request(struct sde_kms *sde_kms)
  569. {
  570. struct sde_hw_vbif *vbif;
  571. int i;
  572. if (!sde_kms) {
  573. SDE_ERROR("invalid argument\n");
  574. return;
  575. }
  576. if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
  577. SDE_DEBUG("vbif operations not permitted\n");
  578. return;
  579. }
  580. for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
  581. vbif = sde_kms->hw_vbif[i];
  582. if (vbif && vbif->cap && vbif->ops.set_axi_halt) {
  583. mutex_lock(&vbif->mutex);
  584. vbif->ops.set_axi_halt(vbif);
  585. _sde_vbif_wait_for_axi_halt(vbif);
  586. mutex_unlock(&vbif->mutex);
  587. }
  588. }
  589. }
  590. int sde_vbif_halt_xin_mask(struct sde_kms *sde_kms, u32 xin_id_mask,
  591. bool halt)
  592. {
  593. struct sde_hw_vbif *vbif;
  594. int i = 0, status, rc;
  595. if (!sde_kms) {
  596. SDE_ERROR("invalid argument\n");
  597. return -EINVAL;
  598. }
  599. vbif = sde_kms->hw_vbif[VBIF_RT];
  600. if (!vbif->ops.get_xin_halt_status || !vbif->ops.set_xin_halt)
  601. return 0;
  602. SDE_EVT32(xin_id_mask, halt);
  603. for (i = 0; i < MAX_XIN_CLIENT; i++) {
  604. if (xin_id_mask & BIT(i)) {
  605. /* unhalt the xin-clients */
  606. if (!halt) {
  607. vbif->ops.set_xin_halt(vbif, i, false);
  608. continue;
  609. }
  610. status = vbif->ops.get_xin_halt_status(vbif, i);
  611. if (status)
  612. continue;
  613. /* halt xin-clients and wait for ack */
  614. vbif->ops.set_xin_halt(vbif, i, true);
  615. rc = _sde_vbif_wait_for_xin_halt(vbif, i);
  616. if (rc) {
  617. SDE_ERROR("xin_halt failed for xin:%d, rc:%d\n",
  618. i, rc);
  619. SDE_EVT32(xin_id_mask, i, rc, SDE_EVTLOG_ERROR);
  620. return rc;
  621. }
  622. }
  623. }
  624. return 0;
  625. }
  626. #if IS_ENABLED(CONFIG_DEBUG_FS)
  627. void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
  628. {
  629. debugfs_remove_recursive(sde_kms->debugfs_vbif);
  630. sde_kms->debugfs_vbif = NULL;
  631. }
/**
 * sde_debugfs_vbif_init - expose per-VBIF catalog settings under debugfs
 * @sde_kms: Pointer to sde_kms object
 * @debugfs_root: parent debugfs directory for the "vbif" subtree
 * @return: 0 on success; -EINVAL if the "vbif" directory cannot be created
 */
int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root)
{
	char vbif_name[32];
	struct dentry *debugfs_vbif;
	int i, j;

	sde_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
	/*
	 * NOTE(review): on recent kernels debugfs_create_dir() returns an
	 * ERR_PTR (never NULL) on failure, so this NULL check may be dead
	 * code — confirm against the target kernel version.
	 */
	if (!sde_kms->debugfs_vbif) {
		SDE_ERROR("failed to create vbif debugfs\n");
		return -EINVAL;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		struct sde_vbif_cfg *vbif = &sde_kms->catalog->vbif[i];

		/* one subdirectory per VBIF instance, named by its hw id */
		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);

		debugfs_vbif = debugfs_create_dir(vbif_name,
				sde_kms->debugfs_vbif);

		/* read-only views of the catalog configuration */
		debugfs_create_u32("features", 0400, debugfs_vbif,
				(u32 *)&vbif->features);

		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
				(u32 *)&vbif->xin_halt_timeout);

		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
				(u32 *)&vbif->default_ot_rd_limit);

		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
				(u32 *)&vbif->default_ot_wr_limit);

		/* read-direction dynamic OT table entries */
		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
			struct sde_vbif_dynamic_ot_cfg *cfg =
					&vbif->dynamic_ot_rd_tbl.cfg[j];

			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_rd_%d_pps", j);
			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
					(u64 *)&cfg->pps);
			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_rd_%d_ot_limit", j);
			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
					(u32 *)&cfg->ot_limit);
		}

		/* write-direction dynamic OT table entries */
		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
			struct sde_vbif_dynamic_ot_cfg *cfg =
					&vbif->dynamic_ot_wr_tbl.cfg[j];

			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_wr_%d_pps", j);
			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
					(u64 *)&cfg->pps);
			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_wr_%d_ot_limit", j);
			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
					(u32 *)&cfg->ot_limit);
		}
	}

	return 0;
}
  682. #endif /* CONFIG_DEBUG_FS */