sde_rotator_base.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2012, 2015-2020, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/major.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#define CREATE_TRACE_POINTS
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"
#include "sde_rotator_dev.h"
#include "sde_rotator_vbif.h"
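
/*
 * Register bus vote table, indexed by use-case index (VOTE_INDEX_*).
 * Each entry supplies the ab/ib bandwidth values handed to icc_set_bw()
 * when the aggregated vote changes.
 */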
static const struct sde_rot_bus_data sde_rot_reg_bus_table[] = {
	{0, 0},
	{0, 76800},
	{0, 150000},
	{0, 300000},
};

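/*
 * Scale @val by the ratio @numer/@denom using 64-bit arithmetic so large
 * bandwidth/quota values do not overflow.
 */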
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
{
	u64 result = (val * (u64)numer);

	do_div(result, denom);
	return result;
}

static inline u64 apply_fudge_factor(u64 val,
	struct sde_mult_factor *factor)
{
	return fudge_factor(val, factor->numer, factor->denom);
}

static inline u64 apply_inverse_fudge_factor(u64 val,
	struct sde_mult_factor *factor)
{
	return fudge_factor(val, factor->denom, factor->numer);
}

static inline bool validate_comp_ratio(struct sde_mult_factor *factor)
{
	return factor->numer && factor->denom;
}

const struct sde_rot_bus_data *sde_get_rot_reg_bus_value(u32 usecase_ndx)
{
	return &sde_rot_reg_bus_table[usecase_ndx];
}

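/*
 * sde_apply_comp_ratio_factor - scale a bandwidth quota by the inverse of
 * the compression ratio. Only applied when the target advertises
 * SDE_QOS_OVERHEAD_FACTOR and the format is UBWC (compressed).
 */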
u32 sde_apply_comp_ratio_factor(u32 quota,
	struct sde_mdp_format_params *fmt,
	struct sde_mult_factor *factor)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	if (!mdata || !test_bit(SDE_QOS_OVERHEAD_FACTOR,
			mdata->sde_qos_map))
		return quota;

	/* apply compression ratio, only for compressed formats */
	if (sde_mdp_is_ubwc_format(fmt) &&
	    validate_comp_ratio(factor))
		quota = apply_inverse_fudge_factor(quota, factor);

	return quota;
}

#define RES_1080p		(1088 * 1920)
#define RES_UHD			(3840 * 2160)
#define RES_WQXGA		(2560 * 1600)
#define XIN_HALT_TIMEOUT_US	0x4000

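/*
 * sde_mdp_wait_for_xin_halt - poll MMSS_VBIF_XIN_HALT_CTRL1 until the given
 * xin client reports halted, or until XIN_HALT_TIMEOUT_US expires.
 * Returns 0 on success or -ETIMEDOUT if the client never halted.
 */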
static int sde_mdp_wait_for_xin_halt(u32 xin_id)
{
	void __iomem *vbif_base;
	u32 status;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 idle_mask = BIT(xin_id);
	int rc;

	vbif_base = mdata->vbif_nrt_io.base;

	rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
		status, (status & idle_mask),
		1000, XIN_HALT_TIMEOUT_US);

	if (rc == -ETIMEDOUT) {
		SDEROT_ERR("VBIF client %d not halting. TIMEDOUT.\n",
			xin_id);
	} else {
		SDEROT_DBG("VBIF client %d is halted\n", xin_id);
	}

	return rc;
}

/**
 * force_on_xin_clk() - enable/disable the force-on for the pipe clock
 * @bit_off: offset of the bit to enable/disable the force-on.
 * @clk_ctl_reg_off: register offset for the clock control.
 * @enable: boolean to indicate if the force-on of the clock needs to be
 * enabled or disabled.
 *
 * This function returns:
 * true - if the clock was forced on by this call
 * false - if the clock was already forced on
 * It is the caller's responsibility to check whether this call forced the
 * clock on; if so, the caller must later remove the force-on, otherwise it
 * must leave it in place.
 * Clocks must be on when calling this function.
 */
static bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
{
	u32 val;
	u32 force_on_mask;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	bool clk_forced_on = false;

	force_on_mask = BIT(bit_off);
	val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);

	clk_forced_on = !(force_on_mask & val);

	if (enable)
		val |= force_on_mask;
	else
		val &= ~force_on_mask;

	writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);

	return clk_forced_on;
}

void vbif_lock(struct platform_device *parent_pdev)
{
	if (!parent_pdev)
		return;

	mdp_vbif_lock(parent_pdev, true);
}

void vbif_unlock(struct platform_device *parent_pdev)
{
	if (!parent_pdev)
		return;

	mdp_vbif_lock(parent_pdev, false);
}

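/*
 * sde_mdp_halt_vbif_xin - force-halt the given VBIF xin client.
 * Forces the MDP clock on if needed, asserts the halt request in
 * MMSS_VBIF_XIN_HALT_CTRL0, polls for the halt acknowledgment, then releases
 * the halt request and the clock force-on. On timeout the affected xin is
 * recorded in params->xin_timeout.
 */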
void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 reg_val;
	bool forced_on;
	int rc = 0;

	if (!mdata || !params || !params->reg_off_mdp_clk_ctrl) {
		SDEROT_ERR("null input parameter\n");
		return;
	}

	if (!mdata->parent_pdev &&
			params->xin_id > MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1) {
		SDEROT_ERR("xin_id:%d exceed max limit\n", params->xin_id);
		return;
	}

	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
		params->reg_off_mdp_clk_ctrl, true);

	vbif_lock(mdata->parent_pdev);

	SDEROT_EVTLOG(forced_on, params->xin_id);

	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val | BIT(params->xin_id));

	/* this is a polling operation */
	rc = sde_mdp_wait_for_xin_halt(params->xin_id);
	if (rc == -ETIMEDOUT)
		params->xin_timeout = BIT(params->xin_id);

	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val & ~BIT(params->xin_id));

	vbif_unlock(mdata->parent_pdev);

	if (forced_on)
		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
			params->reg_off_mdp_clk_ctrl, false);
}

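/*
 * sde_mdp_get_ot_limit - compute the outstanding-transaction (OT) limit for
 * a read or write client based on resolution, frame rate, pixel format and
 * the target's default OT limits. Returns 0 when no OT limit should be
 * programmed.
 */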
u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_mdp_format_params *fmt;
	u32 ot_lim;
	u32 is_yuv;
	u64 res;

	ot_lim = (is_rd) ? mdata->default_ot_rd_limit :
				mdata->default_ot_wr_limit;

	/*
	 * If default ot is not set from dt,
	 * then do not configure it.
	 */
	if (ot_lim == 0)
		goto exit;

	/* Modify the limits if the target and the use case requires it */
	if (false == test_bit(SDE_QOS_OTLIM, mdata->sde_qos_map))
		goto exit;

	width = min_t(u32, width, SDE_ROT_MAX_IMG_WIDTH);
	height = min_t(u32, height, SDE_ROT_MAX_IMG_HEIGHT);

	res = width * height;
	res = res * fps;

	fmt = sde_get_format_params(pixfmt);
	if (!fmt) {
		SDEROT_WARN("invalid format %8.8x\n", pixfmt);
		goto exit;
	}

	is_yuv = sde_mdp_is_yuv_format(fmt);

	SDEROT_DBG("w:%d h:%d fps:%d pixfmt:%8.8x yuv:%d res:%llu rd:%d\n",
		width, height, fps, pixfmt, is_yuv, res, is_rd);

	/*
	 * If (total_source_pixels <= 62208000 && YUV) -> RD/WROT=2 //1080p30
	 * If (total_source_pixels <= 124416000 && YUV) -> RD/WROT=4 //1080p60
	 * If (total_source_pixels <= 2160p && YUV && FPS <= 30) -> RD/WROT = 32
	 */
	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_540)) {
		if (is_yuv) {
			if (res <= (RES_1080p * 30))
				ot_lim = 2;
			else if (res <= (RES_1080p * 60))
				ot_lim = 4;
			else if (res <= (RES_WQXGA * 60))
				ot_lim = 4;
			else if (res <= (RES_UHD * 30))
				ot_lim = 8;
		} else if (fmt->bpp == 4 && res <= (RES_WQXGA * 60)) {
			ot_lim = 16;
		}
	} else if (IS_SDE_MAJOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_600) ||
		IS_SDE_MAJOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_870) || is_yuv) {
		if (res <= (RES_1080p * 30))
			ot_lim = 2;
		else if (res <= (RES_1080p * 60))
			ot_lim = 4;
	}

exit:
	SDEROT_DBG("ot_lim=%d\n", ot_lim);
	return ot_lim;
}

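/*
 * get_ot_limit - compute the OT limit for the request and compare it with
 * the value currently programmed in the VBIF limit-conf register; returns 0
 * when nothing needs to be written (no limit configured, or the register
 * already holds the desired value).
 */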
static u32 get_ot_limit(u32 reg_off, u32 bit_off,
	struct sde_mdp_set_ot_params *params)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 ot_lim;
	u32 val;

	ot_lim = sde_mdp_get_ot_limit(
			params->width, params->height,
			params->fmt, params->fps,
			params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF);

	/*
	 * If default ot is not set from dt,
	 * then do not configure it.
	 */
	if (ot_lim == 0)
		goto exit;

	val = SDE_VBIF_READ(mdata, reg_off);
	val &= (0xFF << bit_off);
	val = val >> bit_off;

	SDEROT_EVTLOG(val, ot_lim);

	if (val == ot_lim)
		ot_lim = 0;

exit:
	SDEROT_DBG("ot_lim=%d\n", ot_lim);
	SDEROT_EVTLOG(params->width, params->height, params->fmt, params->fps,
		ot_lim);
	return ot_lim;
}

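/*
 * sde_mdp_set_ot_limit - program the OT limit for the xin described by
 * @params. Skips the write when get_ot_limit() returns 0. The xin is halted
 * (with the MDP clock forced on if necessary) around the register update so
 * the new limit takes effect safely.
 */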
void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 ot_lim;
	u32 reg_off_vbif_lim_conf = ((params->xin_id / mdata->npriority_lvl)
					* mdata->npriority_lvl)
					+ params->reg_off_vbif_lim_conf;
	u32 bit_off_vbif_lim_conf = (params->xin_id % mdata->npriority_lvl) * 8;
	u32 reg_val;
	u32 sts;
	bool forced_on;

	vbif_lock(mdata->parent_pdev);

	ot_lim = get_ot_limit(
		reg_off_vbif_lim_conf,
		bit_off_vbif_lim_conf,
		params) & 0xFF;

	if (ot_lim == 0)
		goto exit;

	if (params->rotsts_base && params->rotsts_busy_mask) {
		sts = readl_relaxed(params->rotsts_base);
		if (sts & params->rotsts_busy_mask) {
			SDEROT_ERR(
				"Rotator still busy, should not modify VBIF\n");
			SDEROT_EVTLOG_TOUT_HANDLER(
				"rot", "vbif_dbg_bus", "panic");
		}
	}

	trace_rot_perf_set_ot(params->num, params->xin_id, ot_lim);

	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
		params->reg_off_mdp_clk_ctrl, true);

	reg_val = SDE_VBIF_READ(mdata, reg_off_vbif_lim_conf);
	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
	SDE_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val);

	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val | BIT(params->xin_id));

	/* this is a polling operation */
	sde_mdp_wait_for_xin_halt(params->xin_id);

	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val & ~BIT(params->xin_id));

	if (forced_on)
		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
			params->reg_off_mdp_clk_ctrl, false);

	SDEROT_EVTLOG(params->num, params->xin_id, ot_lim);

exit:
	vbif_unlock(mdata->parent_pdev);
	return;
}

/*
 * sde_mdp_set_vbif_memtype - set memtype output for the given xin port
 * @mdata: pointer to global rotator data
 * @xin_id: xin identifier
 * @memtype: memtype output configuration
 * return: none
 */
static void sde_mdp_set_vbif_memtype(struct sde_rot_data_type *mdata,
		u32 xin_id, u32 memtype)
{
	u32 reg_off;
	u32 bit_off;
	u32 reg_val;

	/*
	 * Assume 4 bits per bit field, 8 fields per 32-bit register.
	 */
	if (xin_id >= 8)
		return;

	reg_off = MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0;

	bit_off = (xin_id & 0x7) * 4;
	reg_val = SDE_VBIF_READ(mdata, reg_off);
	reg_val &= ~(0x7 << bit_off);
	reg_val |= (memtype & 0x7) << bit_off;
	SDE_VBIF_WRITE(mdata, reg_off, reg_val);
}

/*
 * sde_mdp_init_vbif - initialize static vbif configuration
 * return: 0 if success; error code otherwise
 */
int sde_mdp_init_vbif(void)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int i;

	if (!mdata)
		return -EINVAL;

	if (mdata->vbif_memtype_count && mdata->vbif_memtype) {
		for (i = 0; i < mdata->vbif_memtype_count; i++)
			sde_mdp_set_vbif_memtype(mdata, i,
					mdata->vbif_memtype[i]);

		SDEROT_DBG("amemtype=0x%x\n", SDE_VBIF_READ(mdata,
				MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0));
	}

	return 0;
}

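/*
 * sde_reg_bus_vote_client_create - allocate a register-bus vote client,
 * assign it a unique id, and add it to the global client list with an
 * initial vote of VOTE_INDEX_DISABLE. Returns an ERR_PTR on failure.
 */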
struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name)
{
	struct reg_bus_client *client;
	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
	static u32 id;

	if (client_name == NULL) {
		SDEROT_ERR("client name is null\n");
		return ERR_PTR(-EINVAL);
	}

	client = kzalloc(sizeof(struct reg_bus_client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&sde_res->reg_bus_lock);
	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
	client->usecase_ndx = VOTE_INDEX_DISABLE;
	client->id = id;
	SDEROT_DBG("bus vote client %s created:%pK id :%d\n", client_name,
		client, id);
	id++;
	list_add(&client->list, &sde_res->reg_bus_clist);
	mutex_unlock(&sde_res->reg_bus_lock);

	return client;
}

void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client)
{
	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();

	if (!client) {
		SDEROT_ERR("reg bus vote: invalid client handle\n");
	} else {
		SDEROT_DBG("bus vote client %s destroyed:%pK id:%u\n",
			client->name, client, client->id);
		mutex_lock(&sde_res->reg_bus_lock);
		list_del_init(&client->list);
		mutex_unlock(&sde_res->reg_bus_lock);
		kfree(client);
	}
}

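/*
 * sde_update_reg_bus_vote - update @bus_client's register-bus vote, then
 * aggregate the maximum vote across all clients and, when the aggregate
 * changes, push the corresponding bandwidth values via icc_set_bw().
 */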
int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
	int ret = 0;
	bool changed = false;
	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
	const struct sde_rot_bus_data *reg_bus_value = NULL;
	struct reg_bus_client *client, *temp_client;
	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();

	if (!sde_res || !sde_res->reg_bus_hdl || !bus_client)
		return 0;

	mutex_lock(&sde_res->reg_bus_lock);
	bus_client->usecase_ndx = usecase_ndx;
	list_for_each_entry_safe(client, temp_client, &sde_res->reg_bus_clist,
		list) {
		if (client->usecase_ndx < VOTE_INDEX_MAX &&
		    client->usecase_ndx > max_usecase_ndx)
			max_usecase_ndx = client->usecase_ndx;
	}

	if (sde_res->reg_bus_usecase_ndx != max_usecase_ndx)
		changed = true;

	SDEROT_DBG(
		"%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
		__builtin_return_address(0), changed, max_usecase_ndx,
		bus_client->name, bus_client->id, usecase_ndx);

	if (changed) {
		reg_bus_value = sde_get_rot_reg_bus_value(max_usecase_ndx);
		ret = icc_set_bw(sde_res->reg_bus_hdl, reg_bus_value->ab,
			reg_bus_value->ib);
	}

	if (ret) {
		pr_err("rotator: reg_bus_hdl set failed ab=%llu, ib=%llu\n",
			reg_bus_value->ab, reg_bus_value->ib);
		if (sde_res->reg_bus_usecase_ndx == VOTE_INDEX_DISABLE)
			pr_err("rotator: reg_bus_hdl was disabled\n");
	} else {
		sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
	}

	mutex_unlock(&sde_res->reg_bus_lock);
	return ret;
}

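/*
 * Device-tree helpers: sde_mdp_parse_dt_handler() reads a u32 array property
 * into @offsets, and sde_mdp_parse_dt_prop_len() returns the number of u32
 * entries in a property (0 if it is absent).
 */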
static int sde_mdp_parse_dt_handler(struct platform_device *pdev,
		char *prop_name, u32 *offsets, int len)
{
	int rc;

	rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
		offsets, len);
	if (rc) {
		SDEROT_DBG("Error from prop %s : u32 array read\n", prop_name);
		return -EINVAL;
	}

	return 0;
}

static int sde_mdp_parse_dt_prop_len(struct platform_device *pdev,
		char *prop_name)
{
	int len = 0;

	of_find_property(pdev->dev.of_node, prop_name, &len);
	if (len < 1) {
		SDEROT_INFO("prop %s : doesn't exist in device tree\n",
			prop_name);
		return 0;
	}

	len = len / sizeof(u32);

	return len;
}

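/*
 * sde_mdp_parse_vbif_memtype - read the optional "qcom,mdss-rot-vbif-memtype"
 * array from DT. On any failure the memtype table is freed and the count
 * reset to 0.
 */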
static void sde_mdp_parse_vbif_memtype(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	int rc;

	mdata->vbif_memtype_count = sde_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-rot-vbif-memtype");
	mdata->vbif_memtype = kcalloc(mdata->vbif_memtype_count,
			sizeof(u32), GFP_KERNEL);
	if (!mdata->vbif_memtype || !mdata->vbif_memtype_count) {
		mdata->vbif_memtype_count = 0;
		return;
	}

	rc = sde_mdp_parse_dt_handler(pdev,
		"qcom,mdss-rot-vbif-memtype", mdata->vbif_memtype,
			mdata->vbif_memtype_count);
	if (rc) {
		SDEROT_DBG("vbif memtype not found\n");
		kfree(mdata->vbif_memtype);
		mdata->vbif_memtype = NULL;
		mdata->vbif_memtype_count = 0;
		return;
	}
}

static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	int rc;

	mdata->vbif_rt_qos = NULL;

	mdata->npriority_lvl = sde_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-rot-vbif-qos-setting");
	mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
			sizeof(u32), GFP_KERNEL);
	if (!mdata->vbif_nrt_qos || !mdata->npriority_lvl) {
		mdata->npriority_lvl = 0;
		return;
	}

	rc = sde_mdp_parse_dt_handler(pdev,
		"qcom,mdss-rot-vbif-qos-setting", mdata->vbif_nrt_qos,
			mdata->npriority_lvl);
	if (rc) {
		SDEROT_DBG("vbif setting not found\n");
		kfree(mdata->vbif_nrt_qos);
		mdata->vbif_nrt_qos = NULL;
		mdata->npriority_lvl = 0;
		return;
	}
}

static void sde_mdp_parse_vbif_xin_id(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	mdata->vbif_xin_id[XIN_SSPP] = XIN_SSPP;
	mdata->vbif_xin_id[XIN_WRITEBACK] = XIN_WRITEBACK;

	sde_mdp_parse_dt_handler(pdev, "qcom,mdss-rot-xin-id",
			mdata->vbif_xin_id, MAX_XIN);
}

static void sde_mdp_parse_cdp_setting(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	int rc;
	u32 len, data[SDE_ROT_OP_MAX] = {0};

	len = sde_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-rot-cdp-setting");
	if (len == SDE_ROT_OP_MAX) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-rot-cdp-setting", data, len);
		if (rc) {
			SDEROT_ERR("invalid CDP setting\n");
			goto end;
		}

		set_bit(SDE_QOS_CDP, mdata->sde_qos_map);
		mdata->enable_cdp[SDE_ROT_RD] = data[SDE_ROT_RD];
		mdata->enable_cdp[SDE_ROT_WR] = data[SDE_ROT_WR];
		return;
	}
end:
	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
}

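/*
 * sde_mdp_parse_rot_lut_setting - read the optional QoS, danger and safe LUT
 * properties for the rotator and set the matching bits in sde_qos_map for
 * each LUT that is present and well formed.
 */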
static void sde_mdp_parse_rot_lut_setting(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	int rc;
	u32 len, data[4];

	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-qos-lut");
	if (len == 4) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-rot-qos-lut", data, len);
		if (!rc) {
			mdata->lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
			mdata->lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
			mdata->lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
			mdata->lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
			set_bit(SDE_QOS_LUT, mdata->sde_qos_map);
		} else {
			SDEROT_DBG("qos lut setting not found\n");
		}
	}

	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-danger-lut");
	if (len == SDE_ROT_OP_MAX) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-rot-danger-lut", data, len);
		if (!rc) {
			mdata->lut_cfg[SDE_ROT_RD].danger_lut
				= data[SDE_ROT_RD];
			mdata->lut_cfg[SDE_ROT_WR].danger_lut
				= data[SDE_ROT_WR];
			set_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map);
		} else {
			SDEROT_DBG("danger lut setting not found\n");
		}
	}

	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-safe-lut");
	if (len == SDE_ROT_OP_MAX) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-rot-safe-lut", data, len);
		if (!rc) {
			mdata->lut_cfg[SDE_ROT_RD].safe_lut = data[SDE_ROT_RD];
			mdata->lut_cfg[SDE_ROT_WR].safe_lut = data[SDE_ROT_WR];
			set_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map);
		} else {
			SDEROT_DBG("safe lut setting not found\n");
		}
	}
}

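/*
 * sde_mdp_parse_inline_rot_lut_setting - same as above, but for the inline
 * rotator LUT properties; results go into inline_lut_cfg and
 * sde_inline_qos_map.
 */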
static void sde_mdp_parse_inline_rot_lut_setting(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	int rc;
	u32 len, data[4];

	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-qos-lut");
	if (len == 4) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-inline-rot-qos-lut", data, len);
		if (!rc) {
			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
			set_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map);
		} else {
			SDEROT_DBG("inline qos lut setting not found\n");
		}
	}

	len = sde_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-inline-rot-danger-lut");
	if (len == SDE_ROT_OP_MAX) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-inline-rot-danger-lut", data, len);
		if (!rc) {
			mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut
				= data[SDE_ROT_RD];
			mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut
				= data[SDE_ROT_WR];
			set_bit(SDE_INLINE_QOS_DANGER_LUT,
					mdata->sde_inline_qos_map);
		} else {
			SDEROT_DBG("inline danger lut setting not found\n");
		}
	}

	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-safe-lut");
	if (len == SDE_ROT_OP_MAX) {
		rc = sde_mdp_parse_dt_handler(pdev,
			"qcom,mdss-inline-rot-safe-lut", data, len);
		if (!rc) {
			mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut
				= data[SDE_ROT_RD];
			mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut
				= data[SDE_ROT_WR];
			set_bit(SDE_INLINE_QOS_SAFE_LUT,
					mdata->sde_inline_qos_map);
		} else {
			SDEROT_DBG("inline safe lut setting not found\n");
		}
	}
}

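/*
 * sde_mdp_parse_rt_rotator - resolve the optional "qcom,mdss-rot-parent"
 * phandle and cache the parent MDP platform device; parent_pdev stays NULL
 * if the node or device is not available.
 */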
static void sde_mdp_parse_rt_rotator(struct device_node *np)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct platform_device *pdev;
	struct of_phandle_args phargs;
	int rc = 0;

	rc = of_parse_phandle_with_args(np,
		"qcom,mdss-rot-parent", "#list-cells", 0, &phargs);
	if (rc)
		return;

	if (!phargs.np || !phargs.args_count) {
		SDEROT_ERR("invalid args\n");
		return;
	}

	pdev = of_find_device_by_node(phargs.np);
	if (pdev) {
		mdata->parent_pdev = pdev;
	} else {
		mdata->parent_pdev = NULL;
		SDEROT_ERR("Parent mdp node not available\n");
	}

	of_node_put(phargs.np);
}

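/*
 * sde_mdp_parse_dt_misc - parse the remaining optional rotator DT properties
 * (block size, default OT limits, highest bank bit, CDP, VBIF QoS/xin/
 * memtype, LUTs, PM QoS settings) and derive the MDP register base.
 */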
static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	int rc;
	u32 data;

	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
		&data);
	mdata->rot_block_size = (!rc ? data : 128);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-rd-limit", &data);
	mdata->default_ot_rd_limit = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-wr-limit", &data);
	mdata->default_ot_wr_limit = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
	if (rc)
		SDEROT_DBG(
			"Could not read optional property: highest bank bit\n");

	sde_mdp_parse_cdp_setting(pdev, mdata);

	sde_mdp_parse_vbif_qos(pdev, mdata);

	sde_mdp_parse_vbif_xin_id(pdev, mdata);

	sde_mdp_parse_vbif_memtype(pdev, mdata);

	sde_mdp_parse_rot_lut_setting(pdev, mdata);

	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-rot-qos-cpu-mask", &data);
	mdata->rot_pm_qos_cpu_mask = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-rot-qos-cpu-dma-latency", &data);
	mdata->rot_pm_qos_cpu_dma_latency = (!rc ? data : 0);

	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;

	return 0;
}

static void sde_mdp_destroy_dt_misc(struct platform_device *pdev,
		struct sde_rot_data_type *mdata)
{
	kfree(mdata->vbif_memtype);
	mdata->vbif_memtype = NULL;
	kfree(mdata->vbif_rt_qos);
	mdata->vbif_rt_qos = NULL;
	kfree(mdata->vbif_nrt_qos);
	mdata->vbif_nrt_qos = NULL;
}

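/*
 * sde_mdp_bus_scale_register - look up the "qcom,sde-reg-bus" interconnect
 * path used for register-bus voting. A missing DT node is tolerated (votes
 * are then skipped); a parse error is reported as -EINVAL.
 */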
static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
{
	int rc = 0;

	mdata->reg_bus_hdl = of_icc_get(&mdata->pdev->dev, "qcom,sde-reg-bus");
	if (mdata->reg_bus_hdl == NULL) {
		pr_err("rotator: reg bus dt node missing\n");
		return 0;
	} else if (IS_ERR(mdata->reg_bus_hdl)) {
		SDEROT_ERR("reg bus handle parsing failed\n");
		mdata->reg_bus_hdl = NULL;
		rc = -EINVAL;
	} else {
		SDEROT_DBG("rotator reg_bus_hdl parsing success\n");
	}

	return rc;
}

static void sde_mdp_bus_scale_unregister(struct sde_rot_data_type *mdata)
{
	SDEROT_DBG("unregister reg_bus_hdl\n");

	if (mdata->reg_bus_hdl) {
		icc_put(mdata->reg_bus_hdl);
		mdata->reg_bus_hdl = NULL;
	}
}

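/*
 * Global rotator data; assigned in sde_rotator_base_init() and cleared in
 * sde_rotator_base_destroy().
 */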
static struct sde_rot_data_type *sde_rot_res;

struct sde_rot_data_type *sde_rot_get_mdata(void)
{
	return sde_rot_res;
}

/*
 * sde_rotator_base_init - initialize base rotator data/resource
 */
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
		struct platform_device *pdev,
		const void *drvdata)
{
	int rc;
	struct sde_rot_data_type *mdata;

	/* if probe deferral happened, return early */
	if (sde_rot_res) {
		SDEROT_ERR("Rotator data already initialized, skip init\n");
		return 0;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	mdata->pdev = pdev;
	sde_rot_res = mdata;
	mutex_init(&mdata->reg_bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);

	rc = sde_rot_ioremap_byname(pdev, &mdata->sde_io, "mdp_phys");
	if (rc) {
		SDEROT_ERR("unable to map SDE base\n");
		goto probe_done;
	}
	SDEROT_DBG("SDE ROT HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->sde_io.base,
		mdata->sde_io.len);

	rc = sde_rot_ioremap_byname(pdev, &mdata->vbif_nrt_io, "rot_vbif_phys");
	if (rc) {
		SDEROT_ERR("unable to map SDE ROT VBIF base\n");
		goto probe_done;
	}
	SDEROT_DBG("SDE ROT VBIF HW Base addr=%pK len=0x%x\n",
			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);

	sde_mdp_parse_rt_rotator(pdev->dev.of_node);

	rc = sde_mdp_parse_dt_misc(pdev, mdata);
	if (rc) {
		SDEROT_ERR("Error in device tree : misc\n");
		goto probe_done;
	}

	rc = sde_mdp_bus_scale_register(mdata);
	if (rc) {
		SDEROT_ERR("unable to register bus scaling\n");
		goto probe_done;
	}

	rc = sde_smmu_init(&pdev->dev);
	if (rc) {
		SDEROT_ERR("sde smmu init failed %d\n", rc);
		goto probe_done;
	}

	*pmdata = mdata;

	return 0;
probe_done:
	return rc;
}

/*
 * sde_rotator_base_destroy - clean up base rotator data/resource
 */
void sde_rotator_base_destroy(struct sde_rot_data_type *mdata)
{
	struct platform_device *pdev;

	if (!mdata || !mdata->pdev)
		return;

	pdev = mdata->pdev;
	sde_rot_res = NULL;
	sde_mdp_bus_scale_unregister(mdata);
	sde_mdp_destroy_dt_misc(pdev, mdata);
	sde_rot_iounmap(&mdata->vbif_nrt_io);
	sde_rot_iounmap(&mdata->sde_io);
	devm_kfree(&pdev->dev, mdata);
}