sde_hw_lm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/iopoll.h>

#include "sde_kms.h"
#include "sde_hw_catalog.h"
#include "sde_hwio.h"
#include "sde_hw_lm.h"
#include "sde_hw_mdss.h"
#include "sde_dbg.h"

#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
#define LM_BORDER_COLOR_0 0x08
#define LM_BORDER_COLOR_1 0x010

/* These registers are offsets from mixer base + stage base */
#define LM_BLEND0_OP 0x00
#define LM_BLEND0_CONST_ALPHA 0x04
#define LM_FG_COLOR_FILL_COLOR_0 0x08
#define LM_FG_COLOR_FILL_COLOR_1 0x0C
#define LM_FG_COLOR_FILL_SIZE 0x10
#define LM_FG_COLOR_FILL_XY 0x14

#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08

#define LM_MISR_CTRL 0x310
#define LM_MISR_SIGNATURE 0x314
#define LM_NOISE_LAYER 0x320

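/**
 * _lm_offset(): populates the register map for the requested mixer and
 * returns its catalog entry, or an ERR_PTR() if the id is not in the catalog
 * @mixer: mixer index to look up
 * @m: pointer to the MDSS hardware catalog
 * @addr: mapped register I/O base address
 * @b: register map to populate for this mixer block
 */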
static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
                struct sde_mdss_cfg *m,
                void __iomem *addr,
                struct sde_hw_blk_reg_map *b)
{
        int i;

        for (i = 0; i < m->mixer_count; i++) {
                if (mixer == m->mixer[i].id) {
                        b->base_off = addr;
                        b->blk_off = m->mixer[i].base;
                        b->length = m->mixer[i].len;
                        b->hw_rev = m->hw_rev;
                        b->log_mask = SDE_DBG_MASK_LM;
                        return &m->mixer[i];
                }
        }

        return ERR_PTR(-ENOMEM);
}

/**
 * _stage_offset(): returns the relative offset of the blend registers
 * for the stage to be set up
 * @ctx: mixer ctx that contains the mixer to be programmed
 * @stage: stage index to set up
 */
static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
{
        const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
        int rc;

        if (stage == SDE_STAGE_BASE)
                rc = -EINVAL;
        else if (stage <= sblk->maxblendstages)
                rc = sblk->blendstage_base[stage - SDE_STAGE_0];
        else
                rc = -EINVAL;

        return rc;
}

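/**
 * sde_hw_lm_setup_out(): programs the mixer output size and the
 * SPLIT_LEFT_RIGHT bit that marks this mixer as the right half of a pair
 */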
static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
                struct sde_hw_mixer_cfg *mixer)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        u32 outsize;
        u32 op_mode;

        op_mode = SDE_REG_READ(c, LM_OP_MODE);

        outsize = mixer->out_height << 16 | mixer->out_width;
        SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);

        /* SPLIT_LEFT_RIGHT */
        if (mixer->right_mixer)
                op_mode |= BIT(31);
        else
                op_mode &= ~BIT(31);

        SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
}

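/**
 * sde_hw_lm_setup_border_color(): writes the border fill color; each
 * component is masked to 12 bits and two components are packed per register
 */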
static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
                struct sde_mdss_color *color,
                u8 border_en)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;

        if (border_en) {
                SDE_REG_WRITE(c, LM_BORDER_COLOR_0,
                        (color->color_0 & 0xFFF) |
                        ((color->color_1 & 0xFFF) << 0x10));
                SDE_REG_WRITE(c, LM_BORDER_COLOR_1,
                        (color->color_2 & 0xFFF) |
                        ((color->color_3 & 0xFFF) << 0x10));
        }
}

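/**
 * sde_hw_lm_setup_blend_config_combined_alpha(): blend setup for targets
 * with SDE_MIXER_COMBINED_ALPHA, where the foreground and background
 * constant alpha are packed into a single LM_BLEND0_CONST_ALPHA register
 */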
static void sde_hw_lm_setup_blend_config_combined_alpha(
        struct sde_hw_mixer *ctx, u32 stage,
        u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        int stage_off;
        u32 const_alpha;

        if (stage == SDE_STAGE_BASE)
                return;

        stage_off = _stage_offset(ctx, stage);
        if (WARN_ON(stage_off < 0))
                return;

        const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
        SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
        SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}

static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
        u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        int stage_off;

        if (stage == SDE_STAGE_BASE)
                return;

        stage_off = _stage_offset(ctx, stage);
        if (WARN_ON(stage_off < 0))
                return;

        SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
        SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
        SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}

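/**
 * sde_hw_lm_setup_color3(): updates the per-stage alpha/color3 output
 * selection in LM_OP_MODE while preserving the split-mixer bits (31:30)
 */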
static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
        uint32_t mixer_op_mode)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        int op_mode;

        /* read the existing op_mode configuration */
        op_mode = SDE_REG_READ(c, LM_OP_MODE);
        op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;

        SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
}

static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
                void *cfg)
{
}

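/**
 * sde_hw_lm_clear_dim_layer(): walks every blend stage and clears only the
 * dim-layer/color-fill enable bit (BIT(16)) in each LM_BLEND0_OP register
 */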
static void sde_hw_lm_clear_dim_layer(struct sde_hw_mixer *ctx)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
        int stage_off, i;
        u32 reset = BIT(16), val;

        reset = ~reset;
        for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) {
                stage_off = _stage_offset(ctx, i);
                if (WARN_ON(stage_off < 0))
                        return;

                /*
                 * read the existing blendn_op register and clear only the
                 * DIM layer bit (color_fill bit)
                 */
                val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
                val &= reset;
                SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
        }
}

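/**
 * sde_hw_lm_setup_dim_layer(): programs a constant color fill at the given
 * blend stage: fill color, rectangle size and position, the blend op with
 * the dim-layer enable bit, and the constant fg/bg alpha pair
 */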
static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
                struct sde_hw_dim_layer *dim_layer)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        int stage_off;
        u32 val = 0, alpha = 0;

        if (dim_layer->stage == SDE_STAGE_BASE)
                return;

        stage_off = _stage_offset(ctx, dim_layer->stage);
        if (stage_off < 0) {
                SDE_ERROR("invalid stage_off:%d for dim layer\n", stage_off);
                return;
        }

        alpha = dim_layer->color_fill.color_3 & 0xFF;
        val = ((dim_layer->color_fill.color_1 << 2) & 0xFFF) << 16 |
                        ((dim_layer->color_fill.color_0 << 2) & 0xFFF);
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_0 + stage_off, val);

        val = (alpha << 4) << 16 |
                        ((dim_layer->color_fill.color_2 << 2) & 0xFFF);
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_1 + stage_off, val);

        val = dim_layer->rect.h << 16 | dim_layer->rect.w;
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);

        val = dim_layer->rect.y << 16 | dim_layer->rect.x;
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, val);

        val = BIT(16); /* enable dim layer */
        val |= SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
        if (dim_layer->flags & SDE_DRM_DIM_LAYER_EXCLUSIVE)
                val |= BIT(17);
        else
                val &= ~BIT(17);
        SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);

        val = (alpha << 16) | (0xff - alpha);
        SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, val);
}

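/**
 * sde_hw_lm_setup_misr(): clears any stale MISR status, then either enables
 * signature collection for @frame_count frames in free-run mode or leaves
 * the block disabled when @enable is false
 */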
static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
                bool enable, u32 frame_count)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        u32 config = 0;

        SDE_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);

        /* clear misr data */
        wmb();

        if (enable)
                config = (frame_count & MISR_FRAME_COUNT_MASK) |
                        MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;

        SDE_REG_WRITE(c, LM_MISR_CTRL, config);
}

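/**
 * sde_hw_lm_collect_misr(): reads back the MISR signature; in blocking mode
 * it first polls LM_MISR_CTRL until the status bit reports a valid result,
 * in nonblocking mode it returns whatever the signature register holds
 */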
static int sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx, bool nonblock,
                u32 *misr_value)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        u32 ctrl = 0;

        if (!misr_value)
                return -EINVAL;

        ctrl = SDE_REG_READ(c, LM_MISR_CTRL);
        if (!nonblock) {
                if (ctrl & MISR_CTRL_ENABLE) {
                        int rc;

                        rc = readl_poll_timeout(c->base_off + c->blk_off +
                                        LM_MISR_CTRL, ctrl,
                                        (ctrl & MISR_CTRL_STATUS) > 0, 500,
                                        84000);
                        if (rc)
                                return rc;
                } else {
                        return -EINVAL;
                }
        }

        *misr_value = SDE_REG_READ(c, LM_MISR_SIGNATURE);

        return 0;
}

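/**
 * sde_hw_clear_noise_layer(): clears the noise-layer related bits (BIT(18)
 * and BIT(31)) from every blend stage op register and writes zero to the
 * LM_NOISE_LAYER register to disable the block
 */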
static void sde_hw_clear_noise_layer(struct sde_hw_mixer *ctx)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
        int stage_off, i;
        u32 reset = BIT(18) | BIT(31), val;

        reset = ~reset;
        for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) {
                stage_off = _stage_offset(ctx, i);
                if (WARN_ON(stage_off < 0))
                        return;

                /*
                 * read the blendn_op register and clear only the noise
                 * layer bits
                 */
                val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
                val &= reset;
                SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
        }
        SDE_REG_WRITE(c, LM_NOISE_LAYER, 0);
}

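/**
 * sde_hw_lm_setup_noise_layer(): programs the noise layer across two
 * consecutive blend stages: the noise blend stage itself and the
 * attenuation stage that must sit directly above it. A NULL @cfg simply
 * clears any previously programmed noise layer.
 */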
static int sde_hw_lm_setup_noise_layer(struct sde_hw_mixer *ctx,
                struct sde_hw_noise_layer_cfg *cfg)
{
        struct sde_hw_blk_reg_map *c = &ctx->hw;
        int stage_off;
        u32 val = 0, alpha = 0;
        const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
        struct sde_hw_mixer_cfg *mixer = &ctx->cfg;

        sde_hw_clear_noise_layer(ctx);
        if (!cfg)
                return 0;

        if (cfg->noise_blend_stage == SDE_STAGE_BASE ||
                cfg->noise_blend_stage + 1 != cfg->attn_blend_stage ||
                cfg->attn_blend_stage >= sblk->maxblendstages) {
                SDE_ERROR("invalid noise_blend_stage %d attn_blend_stage %d max stage %d\n",
                        cfg->noise_blend_stage, cfg->attn_blend_stage, sblk->maxblendstages);
                return -EINVAL;
        }

        stage_off = _stage_offset(ctx, cfg->noise_blend_stage);
        if (stage_off < 0) {
                SDE_ERROR("invalid stage_off:%d for noise layer blend stage:%d\n",
                        stage_off, cfg->noise_blend_stage);
                return -EINVAL;
        }
        val = BIT(18) | BIT(31);
        val |= (1 << 8);
        alpha = 255 | (cfg->alpha_noise << 16);
        SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
        SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, alpha);
        val = ctx->cfg.out_width | (ctx->cfg.out_height << 16);
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);
        /* partial update is not supported in noise layer */
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, 0);

        val = SDE_REG_READ(c, LM_OP_MODE);
        val = (1 << cfg->noise_blend_stage) | val;
        SDE_REG_WRITE(c, LM_OP_MODE, val);

        stage_off = _stage_offset(ctx, cfg->attn_blend_stage);
        if (stage_off < 0) {
                SDE_ERROR("invalid stage_off:%d for atten layer blend stage:%d\n",
                        stage_off, cfg->attn_blend_stage);
                sde_hw_clear_noise_layer(ctx);
                return -EINVAL;
        }
        val = 1 | BIT(31) | BIT(16);
        val |= BIT(2);
        val |= (1 << 8);
        alpha = cfg->attn_factor;
        SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
        SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, alpha);
        val = SDE_REG_READ(c, LM_OP_MODE);
        val = (1 << cfg->attn_blend_stage) | val;
        SDE_REG_WRITE(c, LM_OP_MODE, val);
        val = ctx->cfg.out_width | (ctx->cfg.out_height << 16);
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);
        /* partial update is not supported in noise layer */
        SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, 0);

        val = 1;
        if (mixer->right_mixer)
                val |= (((mixer->out_width % 4) & 0x3) << 4);

        if (cfg->flags & DRM_NOISE_TEMPORAL_FLAG)
                val |= BIT(1);
        val |= ((cfg->strength & 0x7) << 8);
        SDE_REG_WRITE(c, LM_NOISE_LAYER, val);
        return 0;
}

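/**
 * _setup_mixer_ops(): populates the mixer op table; optional ops such as
 * the dim layer and noise layer hooks are only assigned when the
 * corresponding catalog feature bits are set for this mixer
 */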
static void _setup_mixer_ops(struct sde_mdss_cfg *m,
                struct sde_hw_lm_ops *ops,
                unsigned long features)
{
        ops->setup_mixer_out = sde_hw_lm_setup_out;

        if (test_bit(SDE_MIXER_COMBINED_ALPHA, &features))
                ops->setup_blend_config =
                        sde_hw_lm_setup_blend_config_combined_alpha;
        else
                ops->setup_blend_config = sde_hw_lm_setup_blend_config;

        ops->setup_alpha_out = sde_hw_lm_setup_color3;
        ops->setup_border_color = sde_hw_lm_setup_border_color;
        ops->setup_gc = sde_hw_lm_gc;
        ops->setup_misr = sde_hw_lm_setup_misr;
        ops->collect_misr = sde_hw_lm_collect_misr;

        if (test_bit(SDE_DIM_LAYER, &features)) {
                ops->setup_dim_layer = sde_hw_lm_setup_dim_layer;
                ops->clear_dim_layer = sde_hw_lm_clear_dim_layer;
        }

        if (test_bit(SDE_MIXER_NOISE_LAYER, &features))
                ops->setup_noise_layer = sde_hw_lm_setup_noise_layer;
}

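/**
 * sde_hw_lm_init(): allocates a mixer context, maps its registers from the
 * catalog, assigns the op table and registers a debug dump range; dummy
 * mixers skip the op setup and dump registration. Returns the embedded
 * register map, which callers convert back with to_sde_hw_mixer(), or an
 * ERR_PTR() on failure.
 *
 * A minimal caller-side sketch (hypothetical variable names, assuming an
 * index such as LM_0 from the sde_lm enum, an already-mapped @mmio base and
 * a parsed @catalog):
 *
 *	struct sde_hw_blk_reg_map *hw = sde_hw_lm_init(LM_0, mmio, catalog);
 *	struct sde_hw_mixer *lm;
 *
 *	if (IS_ERR_OR_NULL(hw))
 *		return PTR_ERR(hw);
 *	lm = to_sde_hw_mixer(hw);
 *	if (lm->ops.setup_mixer_out)
 *		lm->ops.setup_mixer_out(lm, &lm->cfg);
 *	...
 *	sde_hw_lm_destroy(hw);
 */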
struct sde_hw_blk_reg_map *sde_hw_lm_init(enum sde_lm idx,
                void __iomem *addr,
                struct sde_mdss_cfg *m)
{
        struct sde_hw_mixer *c;
        struct sde_lm_cfg *cfg;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return ERR_PTR(-ENOMEM);

        cfg = _lm_offset(idx, m, addr, &c->hw);
        if (IS_ERR_OR_NULL(cfg)) {
                kfree(c);
                return ERR_PTR(-EINVAL);
        }

        /* Assign ops */
        c->idx = idx;
        c->cap = cfg;

        /* Dummy mixers should not set up ops nor add to dump ranges */
        if (cfg->dummy_mixer)
                goto done;

        _setup_mixer_ops(m, &c->ops, c->cap->features);

        sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
                        c->hw.blk_off + c->hw.length, c->hw.xin_id);

done:
        return &c->hw;
}

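/**
 * sde_hw_lm_destroy(): frees the mixer context allocated by sde_hw_lm_init()
 * @hw: register map returned by sde_hw_lm_init(); NULL is tolerated
 */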
void sde_hw_lm_destroy(struct sde_hw_blk_reg_map *hw)
{
        if (hw)
                kfree(to_sde_hw_mixer(hw));
}