sde_hw_lm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/iopoll.h>

#include "sde_kms.h"
#include "sde_hw_catalog.h"
#include "sde_hwio.h"
#include "sde_hw_lm.h"
#include "sde_hw_mdss.h"
#include "sde_dbg.h"
#include "sde_hw_util.h"
#define LM_OP_MODE			0x00
#define LM_OUT_SIZE			0x04
#define LM_BORDER_COLOR_0		0x08
#define LM_BORDER_COLOR_1		0x010

/* These registers are offset from mixer base + stage base */
#define LM_BLEND0_OP			0x00
#define LM_BLEND0_CONST_ALPHA		0x04
#define LM_FG_COLOR_FILL_COLOR_0	0x08
#define LM_FG_COLOR_FILL_COLOR_1	0x0C
#define LM_FG_COLOR_FILL_SIZE		0x10
#define LM_FG_COLOR_FILL_XY		0x14

#define LM_BLEND0_FG_ALPHA		0x04
#define LM_BLEND0_BG_ALPHA		0x08

#define LM_MISR_CTRL			0x310
#define LM_MISR_SIGNATURE		0x314
#define LM_NOISE_LAYER			0x320

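/*
 * _lm_offset(): look up the catalog entry for @mixer and populate the
 * register map @b with its base offset, length and debug mask. Returns
 * the matching sde_lm_cfg, or an ERR_PTR if the mixer id is not in the
 * catalog.
 */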
static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->mixer_count; i++) {
		if (mixer == m->mixer[i].id) {
			b->base_off = addr;
			b->blk_off = m->mixer[i].base;
			b->length = m->mixer[i].len;
			b->hw_rev = m->hw_rev;
			b->log_mask = SDE_DBG_MASK_LM;
			return &m->mixer[i];
		}
	}

	return ERR_PTR(-ENOMEM);
}

/**
 * _stage_offset(): returns the relative offset of the blend registers
 * for the stage to be setup
 * @ctx:   mixer ctx contains the mixer to be programmed
 * @stage: stage index to setup
 */
static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
{
	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
	int rc;

	if (stage == SDE_STAGE_BASE)
		rc = -EINVAL;
	else if (stage <= sblk->maxblendstages)
		rc = sblk->blendstage_base[stage - SDE_STAGE_0];
	else
		rc = -EINVAL;

	return rc;
}

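/*
 * sde_hw_lm_setup_out(): program the mixer output dimensions and the
 * split left/right selection (LM_OP_MODE bit 31) for this mixer.
 */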
static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
		struct sde_hw_mixer_cfg *mixer)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 outsize;
	u32 op_mode;

	op_mode = SDE_REG_READ(c, LM_OP_MODE);

	outsize = mixer->out_height << 16 | mixer->out_width;
	SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);

	/* SPLIT_LEFT_RIGHT */
	if (mixer->right_mixer)
		op_mode |= BIT(31);
	else
		op_mode &= ~BIT(31);
	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
}

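/*
 * sde_hw_lm_setup_border_color(): program the four 12-bit border color
 * components, packed two per register, when border fill is enabled.
 */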
static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
		struct sde_mdss_color *color,
		u8 border_en)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;

	if (border_en) {
		SDE_REG_WRITE(c, LM_BORDER_COLOR_0,
			(color->color_0 & 0xFFF) |
			((color->color_1 & 0xFFF) << 0x10));
		SDE_REG_WRITE(c, LM_BORDER_COLOR_1,
			(color->color_2 & 0xFFF) |
			((color->color_3 & 0xFFF) << 0x10));
	}
}

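/*
 * sde_hw_lm_setup_blend_config_combined_alpha(): program the blend op for
 * @stage with fg/bg constant alpha packed into a single
 * LM_BLENDn_CONST_ALPHA register (fg in bits 16-23, bg in bits 0-7).
 */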
static void sde_hw_lm_setup_blend_config_combined_alpha(
		struct sde_hw_mixer *ctx, u32 stage,
		u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int stage_off;
	u32 const_alpha;

	if (stage == SDE_STAGE_BASE)
		return;

	stage_off = _stage_offset(ctx, stage);
	if (WARN_ON(stage_off < 0))
		return;

	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}

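/*
 * sde_hw_lm_setup_blend_config(): variant used when the mixer does not
 * advertise SDE_MIXER_COMBINED_ALPHA; fg and bg constant alpha live in
 * separate LM_BLENDn_FG_ALPHA / LM_BLENDn_BG_ALPHA registers.
 */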
static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
		u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int stage_off;

	if (stage == SDE_STAGE_BASE)
		return;

	stage_off = _stage_offset(ctx, stage);
	if (WARN_ON(stage_off < 0))
		return;

	SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
	SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}

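/*
 * sde_hw_lm_setup_color3(): update the alpha-out (COLOR3) selection bits
 * in LM_OP_MODE while preserving the top two control bits (bit 31 is the
 * split left/right select programmed in sde_hw_lm_setup_out()).
 */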
static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
		uint32_t mixer_op_mode)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int op_mode;

	/* read the existing op_mode configuration */
	op_mode = SDE_REG_READ(c, LM_OP_MODE);
	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;

	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
}

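/* sde_hw_lm_gc(): gamma-correction hook; no mixer-level programming is done here */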
static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
		void *cfg)
{
}

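/*
 * sde_hw_lm_clear_dim_layer(): walk every blend stage and clear the dim
 * layer (color fill) enable bit, BIT(16), in each LM_BLENDn_OP register.
 */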
static void sde_hw_lm_clear_dim_layer(struct sde_hw_mixer *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
	int stage_off, i;
	u32 reset = BIT(16), val;

	reset = ~reset;
	for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) {
		stage_off = _stage_offset(ctx, i);
		if (WARN_ON(stage_off < 0))
			return;

		/*
		 * read the existing blendn_op register and clear only DIM layer
		 * bit (color_fill bit)
		 */
		val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
		val &= reset;
		SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
	}
}

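/*
 * sde_hw_lm_setup_dim_layer(): program a constant color fill (dim layer)
 * at the requested blend stage: fill color, size and position, then the
 * blend op with BIT(16) (dim enable) and, optionally, BIT(17) for an
 * exclusive dim layer.
 */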
static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
		struct sde_hw_dim_layer *dim_layer)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int stage_off;
	u32 val = 0, alpha = 0;

	if (dim_layer->stage == SDE_STAGE_BASE)
		return;

	stage_off = _stage_offset(ctx, dim_layer->stage);
	if (stage_off < 0) {
		SDE_ERROR("invalid stage_off:%d for dim layer\n", stage_off);
		return;
	}

	alpha = dim_layer->color_fill.color_3 & 0xFF;
	val = ((dim_layer->color_fill.color_1 << 2) & 0xFFF) << 16 |
			((dim_layer->color_fill.color_0 << 2) & 0xFFF);
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_0 + stage_off, val);

	val = (alpha << 4) << 16 |
			((dim_layer->color_fill.color_2 << 2) & 0xFFF);
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_1 + stage_off, val);

	val = dim_layer->rect.h << 16 | dim_layer->rect.w;
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);

	val = dim_layer->rect.y << 16 | dim_layer->rect.x;
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, val);

	val = BIT(16); /* enable dim layer */
	val |= SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
	if (dim_layer->flags & SDE_DRM_DIM_LAYER_EXCLUSIVE)
		val |= BIT(17);
	else
		val &= ~BIT(17);
	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
	val = (alpha << 16) | (0xff - alpha);
	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, val);
}

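/*
 * sde_hw_lm_setup_misr(): clear any stale MISR status, then either enable
 * signature collection for @frame_count frames in free-run mode or leave
 * the MISR disabled when @enable is false.
 */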
static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
		bool enable, u32 frame_count)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 config = 0;

	SDE_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);

	/* clear misr data */
	wmb();

	if (enable)
		config = (frame_count & MISR_FRAME_COUNT_MASK) |
			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;

	SDE_REG_WRITE(c, LM_MISR_CTRL, config);
}

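/*
 * sde_hw_lm_collect_misr(): read back the MISR signature. In blocking mode
 * the call polls LM_MISR_CTRL until the status bit is set (up to ~84 ms);
 * in nonblocking mode the current signature is returned immediately.
 */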
static int sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx, bool nonblock,
		u32 *misr_value)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	u32 ctrl = 0;
	int rc = 0;

	if (!misr_value)
		return -EINVAL;

	ctrl = SDE_REG_READ(c, LM_MISR_CTRL);
	if (!nonblock) {
		if (ctrl & MISR_CTRL_ENABLE) {
			/*
			 * argument order per read_poll_timeout():
			 * sleep_us, timeout_us, sleep_before_read
			 */
			rc = read_poll_timeout(sde_reg_read, ctrl,
					(ctrl & MISR_CTRL_STATUS) > 0,
					500, 84000, false, c, LM_MISR_CTRL);
			if (rc)
				return rc;
		} else {
			return -EINVAL;
		}
	}

	*misr_value = SDE_REG_READ(c, LM_MISR_SIGNATURE);

	return rc;
}

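/*
 * sde_hw_clear_noise_layer(): clear the noise/attenuation enable bits
 * (BIT(18) and BIT(31)) from every blend stage op register and reset
 * LM_NOISE_LAYER.
 */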
static void sde_hw_clear_noise_layer(struct sde_hw_mixer *ctx)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
	int stage_off, i;
	u32 reset = BIT(18) | BIT(31), val;

	reset = ~reset;
	for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) {
		stage_off = _stage_offset(ctx, i);
		if (WARN_ON(stage_off < 0))
			return;

		/* read the blendn_op register and clear only the noise layer bits */
		val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
		val &= reset;
		SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
	}
	SDE_REG_WRITE(c, LM_NOISE_LAYER, 0);
}

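/*
 * sde_hw_lm_setup_noise_layer(): enable the noise layer at
 * @noise_blend_stage and its attenuation layer at the immediately
 * following stage, sizing both to the full mixer output. Passing a NULL
 * cfg clears any existing noise layer state.
 */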
static int sde_hw_lm_setup_noise_layer(struct sde_hw_mixer *ctx,
		struct sde_hw_noise_layer_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &ctx->hw;
	int stage_off;
	u32 val = 0, alpha = 0;
	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
	struct sde_hw_mixer_cfg *mixer = &ctx->cfg;

	sde_hw_clear_noise_layer(ctx);
	if (!cfg)
		return 0;

	if (cfg->noise_blend_stage == SDE_STAGE_BASE ||
	    cfg->noise_blend_stage + 1 != cfg->attn_blend_stage ||
	    cfg->attn_blend_stage >= sblk->maxblendstages) {
		SDE_ERROR("invalid noise_blend_stage %d attn_blend_stage %d max stage %d\n",
			cfg->noise_blend_stage, cfg->attn_blend_stage,
			sblk->maxblendstages);
		return -EINVAL;
	}

	stage_off = _stage_offset(ctx, cfg->noise_blend_stage);
	if (stage_off < 0) {
		SDE_ERROR("invalid stage_off:%d for noise layer blend stage:%d\n",
			stage_off, cfg->noise_blend_stage);
		return -EINVAL;
	}
	val = BIT(18) | BIT(31);
	val |= (1 << 8);
	alpha = 255 | (cfg->alpha_noise << 16);
	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, alpha);
	val = ctx->cfg.out_width | (ctx->cfg.out_height << 16);
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);
	/* partial update is not supported in noise layer */
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, 0);
	val = SDE_REG_READ(c, LM_OP_MODE);
	val = (1 << cfg->noise_blend_stage) | val;
	SDE_REG_WRITE(c, LM_OP_MODE, val);

	stage_off = _stage_offset(ctx, cfg->attn_blend_stage);
	if (stage_off < 0) {
		SDE_ERROR("invalid stage_off:%d for atten layer blend stage:%d\n",
			stage_off, cfg->attn_blend_stage);
		sde_hw_clear_noise_layer(ctx);
		return -EINVAL;
	}
	val = 1 | BIT(31) | BIT(16);
	val |= BIT(2);
	val |= (1 << 8);
	alpha = cfg->attn_factor;
	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, alpha);
	val = SDE_REG_READ(c, LM_OP_MODE);
	val = (1 << cfg->attn_blend_stage) | val;
	SDE_REG_WRITE(c, LM_OP_MODE, val);
	val = ctx->cfg.out_width | (ctx->cfg.out_height << 16);
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_SIZE + stage_off, val);
	/* partial update is not supported in noise layer */
	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, 0);

	val = 1;
	if (mixer->right_mixer)
		val |= (((mixer->out_width % 4) & 0x3) << 4);

	if (cfg->flags & DRM_NOISE_TEMPORAL_FLAG)
		val |= BIT(1);
	val |= ((cfg->strength & 0x7) << 8);
	SDE_REG_WRITE(c, LM_NOISE_LAYER, val);
	return 0;
}

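/*
 * _setup_mixer_ops(): wire up the function table based on the mixer
 * feature bits from the catalog (combined alpha, dim layer, noise layer).
 */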
static void _setup_mixer_ops(struct sde_mdss_cfg *m,
		struct sde_hw_lm_ops *ops,
		unsigned long features)
{
	ops->setup_mixer_out = sde_hw_lm_setup_out;

	if (test_bit(SDE_MIXER_COMBINED_ALPHA, &features))
		ops->setup_blend_config =
			sde_hw_lm_setup_blend_config_combined_alpha;
	else
		ops->setup_blend_config = sde_hw_lm_setup_blend_config;

	ops->setup_alpha_out = sde_hw_lm_setup_color3;
	ops->setup_border_color = sde_hw_lm_setup_border_color;
	ops->setup_gc = sde_hw_lm_gc;
	ops->setup_misr = sde_hw_lm_setup_misr;
	ops->collect_misr = sde_hw_lm_collect_misr;

	if (test_bit(SDE_DIM_LAYER, &features)) {
		ops->setup_dim_layer = sde_hw_lm_setup_dim_layer;
		ops->clear_dim_layer = sde_hw_lm_clear_dim_layer;
	}

	if (test_bit(SDE_MIXER_NOISE_LAYER, &features))
		ops->setup_noise_layer = sde_hw_lm_setup_noise_layer;
}

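/*
 * sde_hw_lm_init(): allocate a mixer context for @idx, resolve its
 * register block from the catalog, attach the ops table and register a
 * debug dump range. Dummy mixers get neither ops nor a dump range.
 *
 * Illustrative usage sketch (hedged): a caller typically resolves the
 * block, checks for an ERR_PTR, fills the mixer cfg, drives it through
 * the ops table and frees it on teardown. The "mmio" and "catalog"
 * variables below are placeholders, not symbols defined in this file.
 *
 *	struct sde_hw_blk_reg_map *hw = sde_hw_lm_init(LM_0, mmio, catalog);
 *	struct sde_hw_mixer *lm;
 *
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 *	lm = to_sde_hw_mixer(hw);
 *	// ... fill lm->cfg (out_width, out_height, right_mixer) ...
 *	lm->ops.setup_mixer_out(lm, &lm->cfg);
 *	// ... blend setup, commit, etc. ...
 *	sde_hw_lm_destroy(hw);
 */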
struct sde_hw_blk_reg_map *sde_hw_lm_init(enum sde_lm idx,
		void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_mixer *c;
	struct sde_lm_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _lm_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	/* Assign ops */
	c->idx = idx;
	c->cap = cfg;
	/* Dummy mixers should not setup ops nor add to dump ranges */
	if (cfg->dummy_mixer)
		goto done;

	_setup_mixer_ops(m, &c->ops, c->cap->features);

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
			c->hw.blk_off + c->hw.length, c->hw.xin_id);

done:
	return &c->hw;
}

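/* sde_hw_lm_destroy(): free the mixer context allocated by sde_hw_lm_init() */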
void sde_hw_lm_destroy(struct sde_hw_blk_reg_map *hw)
{
	if (hw)
		kfree(to_sde_hw_mixer(hw));
}