// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include "sde_hw_ds.h"
#include "sde_formats.h"
#include "sde_dbg.h"
#include "sde_kms.h"

/* Destination scaler TOP registers */
#define DEST_SCALER_OP_MODE		0x00
#define DEST_SCALER_HW_VERSION		0x10
#define DEST_SCALER_MERGE_CTRL		0x0C
#define DEST_SCALER_QSEED3_OP_MODE	0x04

#define DEST_SCALER_DUAL_PIPE		1
#define DEST_SCALER_QUAD_PIPE		3
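/*
 * sde_hw_ds_setup_opmode - program the legacy DEST_SCALER_OP_MODE register.
 * A non-zero op_mode is OR'ed into the current value; op_mode == 0 either
 * clears the whole register (when dual mode was active) or clears only the
 * enable bit of this DS instance.
 */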
static void sde_hw_ds_setup_opmode(struct sde_hw_ds *hw_ds, u32 op_mode)
{
	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
	u32 op_mode_val;

	op_mode_val = SDE_REG_READ(hw, DEST_SCALER_OP_MODE);

	if (op_mode)
		op_mode_val |= op_mode;
	else if (!op_mode && (op_mode_val & SDE_DS_OP_MODE_DUAL))
		op_mode_val = 0;
	else
		op_mode_val &= ~BIT(hw_ds->idx - DS_0);

	SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode_val);
}
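/*
 * sde_hw_ds_setup_opmode_v1 - program the per-DS MERGE_CTRL register on
 * targets with the SDE_DS_MERGE_CTRL feature. A dual-pipe request is
 * translated to DEST_SCALER_DUAL_PIPE; a zero op_mode clears the register.
 */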
static void sde_hw_ds_setup_opmode_v1(struct sde_hw_ds *hw_ds, u32 op_mode)
{
	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;

	if (op_mode & SDE_DS_OP_MODE_DUAL) {
		op_mode = DEST_SCALER_DUAL_PIPE;
		SDE_REG_WRITE(hw, DEST_SCALER_MERGE_CTRL + hw_ds->scl->base, op_mode);
	} else if (!op_mode) {
		SDE_REG_WRITE(hw, DEST_SCALER_MERGE_CTRL + hw_ds->scl->base, op_mode);
	}
}
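/*
 * sde_hw_ds_setup_scaler3 - configure the QSEED3/QSEED3LITE scaler for this
 * DS block. LUTs from scaler_lut_cfg are copied into the scaler config when
 * they have been loaded, and DE LPF blending is enabled when the catalog
 * advertises SDE_DS_DE_LPF_BLEND.
 */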
static void sde_hw_ds_setup_scaler3(struct sde_hw_ds *hw_ds,
			void *scaler_cfg, void *scaler_lut_cfg)
{
	struct sde_hw_scaler3_cfg *scl3_cfg = scaler_cfg;
	struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg = scaler_lut_cfg;
	bool de_lpf_en = false;

	if (!hw_ds || !hw_ds->scl || !scl3_cfg || !scl3_lut_cfg)
		return;

	/*
	 * copy LUT values to scaler structure
	 */
	if (scl3_lut_cfg->is_configured) {
		scl3_cfg->dir_lut = scl3_lut_cfg->dir_lut;
		scl3_cfg->dir_len = scl3_lut_cfg->dir_len;
		scl3_cfg->cir_lut = scl3_lut_cfg->cir_lut;
		scl3_cfg->cir_len = scl3_lut_cfg->cir_len;
		scl3_cfg->sep_lut = scl3_lut_cfg->sep_lut;
		scl3_cfg->sep_len = scl3_lut_cfg->sep_len;
	}

	if (test_bit(SDE_DS_DE_LPF_BLEND, &hw_ds->scl->features))
		de_lpf_en = true;

	sde_hw_setup_scaler3(&hw_ds->hw, scl3_cfg, hw_ds->scl->version,
			hw_ds->scl->base,
			sde_get_sde_format(DRM_FORMAT_XBGR2101010), de_lpf_en);
}
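/*
 * sde_hw_ds_disable_dest_scaler - clear MERGE_CTRL and the QSEED3 op mode so
 * the destination scaler is fully bypassed.
 */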
static void sde_hw_ds_disable_dest_scaler(struct sde_hw_ds *hw_ds)
{
	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;

	SDE_REG_WRITE(hw, hw_ds->scl->base + DEST_SCALER_MERGE_CTRL, 0x0);
	SDE_REG_WRITE(hw, hw_ds->scl->base + DEST_SCALER_QSEED3_OP_MODE, 0x0);
}
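/*
 * _setup_ds_ops - populate the ops table from the catalog features:
 * MERGE_CTRL targets get the v1 opmode and disable hooks, and setup_scaler
 * is wired up only when a QSEED3 or QSEED3LITE scaler is present.
 */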
static void _setup_ds_ops(struct sde_hw_ds_ops *ops, unsigned long features)
{
	if (test_bit(SDE_DS_MERGE_CTRL, &features)) {
		ops->setup_opmode = sde_hw_ds_setup_opmode_v1;
		ops->disable_dest_scl = sde_hw_ds_disable_dest_scaler;
	} else {
		ops->setup_opmode = sde_hw_ds_setup_opmode;
	}

	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features) ||
			test_bit(SDE_SSPP_SCALER_QSEED3LITE, &features))
		ops->setup_scaler = sde_hw_ds_setup_scaler3;
}
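/*
 * _ds_offset - look up the catalog entry for the requested DS instance and
 * fill in its register block mapping; returns -EINVAL when no matching
 * entry with a valid top block is found.
 */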
static struct sde_ds_cfg *_ds_offset(enum sde_ds ds,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	if (!m || !addr || !b)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < m->ds_count; i++) {
		if ((ds == m->ds[i].id) &&
			(m->ds[i].top)) {
			b->base_off = addr;
			b->blk_off = m->ds[i].top->base;
			b->length = m->ds[i].top->len;
			b->hw_rev = m->hw_rev;
			b->log_mask = SDE_DBG_MASK_DS;
			return &m->ds[i];
		}
	}

	return ERR_PTR(-EINVAL);
}
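/*
 * sde_hw_ds_init - allocate and initialize a destination scaler block for
 * the given index, using the catalog entry found by _ds_offset(). On
 * success the embedded reg map is returned; callers recover the sde_hw_ds
 * with to_sde_hw_ds() and release it with sde_hw_ds_destroy().
 *
 * Illustrative call sequence (caller names and error handling below are
 * assumptions for documentation only, not part of this file):
 *
 *	struct sde_hw_blk_reg_map *map = sde_hw_ds_init(DS_0, mmio, catalog);
 *	struct sde_hw_ds *ds;
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	ds = to_sde_hw_ds(map);
 *	if (ds->ops.setup_opmode)
 *		ds->ops.setup_opmode(ds, SDE_DS_OP_MODE_DUAL);
 *	...
 *	sde_hw_ds_destroy(map);
 */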
struct sde_hw_blk_reg_map *sde_hw_ds_init(enum sde_ds idx,
			void __iomem *addr,
			struct sde_mdss_cfg *m)
{
	struct sde_hw_ds *hw_ds;
	struct sde_ds_cfg *cfg;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	hw_ds = kzalloc(sizeof(*hw_ds), GFP_KERNEL);
	if (!hw_ds)
		return ERR_PTR(-ENOMEM);

	cfg = _ds_offset(idx, m, addr, &hw_ds->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		SDE_ERROR("failed to get ds cfg\n");
		kfree(hw_ds);
		return ERR_PTR(-EINVAL);
	}

	/* Assign ops */
	hw_ds->idx = idx;
	hw_ds->scl = cfg;
	_setup_ds_ops(&hw_ds->ops, hw_ds->scl->features);

	if (m->qseed_hw_rev)
		hw_ds->scl->version = m->qseed_hw_rev;

	if (cfg->len) {
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
				hw_ds->hw.blk_off + cfg->base,
				hw_ds->hw.blk_off + cfg->base + cfg->len,
				hw_ds->hw.xin_id);
	}

	return &hw_ds->hw;
}
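/*
 * sde_hw_ds_destroy - free the sde_hw_ds container that embeds the given
 * reg map; a NULL map is a no-op.
 */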
void sde_hw_ds_destroy(struct sde_hw_blk_reg_map *hw)
{
	if (hw)
		kfree(to_sde_hw_ds(hw));
}