  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #include "sde_hw_ds.h"
  7. #include "sde_formats.h"
  8. #include "sde_dbg.h"
  9. #include "sde_kms.h"
  10. /* Destination scaler TOP registers */
  11. #define DEST_SCALER_OP_MODE 0x00
  12. #define DEST_SCALER_HW_VERSION 0x10
  13. #define DEST_SCALER_MERGE_CTRL 0x0C
  14. #define DEST_SCALER_DUAL_PIPE 1
  15. #define DEST_SCALER_QUAD_PIPE 3
  16. static void sde_hw_ds_setup_opmode(struct sde_hw_ds *hw_ds, u32 op_mode)
  17. {
  18. struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
  19. u32 op_mode_val;
  20. op_mode_val = SDE_REG_READ(hw, DEST_SCALER_OP_MODE);
  21. if (op_mode)
  22. op_mode_val |= op_mode;
  23. else if (!op_mode && (op_mode_val & SDE_DS_OP_MODE_DUAL))
  24. op_mode_val = 0;
  25. else
  26. op_mode_val &= ~BIT(hw_ds->idx - DS_0);
  27. SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode_val);
  28. }
  29. static void sde_hw_ds_setup_opmode_v1(struct sde_hw_ds *hw_ds, u32 op_mode)
  30. {
  31. struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
  32. if (op_mode & SDE_DS_OP_MODE_DUAL) {
  33. op_mode = DEST_SCALER_DUAL_PIPE;
  34. SDE_REG_WRITE(hw, DEST_SCALER_MERGE_CTRL + hw_ds->scl->base, op_mode);
  35. }
  36. }
  37. static void sde_hw_ds_setup_scaler3(struct sde_hw_ds *hw_ds,
  38. void *scaler_cfg, void *scaler_lut_cfg)
  39. {
  40. struct sde_hw_scaler3_cfg *scl3_cfg = scaler_cfg;
  41. struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg = scaler_lut_cfg;
  42. bool de_lpf_en = false;
  43. if (!hw_ds || !hw_ds->scl || !scl3_cfg || !scl3_lut_cfg)
  44. return;
  45. /*
  46. * copy LUT values to scaler structure
  47. */
  48. if (scl3_lut_cfg->is_configured) {
  49. scl3_cfg->dir_lut = scl3_lut_cfg->dir_lut;
  50. scl3_cfg->dir_len = scl3_lut_cfg->dir_len;
  51. scl3_cfg->cir_lut = scl3_lut_cfg->cir_lut;
  52. scl3_cfg->cir_len = scl3_lut_cfg->cir_len;
  53. scl3_cfg->sep_lut = scl3_lut_cfg->sep_lut;
  54. scl3_cfg->sep_len = scl3_lut_cfg->sep_len;
  55. }
  56. if (test_bit(SDE_DS_DE_LPF_BLEND, &hw_ds->scl->features))
  57. de_lpf_en = true;
  58. sde_hw_setup_scaler3(&hw_ds->hw, scl3_cfg, hw_ds->scl->version,
  59. hw_ds->scl->base,
  60. sde_get_sde_format(DRM_FORMAT_XBGR2101010), de_lpf_en);
  61. }
  62. static void _setup_ds_ops(struct sde_hw_ds_ops *ops, unsigned long features)
  63. {
  64. if (test_bit(SDE_DS_MERGE_CTRL, &features))
  65. ops->setup_opmode = sde_hw_ds_setup_opmode_v1;
  66. else
  67. ops->setup_opmode = sde_hw_ds_setup_opmode;
  68. if (test_bit(SDE_SSPP_SCALER_QSEED3, &features) ||
  69. test_bit(SDE_SSPP_SCALER_QSEED3LITE, &features))
  70. ops->setup_scaler = sde_hw_ds_setup_scaler3;
  71. }
  72. static struct sde_ds_cfg *_ds_offset(enum sde_ds ds,
  73. struct sde_mdss_cfg *m,
  74. void __iomem *addr,
  75. struct sde_hw_blk_reg_map *b)
  76. {
  77. int i;
  78. if (!m || !addr || !b)
  79. return ERR_PTR(-EINVAL);
  80. for (i = 0; i < m->ds_count; i++) {
  81. if ((ds == m->ds[i].id) &&
  82. (m->ds[i].top)) {
  83. b->base_off = addr;
  84. b->blk_off = m->ds[i].top->base;
  85. b->length = m->ds[i].top->len;
  86. b->hw_rev = m->hw_rev;
  87. b->log_mask = SDE_DBG_MASK_DS;
  88. return &m->ds[i];
  89. }
  90. }
  91. return ERR_PTR(-EINVAL);
  92. }
  93. struct sde_hw_blk_reg_map *sde_hw_ds_init(enum sde_ds idx,
  94. void __iomem *addr,
  95. struct sde_mdss_cfg *m)
  96. {
  97. struct sde_hw_ds *hw_ds;
  98. struct sde_ds_cfg *cfg;
  99. if (!addr || !m)
  100. return ERR_PTR(-EINVAL);
  101. hw_ds = kzalloc(sizeof(*hw_ds), GFP_KERNEL);
  102. if (!hw_ds)
  103. return ERR_PTR(-ENOMEM);
  104. cfg = _ds_offset(idx, m, addr, &hw_ds->hw);
  105. if (IS_ERR_OR_NULL(cfg)) {
  106. SDE_ERROR("failed to get ds cfg\n");
  107. kfree(hw_ds);
  108. return ERR_PTR(-EINVAL);
  109. }
  110. /* Assign ops */
  111. hw_ds->idx = idx;
  112. hw_ds->scl = cfg;
  113. _setup_ds_ops(&hw_ds->ops, hw_ds->scl->features);
  114. if (m->qseed_hw_rev)
  115. hw_ds->scl->version = m->qseed_hw_rev;
  116. if (cfg->len) {
  117. sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
  118. hw_ds->hw.blk_off + cfg->base,
  119. hw_ds->hw.blk_off + cfg->base + cfg->len,
  120. hw_ds->hw.xin_id);
  121. }
  122. return &hw_ds->hw;
  123. }
  124. void sde_hw_ds_destroy(struct sde_hw_blk_reg_map *hw)
  125. {
  126. if (hw)
  127. kfree(to_sde_hw_ds(hw));
  128. }