// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include "sde_reg_dma.h"
#include "sde_hw_reg_dma_v1.h"
#include "sde_dbg.h"

#define REG_DMA_VER_1_0 0x00010000
#define REG_DMA_VER_1_1 0x00010001
#define REG_DMA_VER_1_2 0x00010002
#define REG_DMA_VER_2_0 0x00020000
#define REG_DMA_VER_3_0 0x00030000
  15. static int default_check_support(enum sde_reg_dma_features feature,
  16. enum sde_reg_dma_blk blk,
  17. bool *is_supported)
  18. {
  19. if (!is_supported)
  20. return -EINVAL;
  21. *is_supported = false;
  22. return 0;
  23. }
  24. static int default_setup_payload(struct sde_reg_dma_setup_ops_cfg *cfg)
  25. {
  26. DRM_ERROR("not implemented\n");
  27. return -EINVAL;
  28. }
  29. static int default_kick_off(struct sde_reg_dma_kickoff_cfg *cfg)
  30. {
  31. DRM_ERROR("not implemented\n");
  32. return -EINVAL;
  33. }
  34. static int default_reset(struct sde_hw_ctl *ctl)
  35. {
  36. DRM_ERROR("not implemented\n");
  37. return -EINVAL;
  38. }
  39. struct sde_reg_dma_buffer *default_alloc_reg_dma_buf(u32 size)
  40. {
  41. DRM_ERROR("not implemented\n");
  42. return ERR_PTR(-EINVAL);
  43. }
  44. int default_dealloc_reg_dma(struct sde_reg_dma_buffer *lut_buf)
  45. {
  46. DRM_ERROR("not implemented\n");
  47. return -EINVAL;
  48. }
  49. static int default_buf_reset_reg_dma(struct sde_reg_dma_buffer *lut_buf)
  50. {
  51. DRM_ERROR("not implemented\n");
  52. return -EINVAL;
  53. }
  54. static int default_last_command(struct sde_hw_ctl *ctl,
  55. enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
  56. {
  57. return 0;
  58. }
  59. static int default_last_command_sb(struct sde_hw_ctl *ctl,
  60. enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
  61. {
  62. return 0;
  63. }
  64. static void default_dump_reg(void)
  65. {
  66. }
  67. static void set_default_dma_ops(struct sde_hw_reg_dma *reg_dma)
  68. {
  69. const static struct sde_hw_reg_dma_ops ops = {
  70. default_check_support, default_setup_payload,
  71. default_kick_off, default_reset, default_alloc_reg_dma_buf,
  72. default_dealloc_reg_dma, default_buf_reset_reg_dma,
  73. default_last_command, default_last_command_sb,
  74. default_dump_reg};
  75. memcpy(&reg_dma->ops, &ops, sizeof(ops));
  76. }
/* Singleton reg-dma state shared by init/get_ops/deinit below. */
static struct sde_hw_reg_dma reg_dma;
  78. int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
  79. struct drm_device *dev)
  80. {
  81. int rc = 0;
  82. set_default_dma_ops(&reg_dma);
  83. /**
  84. * Register dummy range to ensure register dump is only done on
  85. * targeted LUTDMA regions. start = 1, end = 1 so full range isn't used
  86. */
  87. sde_dbg_reg_register_dump_range(LUTDMA_DBG_NAME, "DUMMY_LUTDMA", 1, 1,
  88. m->dma_cfg.xin_id);
  89. if (!addr || !m || !dev) {
  90. DRM_DEBUG("invalid addr %pK catalog %pK dev %pK\n", addr, m,
  91. dev);
  92. return 0;
  93. }
  94. if (!m->reg_dma_count)
  95. return 0;
  96. reg_dma.reg_dma_count = m->reg_dma_count;
  97. reg_dma.drm_dev = dev;
  98. reg_dma.addr = addr;
  99. reg_dma.caps = &m->dma_cfg;
  100. switch (reg_dma.caps->version) {
  101. case REG_DMA_VER_1_0:
  102. rc = init_v1(&reg_dma);
  103. if (rc)
  104. DRM_DEBUG("init v1 dma ops failed\n");
  105. break;
  106. case REG_DMA_VER_1_1:
  107. rc = init_v11(&reg_dma);
  108. if (rc)
  109. DRM_DEBUG("init v11 dma ops failed\n");
  110. break;
  111. case REG_DMA_VER_1_2:
  112. rc = init_v12(&reg_dma);
  113. if (rc)
  114. DRM_DEBUG("init v12 dma ops failed\n");
  115. break;
  116. case REG_DMA_VER_2_0:
  117. rc = init_v2(&reg_dma);
  118. if (rc)
  119. DRM_DEBUG("init v2 dma ops failed\n");
  120. break;
  121. case REG_DMA_VER_3_0:
  122. rc = init_v3(&reg_dma);
  123. if (rc)
  124. DRM_DEBUG("init v3 dma ops failed\n");
  125. break;
  126. default:
  127. break;
  128. }
  129. return rc;
  130. }
  131. struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void)
  132. {
  133. return &reg_dma.ops;
  134. }
  135. void sde_reg_dma_deinit(void)
  136. {
  137. if (!reg_dma.drm_dev || !reg_dma.caps)
  138. return;
  139. switch (reg_dma.caps->version) {
  140. case REG_DMA_VER_1_0:
  141. case REG_DMA_VER_1_1:
  142. case REG_DMA_VER_1_2:
  143. case REG_DMA_VER_2_0:
  144. deinit_v1();
  145. break;
  146. default:
  147. break;
  148. }
  149. memset(&reg_dma, 0, sizeof(reg_dma));
  150. set_default_dma_ops(&reg_dma);
  151. }