sde_reg_dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */
#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include "sde_reg_dma.h"
#include "sde_hw_reg_dma_v1.h"
#include "sde_dbg.h"
#include "sde_hw_ctl.h"
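
/*
 * Supported LUTDMA hardware revisions, packed as major version in the
 * upper 16 bits and minor version in the lower 16 bits.
 */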
#define REG_DMA_VER_1_0 0x00010000
#define REG_DMA_VER_1_1 0x00010001
#define REG_DMA_VER_1_2 0x00010002
#define REG_DMA_VER_2_0 0x00020000
#define REG_DMA_VER_3_0 0x00030000
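
/*
 * Default ops: stubs installed before a version-specific init_v*() takes
 * over, and restored on deinit. check_support() reports the feature as
 * unsupported, the buffer/payload handlers fail with -EINVAL, and the
 * last_command/dump handlers are harmless no-ops, so callers degrade
 * gracefully when no LUTDMA hardware is bound.
 */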
static int default_check_support(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
{
	if (!is_supported)
		return -EINVAL;

	*is_supported = false;
	return 0;
}

static int default_setup_payload(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	DRM_ERROR("not implemented\n");
	return -EINVAL;
}

static int default_kick_off(struct sde_reg_dma_kickoff_cfg *cfg)
{
	DRM_ERROR("not implemented\n");
	return -EINVAL;
}

static int default_reset(struct sde_hw_ctl *ctl)
{
	DRM_ERROR("not implemented\n");
	return -EINVAL;
}

struct sde_reg_dma_buffer *default_alloc_reg_dma_buf(u32 size)
{
	DRM_ERROR("not implemented\n");
	return ERR_PTR(-EINVAL);
}

int default_dealloc_reg_dma(struct sde_reg_dma_buffer *lut_buf)
{
	DRM_ERROR("not implemented\n");
	return -EINVAL;
}

static int default_buf_reset_reg_dma(struct sde_reg_dma_buffer *lut_buf)
{
	DRM_ERROR("not implemented\n");
	return -EINVAL;
}

static int default_last_command(struct sde_hw_ctl *ctl,
		enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
{
	return 0;
}

static int default_last_command_sb(struct sde_hw_ctl *ctl,
		enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
{
	return 0;
}

static void default_dump_reg(void)
{
}
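
/*
 * Install the stub table into the live ops so that every entry is non-NULL
 * once initialization has been attempted, even without LUTDMA hardware.
 */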
static void set_default_dma_ops(struct sde_hw_reg_dma *reg_dma)
{
	static const struct sde_hw_reg_dma_ops ops = {
		default_check_support, default_setup_payload,
		default_kick_off, default_reset, default_alloc_reg_dma_buf,
		default_dealloc_reg_dma, default_buf_reset_reg_dma,
		default_last_command, default_last_command_sb,
		default_dump_reg,
	};

	memcpy(&reg_dma->ops, &ops, sizeof(ops));
}

/* Module-wide LUTDMA state; all accessors operate on this singleton */
static struct sde_hw_reg_dma reg_dma;
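
/*
 * Fence-error callback registered with the MSM fence framework: on a fence
 * error, route to the reset handler of whichever LUTDMA version is bound.
 */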
static int sde_reg_dma_reset(void *ctl_data, void *priv_data)
{
	struct sde_hw_ctl *sde_hw_ctl = (struct sde_hw_ctl *)ctl_data;
	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();

	if (ops && ops->reset) {
		SDE_EVT32(sde_hw_ctl ? sde_hw_ctl->idx : 0xff, SDE_EVTLOG_FUNC_ENTRY);
		return ops->reset(sde_hw_ctl);
	}

	return 0;
}
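
/**
 * sde_reg_dma_init() - bind the LUTDMA ops matching the catalog hw revision
 * @addr: mapped base of the register space used for LUTDMA programming
 * @m: pointer to the MDSS hardware catalog
 * @dev: DRM device handle
 *
 * Registers a fence-error handler, installs the default stub ops, then
 * replaces them with the implementation selected by m->dma_cfg.version.
 * Returns 0 when LUTDMA is absent or the revision is unrecognized,
 * otherwise the result of the version-specific init.
 */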
int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
		struct drm_device *dev)
{
	int rc = 0;
	void *client_entry_handle;
	struct msm_fence_error_ops sde_reg_dma_event_ops = {
		.fence_error_handle_submodule = sde_reg_dma_reset,
	};

	client_entry_handle = msm_register_fence_error_event(dev, &sde_reg_dma_event_ops, NULL);
	if (IS_ERR_OR_NULL(client_entry_handle))
		DRM_INFO("register fence_error_event failed.\n");

	set_default_dma_ops(&reg_dma);

	if (!addr || !m || !dev) {
		DRM_DEBUG("invalid addr %pK catalog %pK dev %pK\n", addr, m, dev);
		return 0;
	}

	/*
	 * Register dummy range to ensure register dump is only done on
	 * targeted LUTDMA regions; start = 1, end = 1 so the full range
	 * isn't used.
	 */
	sde_dbg_reg_register_dump_range(LUTDMA_DBG_NAME, "DUMMY_LUTDMA", 1, 1,
			m->dma_cfg.xin_id);

	if (!m->reg_dma_count)
		return 0;

	reg_dma.reg_dma_count = m->reg_dma_count;
	reg_dma.drm_dev = dev;
	reg_dma.addr = addr;
	reg_dma.caps = &m->dma_cfg;

	switch (reg_dma.caps->version) {
	case REG_DMA_VER_1_0:
		rc = init_v1(&reg_dma);
		if (rc)
			DRM_DEBUG("init v1 dma ops failed\n");
		break;
	case REG_DMA_VER_1_1:
		rc = init_v11(&reg_dma);
		if (rc)
			DRM_DEBUG("init v11 dma ops failed\n");
		break;
	case REG_DMA_VER_1_2:
		rc = init_v12(&reg_dma);
		if (rc)
			DRM_DEBUG("init v12 dma ops failed\n");
		break;
	case REG_DMA_VER_2_0:
		rc = init_v2(&reg_dma);
		if (rc)
			DRM_DEBUG("init v2 dma ops failed\n");
		break;
	case REG_DMA_VER_3_0:
		rc = init_v3(&reg_dma);
		if (rc)
			DRM_DEBUG("init v3 dma ops failed\n");
		break;
	default:
		break;
	}

	return rc;
}
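
/*
 * Accessor for the active ops table. The pointer itself is always valid,
 * but individual entries may be NULL before sde_reg_dma_init() has run, so
 * callers should check them. A sketch of a typical call site, assuming the
 * vtable member names mirror the default handlers above:
 *
 *	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();
 *	bool supported = false;
 *
 *	if (ops->check_support &&
 *			!ops->check_support(feature, blk, &supported) && supported)
 *		ops->setup_payload(&cfg);
 */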
struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void)
{
	return &reg_dma.ops;
}
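
/*
 * Tear down the version-specific engine, clear the module state, and fall
 * back to the default stubs. Safe to call when init never bound a version.
 */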
void sde_reg_dma_deinit(void)
{
	if (!reg_dma.drm_dev || !reg_dma.caps)
		return;

	switch (reg_dma.caps->version) {
	case REG_DMA_VER_1_0:
	case REG_DMA_VER_1_1:
	case REG_DMA_VER_1_2:
	case REG_DMA_VER_2_0:
		deinit_v1();
		break;
	default:
		break;
	}

	memset(&reg_dma, 0, sizeof(reg_dma));
	set_default_dma_ops(&reg_dma);
}