@@ -20,15 +20,18 @@
 #define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
 #define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
-	(BIT(REG_SINGLE_MODIFY)))
+	(BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))

 #define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
 #define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)

-#define SET_UP_REG_DMA_REG(hw, reg_dma) \
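+/* leaves "hw" untouched for an invalid block; callers check hwversion == 0 */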
+#define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
	do { \
+		if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
+			break; \
		(hw).base_off = (reg_dma)->addr; \
-		(hw).blk_off = (reg_dma)->caps->base; \
+		(hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
		(hw).hwversion = (reg_dma)->caps->version; \
		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
	} while (0)
@@ -51,22 +53,32 @@
 #define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
 #define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
 #define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
+#define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))

 #define WRAP_MIN_SIZE 2
 #define WRAP_MAX_SIZE (BIT(4) - 1)
 #define MAX_DWORDS_SZ (BIT(14) - 1)
 #define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)

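+/* field masks for the two LUTBUS header dwords */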
+#define LUTBUS_TABLE_SEL_MASK 0x10000
+#define LUTBUS_BLOCK_SEL_MASK 0xffff
+#define LUTBUS_TRANS_SZ_MASK 0xff0000
+#define LUTBUS_LUT_SIZE_MASK 0x3fff
+
 static uint32_t reg_dma_register_count;
 static uint32_t reg_dma_decode_sel;
 static uint32_t reg_dma_opmode_offset;
 static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
+static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
 static uint32_t reg_dma_intr_status_offset;
 static uint32_t reg_dma_intr_4_status_offset;
 static uint32_t reg_dma_intr_clear_offset;
 static uint32_t reg_dma_ctl_trigger_offset;
 static uint32_t reg_dma_ctl0_reset_offset;
 static uint32_t reg_dma_error_clear_mask;
+static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
+static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];

 typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);

@@ -78,6 +89,7 @@ static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = sizeof(u32) * 2,
	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
+	[REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
 };

 static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
@@ -85,7 +97,6 @@ static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
	[DMA_CTL_QUEUE1] = BIT(4),
 };

-static u32 reg_dma_ctl_queue_off[CTL_MAX];
 static u32 dspp_read_sel[DSPP_HIST_MAX] = {
	[DSPP0_HIST] = 0,
	[DSPP1_HIST] = 1,
@@ -119,6 +130,7 @@ static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
 static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
@@ -127,6 +139,7 @@ static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
 static int check_support_v1(enum sde_reg_dma_features feature,
@@ -139,6 +152,8 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
 static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
 static void dump_regs_v1(void);
+static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
+		enum sde_reg_dma_last_cmd_mode mode);

 static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = write_decode_sel,
@@ -147,6 +162,7 @@ static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_INC] = write_multi_reg_index,
	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = write_single_modify,
+	[REG_BLK_LUT_WRITE] = write_block_lut_reg,
 };

 static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
@@ -156,9 +172,11 @@ static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_INC] = validate_write_reg,
	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = validate_write_reg,
+	[REG_BLK_LUT_WRITE] = validate_blk_lut_write,
 };

-static struct sde_reg_dma_buffer *last_cmd_buf[CTL_MAX];
+static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
+static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];

 static void get_decode_sel(unsigned long blk, u32 *decode_sel)
 {
@@ -331,6 +349,26 @@ static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
	return 0;
 }

+static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+	u32 *loc = NULL;
+
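+	/*
+	 * Build the two-dword LUTBUS header; the LUT payload itself is
+	 * appended by the write_multi_reg() call below.
+	 */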
+	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
+			cfg->dma_buf->index);
+	loc[0] = LUTBUS_WRITE_OPCODE;
+	loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
+	loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
+	loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
+	loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
+	cfg->dma_buf->index += ops_mem_size[cfg->ops];
+
+	return write_multi_reg(cfg);
+}
+
 static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
 {
	u32 *loc = NULL;
@@ -363,6 +397,27 @@ static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
	return rc;
 }

+static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+	int rc;
+
+	rc = validate_write_reg(cfg);
+	if (rc)
+		return rc;
+
+	if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
+			cfg->block_sel >= LUTBUS_BLOCK_MAX ||
+			(cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
+			cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE)) {
+		DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
+				cfg->table_sel, cfg->block_sel,
+				cfg->trans_size);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
 static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
 {
	u32 remain_len, write_len;
@@ -536,6 +591,15 @@ static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
			(size_t)MAX_DWORDS_SZ);
		return -EINVAL;
	}
+
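+	/* SB LUTDMA is write-only and is serviced on queue 1 only */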
+	if (cfg->dma_type == REG_DMA_TYPE_SB &&
+			(cfg->queue_select != DMA_CTL_QUEUE1 ||
+			cfg->op == REG_DMA_READ)) {
+		DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
+				cfg->queue_select, cfg->op);
+		return -EINVAL;
+	}
	return 0;
 }

@@ -553,7 +616,17 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);

-	SET_UP_REG_DMA_REG(hw, reg_dma);
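+	/* on an unknown or absent block, hw.hwversion remains 0 */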
+	if (cfg->dma_type == REG_DMA_TYPE_DB)
+		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
+	else if (cfg->dma_type == REG_DMA_TYPE_SB)
+		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);
+
+	if (hw.hwversion == 0) {
+		DRM_ERROR("invalid dma type %d\n", cfg->dma_type);
+		return -EINVAL;
+	}
+
	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
	if (val) {
@@ -564,15 +636,29 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
		SDE_EVT32(val);
	}

-	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
-			cfg->dma_buf->iova);
-	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
-			cmd1);
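+	/* DB commands are queued via the queue0 registers, SB via queue1 */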
+	if (cfg->dma_type == REG_DMA_TYPE_DB) {
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
+				cfg->dma_buf->iova);
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
+				cmd1);
+	} else if (cfg->dma_type == REG_DMA_TYPE_SB) {
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
+				cfg->dma_buf->iova);
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
+				cmd1);
+	}
+
	if (cfg->last_command) {
		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
+		/* DB LUTDMA uses a SW trigger, while SB LUTDMA uses the
+		 * DSPP_SB flush as its trigger event.
+		 */
+		if (cfg->dma_type == REG_DMA_TYPE_DB) {
+			SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
+					queue_sel[cfg->queue_select]);
+		}
	}

	return 0;
@@ -587,26 +672,45 @@ int init_v1(struct sde_hw_reg_dma *cfg)

	reg_dma = cfg;
	for (i = CTL_0; i < CTL_MAX; i++) {
-		if (!last_cmd_buf[i]) {
-			last_cmd_buf[i] =
+		if (!last_cmd_buf_db[i]) {
+			last_cmd_buf_db[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
-			if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
+			if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
-						PTR_ERR(last_cmd_buf[i]));
+						PTR_ERR(last_cmd_buf_db[i]));
+				return 0;
+			}
+		}
+		if (!last_cmd_buf_sb[i]) {
+			last_cmd_buf_sb[i] =
+				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
+			if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
+				/*
+				 * This will allow reg dma to fall back to
+				 * AHB domain
+				 */
+				pr_info("Failed to allocate reg dma, ret:%lu\n",
+						PTR_ERR(last_cmd_buf_sb[i]));
				return 0;
			}
		}
	}
	if (rc) {
		for (i = 0; i < CTL_MAX; i++) {
-			if (!last_cmd_buf[i])
+			if (!last_cmd_buf_db[i])
				continue;
-			dealloc_reg_dma_v1(last_cmd_buf[i]);
-			last_cmd_buf[i] = NULL;
+			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
+			last_cmd_buf_db[i] = NULL;
+		}
+		for (i = 0; i < CTL_MAX; i++) {
+			if (!last_cmd_buf_sb[i])
+				continue;
+			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
+			last_cmd_buf_sb[i] = NULL;
		}
		return rc;
	}
@@ -699,6 +803,34 @@ int init_v12(struct sde_hw_reg_dma *cfg)
	return 0;
 }

+int init_v2(struct sde_hw_reg_dma *cfg)
+{
+	int ret = 0, i = 0;
+
+	ret = init_v12(cfg);
+	if (ret) {
+		DRM_ERROR("failed to initialize v12: ret %d\n", ret);
+		return ret;
+	}
+
+	/* initialize register offsets based on version delta */
+	reg_dma_register_count = 0x91;
+	reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
+	reg_dma_error_clear_mask |= BIT(19);
+
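+	/* queue1 command registers are spaced 4 dwords (0x10) apart per CTL */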
+	reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
+	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue1_off); i++)
+		reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
+				(sizeof(u32) * 4);
+
+	v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
+				GRP_DMA_HW_BLK_SELECT;
+	reg_dma->ops.last_command_sb = last_cmd_sb_v2;
+
+	return 0;
+}
+
 static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
@@ -748,7 +879,7 @@ static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 int reset_v1(struct sde_hw_ctl *ctl)
 {
	struct sde_hw_blk_reg_map hw;
-	u32 index, val, i = 0;
+	u32 index, val, i = 0, k = 0;

	if (!ctl || ctl->idx > CTL_MAX) {
		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
@@ -756,20 +887,27 @@ int reset_v1(struct sde_hw_ctl *ctl)
		return -EINVAL;
	}

-	memset(&hw, 0, sizeof(hw));
	index = ctl->idx - CTL_0;
-	SET_UP_REG_DMA_REG(hw, reg_dma);
-	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
-	SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset + index * sizeof(u32)),
-			BIT(0));
-
-	i = 0;
-	do {
-		udelay(1000);
-		i++;
-		val = SDE_REG_READ(&hw,
-			(reg_dma_ctl0_reset_offset + index * sizeof(u32)));
-	} while (i < 2 && val);
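+	/* reset every LUTDMA block (DB and SB) that is present */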
+	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
+		memset(&hw, 0, sizeof(hw));
+		SET_UP_REG_DMA_REG(hw, reg_dma, k);
+		if (hw.hwversion == 0)
+			continue;
+
+		SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
+		SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset +
+				index * sizeof(u32)), BIT(0));
+
+		i = 0;
+		do {
+			udelay(1000);
+			i++;
+			val = SDE_REG_READ(&hw,
+					(reg_dma_ctl0_reset_offset +
+					index * sizeof(u32)));
+		} while (i < 2 && val);
+	}

	return 0;
 }
@@ -973,13 +1110,13 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		return -EINVAL;
	}

-	if (!last_cmd_buf[ctl->idx] || !last_cmd_buf[ctl->idx]->iova) {
-		DRM_DEBUG("invalid last cmd buf for idx %d\n", ctl->idx);
-		return 0;
+	if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
+		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
+		return -EINVAL;
	}

-	cfg.dma_buf = last_cmd_buf[ctl->idx];
-	reset_reg_dma_buffer_v1(last_cmd_buf[ctl->idx]);
+	cfg.dma_buf = last_cmd_buf_db[ctl->idx];
+	reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
@@ -995,14 +1132,17 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
-	kick_off.dma_buf = last_cmd_buf[ctl->idx];
-	if (kick_off_v1(&kick_off)) {
+	kick_off.dma_type = REG_DMA_TYPE_DB;
+	kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
+	rc = kick_off_v1(&kick_off);
+	if (rc) {
		DRM_ERROR("kick off last cmd failed\n");
-		return -EINVAL;
+		return rc;
	}

+	/* lack of block support will be caught by kick_off */
	memset(&hw, 0, sizeof(hw));
-	SET_UP_REG_DMA_REG(hw, reg_dma);
+	SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
	if (mode == REG_DMA_WAIT4_COMP) {
@@ -1016,7 +1156,7 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
	}

-	return 0;
+	return rc;
 }

 void deinit_v1(void)
@@ -1024,23 +1164,78 @@ void deinit_v1(void)
	int i = 0;

	for (i = CTL_0; i < CTL_MAX; i++) {
-		if (last_cmd_buf[i])
-			dealloc_reg_dma_v1(last_cmd_buf[i]);
-		last_cmd_buf[i] = NULL;
+		if (last_cmd_buf_db[i])
+			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
+		last_cmd_buf_db[i] = NULL;
+		if (last_cmd_buf_sb[i])
+			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
+		last_cmd_buf_sb[i] = NULL;
	}
 }

 static void dump_regs_v1(void)
 {
-	uint32_t i = 0;
+	uint32_t i = 0, k = 0;
	u32 val;
	struct sde_hw_blk_reg_map hw;

-	memset(&hw, 0, sizeof(hw));
-	SET_UP_REG_DMA_REG(hw, reg_dma);
+	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
+		memset(&hw, 0, sizeof(hw));
+		SET_UP_REG_DMA_REG(hw, reg_dma, k);
+		if (hw.hwversion == 0)
+			continue;

-	for (i = 0; i < reg_dma_register_count; i++) {
-		val = SDE_REG_READ(&hw, i * sizeof(u32));
-		DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)), val);
+		for (i = 0; i < reg_dma_register_count; i++) {
+			val = SDE_REG_READ(&hw, i * sizeof(u32));
+			DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
+					val);
+		}
	}
+
+}
+
+static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
+		enum sde_reg_dma_last_cmd_mode mode)
+{
+	struct sde_reg_dma_setup_ops_cfg cfg;
+	struct sde_reg_dma_kickoff_cfg kick_off;
+	int rc = 0;
+
+	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
+		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
+				((ctl) ? ctl->idx : -1));
+		return -EINVAL;
+	}
+
+	if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
+		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
+		return -EINVAL;
+	}
+
+	cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
+	reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
+	if (validate_last_cmd(&cfg)) {
+		DRM_ERROR("validate buf failed\n");
+		return -EINVAL;
+	}
+
+	if (write_last_cmd(&cfg)) {
+		DRM_ERROR("write buf failed\n");
+		return -EINVAL;
+	}
+
+	kick_off.ctl = ctl;
+	kick_off.queue_select = q;
+	kick_off.trigger_mode = WRITE_IMMEDIATE;
+	kick_off.last_command = 1;
+	kick_off.op = REG_DMA_WRITE;
+	kick_off.dma_type = REG_DMA_TYPE_SB;
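+	/* SB LUTDMA is hard-wired to queue 1; override the requested queue */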
+	kick_off.queue_select = DMA_CTL_QUEUE1;
+	kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
+	rc = kick_off_v1(&kick_off);
+	if (rc)
+		DRM_ERROR("kick off last cmd failed\n");
+
+	return rc;
 }