disp: msm: sde: update hw-fence txq wr_ptr from hardware

This change adds hardware programming that will update the
txq wr_ptr upon output fence firing.

Change-Id: I79ff0ea5fb2b7f73a48bd70e3c8e71ea69fead95
Signed-off-by: Christina Oliveira <quic_coliveir@quicinc.com>
This commit is contained in:
Christina Oliveira
2023-03-09 11:51:34 -08:00
committed by Gerrit - the friendly Code Review server
parent 280c38cc54
commit b5cbfa8358
12 changed files with 302 additions and 108 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -192,7 +192,7 @@ static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
if (!client || !client->domain)
return -ENODEV;
ret = iommu_map(client->domain, dest_address, dest_address,
ret = iommu_map(client->domain, iova, dest_address,
size, prot);
if (ret)
pr_err("smmu map failed\n");

View File

@@ -3079,6 +3079,9 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
sde_enc->cur_master->hw_ctl,
&sde_enc->cur_master->intf_cfg_v1);
if (sde_enc->cur_master->hw_ctl)
sde_fence_output_hw_fence_dir_write_init(sde_enc->cur_master->hw_ctl);
_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info);
if (!sde_encoder_in_cont_splash(drm_enc))

View File

@@ -16,6 +16,9 @@
#define TIMELINE_VAL_LENGTH 128
#define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10
#define SPEC_FENCE_FLAG_ARRAY_BIND 0x11
#define HW_FENCE_DIR_WRITE_SIZE 0x2
#define HW_FENCE_DIR_WRITE_MASK 0xFFFFFFFF
#define HW_FENCE_HFI_MMAP_DPU_BA 0x200000
/**
* struct sde_fence - release/retire fence structure
@@ -63,18 +66,18 @@ enum sde_hw_fence_clients {
* This 'hw_fence_data_no_dpu' must be used for HW that does not support dpu-signal.
*/
struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 14, {2, 3}, 0, 8, 8,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 15, {4, 5}, 0, 8, 8,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 16, {6, 7}, 0, 8, 8,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 17, {8, 9}, 0, 8, 8,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 18, {10, 11}, 0, 8, 8,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 19, {12, 13}, 0, 8, 8,
0, 0}
{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 14, {2, 3},
0, 8, 8, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 15, {4, 5},
0, 8, 8, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 16, {6, 7},
0, 8, 8, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 17, {8, 9},
0, 8, 8, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 18, {10, 11},
0, 8, 8, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 19, {12, 13},
0, 8, 8, 0, 0}
};
/**
@@ -85,27 +88,32 @@ struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
* This 'hw_fence_data_dpu_client' must be used for HW that supports dpu-signal
*/
struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 0, {0, 6}, 0, 8, 25,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 1, {1, 7}, 0, 8, 25,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 2, {2, 8}, 0, 8, 25,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 3, {3, 9}, 0, 8, 25,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 4, {4, 10}, 0, 8, 25,
0, 0},
{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 5, {5, 11}, 0, 8, 25,
0, 0}
{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 0, {0, 6},
0, 8, 25, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 1, {1, 7},
0, 8, 25, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 2, {2, 8},
0, 8, 25, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 3, {3, 9},
0, 8, 25, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 4, {4, 10},
0, 8, 25, 0, 0},
{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 5, {5, 11},
0, 8, 25, 0, 0}
};
int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc)
int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc, struct msm_mmu *mmu)
{
struct msm_hw_fence_hfi_queue_header *hfi_queue_header_va, *hfi_queue_header_pa;
struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
struct sde_hw_fence_data *sde_hw_fence_data;
struct sde_hw_fence_data *hwfence_data;
int ctl_id;
phys_addr_t queue_pa;
void *queue_va;
u32 qhdr0_offset, ctl_hfi_iova;
int ctl_id, ret;
if (!hw_ctl)
if (!hw_ctl || !hw_ctl->ops.hw_fence_output_fence_dir_write_init)
return -EINVAL;
ctl_id = hw_ctl->idx - CTL_0;
@@ -141,8 +149,35 @@ int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc)
return -EINVAL;
}
SDE_DEBUG("hwfence registered ctl_id:%d hw_fence_client_id:%d handle:0x%p\n",
ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle);
/* one-to-one memory map of ctl-path client queues */
ctl_hfi_iova = HW_FENCE_HFI_MMAP_DPU_BA +
PAGE_ALIGN(hwfence_data->mem_descriptor.size * ctl_id);
ret = mmu->funcs->one_to_one_map(mmu, ctl_hfi_iova,
hwfence_data->mem_descriptor.device_addr,
hwfence_data->mem_descriptor.size, IOMMU_READ | IOMMU_WRITE);
if (ret) {
SDE_ERROR("queue one2one memory smmu map failed, ret:%d ctl_id:%d, client:%d\n",
ret, ctl_id, hwfence_data->hw_fence_client_id);
return ret;
}
/* get queue header offset */
queue_va = hwfence_data->mem_descriptor.virtual_addr;
hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)queue_va;
qhdr0_offset = hfi_table_header->qhdr0_offset;
/* initialize tx_wm pointer */
hfi_queue_header_va = (struct msm_hw_fence_hfi_queue_header *)(queue_va + qhdr0_offset);
hwfence_data->txq_tx_wm_va = &hfi_queue_header_va->tx_wm;
/* initialize txq wr_ptr addr pointer */
queue_pa = ctl_hfi_iova;
hfi_queue_header_pa = (struct msm_hw_fence_hfi_queue_header *)(queue_pa + qhdr0_offset);
hwfence_data->txq_wr_ptr_pa = &hfi_queue_header_pa->write_index;
SDE_DEBUG("hwfence registered ctl:%d client:%d handle:0x%pK tx_wm:0x%x wr_idx:0x%x\n",
ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle,
*hwfence_data->txq_tx_wm_va, *hwfence_data->txq_wr_ptr_pa);
return 0;
}
@@ -465,6 +500,14 @@ static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, bool vi
return 0;
}
/*
 * sde_fence_output_hw_fence_dir_write_init - program the ctl-path direct-write
 * registers (addr/size/mask) used by hw to update the txq wr_ptr when the
 * output fence fires.
 * @hw_ctl: ctl client whose dir-write registers are initialized; may be NULL.
 *
 * No-op when hw_ctl is NULL or the hw does not expose the dir-write init op.
 */
void sde_fence_output_hw_fence_dir_write_init(struct sde_hw_ctl *hw_ctl)
{
	if (!hw_ctl || !hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		return;

	hw_ctl->ops.hw_fence_output_fence_dir_write_init(hw_ctl,
			hw_ctl->hwfence_data.txq_wr_ptr_pa,
			HW_FENCE_DIR_WRITE_SIZE, HW_FENCE_DIR_WRITE_MASK);
}
/* update output hw_fences txq */
int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
u32 debugfs_hw_fence)
@@ -516,7 +559,7 @@ int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode,
/* update hw-fence tx queue */
SDE_EVT32(ctl_id, SDE_EVTLOG_H32(fc->hwfence_index),
SDE_EVTLOG_L32(fc->hwfence_index));
SDE_EVTLOG_L32(fc->hwfence_index), *data->txq_tx_wm_va);
ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
if (ret) {
SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
@@ -526,6 +569,12 @@ int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode,
fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
goto exit;
}
/* update hw-fence tx queue wr_idx data */
if (hw_ctl->ops.hw_fence_output_fence_dir_write_data)
hw_ctl->ops.hw_fence_output_fence_dir_write_data(hw_ctl,
*data->txq_tx_wm_va);
/* avoid updating txq more than once and avoid repeating the same fence twice */
txq_updated = fc->txq_updated_fence = true;

View File

@@ -68,6 +68,8 @@ enum sde_fence_event {
* @client_id: client_id enum for the display driver.
* @hw_fence_client_id: client_id enum for the hw-fence driver.
* @mem_descriptor: memory descriptor with the hfi for the rx/tx queues mapping.
* @txq_tx_wm_va: pointer to store virtual address of tx_wm
* @txq_wr_ptr_pa: pointer to store physical address of write_ptr
* @ipcc_in_client: ipcc client triggering the signal: IN_CLIENT (APPS) -> DPU
* @ipcc_in_signal: ipcc signal triggered from client to dpu: IN_SIGNAL (APPS) -> DPU
* @ipcc_out_signal_pp: output signal from dpu to fctl, ping-pongs between two signals
@@ -82,6 +84,8 @@ struct sde_hw_fence_data {
enum hw_fence_client_id hw_fence_client_id;
void *hw_fence_handle;
struct msm_hw_fence_mem_addr mem_descriptor;
u32 *txq_tx_wm_va;
u32 *txq_wr_ptr_pa;
u32 ipcc_in_client;
u32 ipcc_in_signal;
u32 ipcc_out_signal_pp[MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG];
@@ -153,10 +157,11 @@ struct sde_fence_context *sde_fence_init(const char *name,
*
* @hw_ctl: hw ctl client to init.
* @use_ipcc: boolean to indicate if hw should use dpu ipcc signals.
* @mmu: mmu to map memory for queues
*
* Returns: Zero on success, otherwise returns an error code.
*/
int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc);
int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc, struct msm_mmu *mmu);
/**
* sde_fence_hw_fence_deinit - deinitialize hw-fence clients
@@ -177,6 +182,12 @@ void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl);
int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
u32 num_fences);
/**
* sde_fence_output_hw_fence_dir_write_init - update addr, mask and size for output fence dir write
* @hw_ctl: hw ctl client to init dir write regs for
*/
void sde_fence_output_hw_fence_dir_write_init(struct sde_hw_ctl *hw_ctl);
/**
* sde_fence_update_hw_fences_txq - updates the hw-fence txq with the list of hw-fences to signal
* upon triggering the ipcc signal.

View File

@@ -5481,11 +5481,16 @@ static void _sde_hw_fence_caps(struct sde_mdss_cfg *sde_cfg)
set_bit(SDE_FEATURE_HW_FENCE_IPCC, sde_cfg->features);
if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_A00))
set_bit(SDE_MDP_HW_FENCE_DIR_WRITE, &sde_cfg->mdp[0].features);
for (i = 0; i < sde_cfg->ctl_count; i++) {
ctl = sde_cfg->ctl + i;
set_bit(SDE_CTL_HW_FENCE, &ctl->features);
if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_A00))
if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_A00)) {
set_bit(SDE_CTL_HW_FENCE_TRIGGER_SEL, &ctl->features);
set_bit(SDE_CTL_HW_FENCE_DIR_WRITE, &ctl->features);
}
}
}

View File

@@ -270,6 +270,7 @@ struct sde_intr_irq_offsets {
* @SDE_MDP_DHDR_MEMPOOL_4K Dynamic HDR mempool is 4k aligned
* @SDE_MDP_PERIPH_TOP_REMOVED Indicates if periph top0 block is removed
* @SDE_MDP_TOP_PPB_SET_SIZE Indicates if top block supports ppb size setting
* @SDE_MDP_HW_FENCE_DIR_WRITE Indicates if hw supports hw-fence dir write
* @SDE_MDP_MAX Maximum value
*/
enum {
@@ -284,6 +285,7 @@ enum {
SDE_MDP_DHDR_MEMPOOL_4K,
SDE_MDP_PERIPH_TOP_0_REMOVED,
SDE_MDP_TOP_PPB_SET_SIZE,
SDE_MDP_HW_FENCE_DIR_WRITE,
SDE_MDP_MAX
};
@@ -597,6 +599,7 @@ enum {
* @SDE_CTL_UNIFIED_DSPP_FLUSH CTL supports only one flush bit for DSPP
* @SDE_CTL_HW_FENCE CTL supports hw fencing
* @SDE_CTL_HW_FENCE_TRIGGER_SEL CTL supports SW selection of cmd/vid modes for trigger sel
* @SDE_CTL_HW_FENCE_DIR_WRITE CTL supports hw fencing dir writes
* @SDE_CTL_MAX
*/
enum {
@@ -608,6 +611,7 @@ enum {
SDE_CTL_UNIFIED_DSPP_FLUSH,
SDE_CTL_HW_FENCE,
SDE_CTL_HW_FENCE_TRIGGER_SEL,
SDE_CTL_HW_FENCE_DIR_WRITE,
SDE_CTL_MAX
};

View File

@@ -66,6 +66,10 @@
#define CTL_OUTPUT_FENCE_START_TIMESTAMP1 0x26C
#define CTL_OUTPUT_FENCE_END_TIMESTAMP0 0x270
#define CTL_OUTPUT_FENCE_END_TIMESTAMP1 0x274
#define CTL_OUTPUT_FENCE_DIR_ADDR 0x280
#define CTL_OUTPUT_FENCE_DIR_DATA 0x284
#define CTL_OUTPUT_FENCE_DIR_MASK 0x288
#define CTL_OUTPUT_FENCE_DIR_ATTR 0x28C
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_ROT BIT(27)
@@ -359,6 +363,25 @@ static inline void sde_hw_ctl_trigger_output_fence(struct sde_hw_ctl *ctx, u32 t
SDE_REG_WRITE(&ctx->hw, CTL_OUTPUT_FENCE_CTRL, val);
}
/*
 * sde_hw_ctl_output_fence_dir_wr_init - program addr, mask and size for the
 * output-fence direct-write registers of this ctl path.
 * @ctx:  ctl path context
 * @addr: destination address hw writes to when the output fence fires
 * @size: write size, encoded into bits [6:4] of the DIR_ATTR register
 * @mask: mask applied by hw to the direct write
 */
static inline void sde_hw_ctl_output_fence_dir_wr_init(struct sde_hw_ctl *ctx, u32 *addr,
u32 size, u32 mask)
{
uintptr_t ptr_val = (uintptr_t)addr;
/* read-modify-write: only the 3-bit size field [6:4] is replaced */
u32 attr = SDE_REG_READ(&ctx->hw, CTL_OUTPUT_FENCE_DIR_ATTR);
attr &= ~(0x7 << 4);
attr |= ((size & 0x7) << 4);
SDE_REG_WRITE(&ctx->hw, CTL_OUTPUT_FENCE_DIR_ATTR, attr);
SDE_REG_WRITE(&ctx->hw, CTL_OUTPUT_FENCE_DIR_MASK, mask);
/*
 * NOTE(review): ptr_val is truncated to 32 bits by the u32 register write;
 * assumes the caller passes a 32-bit iova (one-to-one mapping based at
 * HW_FENCE_HFI_MMAP_DPU_BA), not a 64-bit kernel VA — confirm.
 */
SDE_REG_WRITE(&ctx->hw, CTL_OUTPUT_FENCE_DIR_ADDR, ptr_val);
}
/*
 * sde_hw_ctl_output_fence_dir_wr_data - set the data value hw will write to
 * the configured direct-write address when the output fence fires.
 * @ctx:  ctl path context
 * @data: value to write (e.g. the updated txq wr_ptr index)
 */
static inline void sde_hw_ctl_output_fence_dir_wr_data(struct sde_hw_ctl *ctx, u32 data)
{
SDE_REG_WRITE(&ctx->hw, CTL_OUTPUT_FENCE_DIR_DATA, data);
}
static inline void sde_hw_ctl_hw_fence_ctrl(struct sde_hw_ctl *ctx, bool sw_override_set,
bool sw_override_clear, u32 mode)
{
@@ -1488,6 +1511,12 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
ops->trigger_output_fence_override = sde_hw_ctl_trigger_output_fence_override;
ops->hw_fence_output_status = sde_hw_ctl_output_fence_timestamps;
ops->hw_fence_output_timestamp_ctrl = sde_hw_ctl_fence_timestamp_ctrl;
if (cap & BIT(SDE_CTL_HW_FENCE_DIR_WRITE)) {
ops->hw_fence_output_fence_dir_write_init =
sde_hw_ctl_output_fence_dir_wr_init;
ops->hw_fence_output_fence_dir_write_data =
sde_hw_ctl_output_fence_dir_wr_data;
}
}
if (cap & BIT(SDE_CTL_UIDLE))

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@@ -228,6 +228,22 @@ struct sde_hw_ctl_ops {
*/
void (*hw_fence_update_output_fence)(struct sde_hw_ctl *ctx, u32 client_id, u32 signal_id);
/**
* update address, data size, and mask values for output fence direct writes
* @ctx : ctl path ctx pointer
* @addr : address value to write
* @size : size value to write
* @mask : mask value to write
*/
void (*hw_fence_output_fence_dir_write_init)(struct sde_hw_ctl *ctx, u32 *addr, u32 size,
u32 mask);
/**
* update data value for output_fence direct writes
* @ctx : ctl path ctx pointer
* @data : data value to write
*/
void (*hw_fence_output_fence_dir_write_data)(struct sde_hw_ctl *ctx, u32 data);
/**
* update input hw fence ipcc client_id and signal_id
* @ctx : ctl path ctx pointer

View File

@@ -670,6 +670,60 @@ static void sde_hw_input_hw_fence_status(struct sde_hw_mdp *mdp, u64 *s_val, u64
wmb(); /* make sure the timestamps are cleared */
}
/*
 * _sde_hw_setup_hw_input_fences_config - program the common (input-fence)
 * portion of the hw-fence configuration: ipcc protocol selection, the
 * FENCE_IDn_ISR start indices, and the three-op input-fence isr sequence
 * (read_reg, write-if-eq for flush ready, exit).
 * @protocol_id:    ipcc protocol id used by the hw-fence driver
 * @client_phys_id: physical ipcc client id for dpu
 * @ipcc_base_addr: base address of the ipcc hw block
 * @c:              register map, expected to be rebased to mdss (blk_off 0)
 *
 * Shared by both the plain and the dir-write variants of
 * setup_hw_fences (see callers), which append their own output-fence isr ops.
 */
static void _sde_hw_setup_hw_input_fences_config(u32 protocol_id, u32 client_phys_id,
unsigned long ipcc_base_addr, struct sde_hw_blk_reg_map *c)
{
u32 val, offset;
/* select ipcc protocol id for dpu */
val = (protocol_id == HW_FENCE_IPCC_FENCE_PROTOCOL_ID) ?
HW_FENCE_DPU_FENCE_PROTOCOL_ID : protocol_id;
SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_CTRL, val);
/* configure the start of the FENCE_IDn_ISR ops for input and output fence isr's */
val = (HW_FENCE_DPU_OUTPUT_FENCE_START_N << 16) | (HW_FENCE_DPU_INPUT_FENCE_START_N & 0xFF);
SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_ID_START_ADDR, val);
/* setup input fence isr */
/* configure the attribs for the isr read_reg op: read the ipcc RECV_ID reg */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 0);
val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ipcc_base_addr,
protocol_id, client_phys_id);
SDE_REG_WRITE(c, offset, val);
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 0);
val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x1);
SDE_REG_WRITE(c, offset, val);
/* no bits masked off the read value */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 0);
SDE_REG_WRITE(c, offset, 0xFFFFFFFF);
/* configure the attribs for the write if eq data */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_DATA, 1);
SDE_REG_WRITE(c, offset, 0x1);
/* program input-fence isr ops */
/* set read_reg op */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
HW_FENCE_DPU_INPUT_FENCE_START_N);
val = MDP_CTL_FENCE_ISR_OP_CODE(0x0, 0x0, 0x0, 0x0);
SDE_REG_WRITE(c, offset, val);
/* set write if eq op for flush ready */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_INPUT_FENCE_START_N + 1));
val = MDP_CTL_FENCE_ISR_OP_CODE(0x7, 0x0, 0x1, 0x0);
SDE_REG_WRITE(c, offset, val);
/* set exit op to terminate the input-fence isr sequence */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_INPUT_FENCE_START_N + 2));
val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
SDE_REG_WRITE(c, offset, val);
}
static void sde_hw_setup_hw_fences_config(struct sde_hw_mdp *mdp, u32 protocol_id,
u32 client_phys_id, unsigned long ipcc_base_addr)
{
@@ -681,57 +735,10 @@ static void sde_hw_setup_hw_fences_config(struct sde_hw_mdp *mdp, u32 protocol_i
return;
}
/* start from the base-address of the mdss */
c = mdp->hw;
c.blk_off = 0x0;
/*select ipcc protocol id for dpu */
val = (protocol_id == HW_FENCE_IPCC_FENCE_PROTOCOL_ID) ?
HW_FENCE_DPU_FENCE_PROTOCOL_ID : protocol_id;
SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_CTRL, val);
/* configure the start of the FENCE_IDn_ISR ops for input and output fence isr's */
val = (HW_FENCE_DPU_OUTPUT_FENCE_START_N << 16) | (HW_FENCE_DPU_INPUT_FENCE_START_N & 0xFF);
SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_ID_START_ADDR, val);
/* setup input fence isr */
/* configure the attribs for the isr read_reg op */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 0);
val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ipcc_base_addr,
protocol_id, client_phys_id);
SDE_REG_WRITE(&c, offset, val);
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 0);
val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x1);
SDE_REG_WRITE(&c, offset, val);
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 0);
SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);
/* configure the attribs for the write if eq data */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_DATA, 1);
SDE_REG_WRITE(&c, offset, 0x1);
/* program input-fence isr ops */
/* set read_reg op */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
HW_FENCE_DPU_INPUT_FENCE_START_N);
val = MDP_CTL_FENCE_ISR_OP_CODE(0x0, 0x0, 0x0, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* set write if eq op for flush ready */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_INPUT_FENCE_START_N + 1));
val = MDP_CTL_FENCE_ISR_OP_CODE(0x7, 0x0, 0x1, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* set exit op */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_INPUT_FENCE_START_N + 2));
val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
SDE_REG_WRITE(&c, offset, val);
_sde_hw_setup_hw_input_fences_config(protocol_id, client_phys_id, ipcc_base_addr, &c);
/*setup output fence isr */
@@ -801,6 +808,70 @@ void sde_hw_top_set_ppb_fifo_size(struct sde_hw_mdp *mdp, u32 pp, u32 sz)
spin_unlock(&mdp->slock);
}
/*
 * sde_hw_setup_hw_fences_config_with_dir_write - hw-fence configuration for
 * hw that supports direct writes: programs the shared input-fence config,
 * then an output-fence isr sequence that (in order) loads the ipcc SEND
 * register data, performs the direct write (updating the txq wr_ptr), waits,
 * triggers the ipcc signal via write_reg, and exits.
 * @mdp:            mdp top block handle; bails out if NULL
 * @protocol_id:    ipcc protocol id used by the hw-fence driver
 * @client_phys_id: physical ipcc client id for dpu
 * @ipcc_base_addr: base address of the ipcc hw block
 */
static void sde_hw_setup_hw_fences_config_with_dir_write(struct sde_hw_mdp *mdp, u32 protocol_id,
u32 client_phys_id, unsigned long ipcc_base_addr)
{
u32 val, offset;
struct sde_hw_blk_reg_map c;
if (!mdp) {
SDE_ERROR("invalid mdp, won't configure hw-fences\n");
return;
}
/* rebase the register map to the start of the mdss block */
c = mdp->hw;
c.blk_off = 0x0;
_sde_hw_setup_hw_input_fences_config(protocol_id, client_phys_id, ipcc_base_addr, &c);
/* setup output fence isr */
/* configure the attribs for the isr load_data op: ipcc SEND register */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 4);
val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ipcc_base_addr,
protocol_id, client_phys_id);
SDE_REG_WRITE(&c, offset, val);
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 4);
val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* no bits masked off */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 4);
SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);
/* program output-fence isr ops */
/* set load_data op*/
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
HW_FENCE_DPU_OUTPUT_FENCE_START_N);
val = MDP_CTL_FENCE_ISR_OP_CODE(0x6, 0x0, 0x4, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* set write_direct op (this is the dir-write that updates the txq wr_ptr) */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 1));
val = MDP_CTL_FENCE_ISR_OP_CODE(0x3, 0x0, 0x0, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* set wait op */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 2));
val = MDP_CTL_FENCE_ISR_OP_CODE(0x4, 0x1, 0x0, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* set write_reg op */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 3));
val = MDP_CTL_FENCE_ISR_OP_CODE(0x2, 0x4, 0x0, 0x0);
SDE_REG_WRITE(&c, offset, val);
/* set exit op to terminate the output-fence isr sequence */
offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 4));
val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
SDE_REG_WRITE(&c, offset, val);
}
static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, unsigned long cap, u32 hw_fence_rev)
{
ops->setup_split_pipe = sde_hw_setup_split_pipe;
@@ -823,7 +894,11 @@ static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, unsigned long cap, u32 hw
ops->get_autorefresh_status = sde_hw_get_autorefresh_status;
if (hw_fence_rev) {
if (cap & BIT(SDE_MDP_HW_FENCE_DIR_WRITE))
ops->setup_hw_fences = sde_hw_setup_hw_fences_config_with_dir_write;
else
ops->setup_hw_fences = sde_hw_setup_hw_fences_config;
ops->hw_fence_input_timestamp_ctrl = sde_hw_hw_fence_timestamp_ctrl;
ops->hw_fence_input_status = sde_hw_input_hw_fence_status;
}

View File

@@ -4934,7 +4934,6 @@ static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
struct drm_device *dev,
struct msm_drm_private *priv)
{
struct sde_rm *rm = NULL;
int i, rc = -EINVAL;
sde_kms->catalog = sde_hw_catalog_init(dev);
@@ -4974,9 +4973,7 @@ static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
sde_dbg_init_dbg_buses(sde_kms->core_rev);
rm = &sde_kms->rm;
rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
sde_kms->dev);
rc = sde_rm_init(&sde_kms->rm);
if (rc) {
SDE_ERROR("rm init failed: %d\n", rc);
goto power_error;

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@@ -716,7 +716,7 @@ static int _sde_rm_hw_blk_create(
return 0;
}
static int _init_hw_fences(struct sde_rm *rm, bool use_ipcc)
static int _init_hw_fences(struct sde_rm *rm, bool use_ipcc, struct sde_kms *sde_kms)
{
struct sde_rm_hw_iter iter;
int ret = 0;
@@ -725,11 +725,20 @@ static int _init_hw_fences(struct sde_rm *rm, bool use_ipcc)
while (_sde_rm_get_hw_locked(rm, &iter)) {
struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter.blk->hw);
if (sde_hw_fence_init(ctl, use_ipcc)) {
if (sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE] &&
sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu) {
if (sde_hw_fence_init(ctl, use_ipcc,
sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu)) {
SDE_DEBUG("failed to init hw_fence idx:%d\n", ctl->idx);
ret = -EINVAL;
break;
}
} else {
SDE_DEBUG("failed to init hw_fence idx:%d, no aspace to map memory\n",
ctl->idx);
ret = -EINVAL;
break;
}
SDE_DEBUG("init hw-fence for ctl %d", iter.blk->id);
}
@@ -741,7 +750,7 @@ static int _init_hw_fences(struct sde_rm *rm, bool use_ipcc)
static int _sde_rm_hw_blk_create_new(struct sde_rm *rm,
struct sde_mdss_cfg *cat,
void __iomem *mmio)
void __iomem *mmio, struct sde_kms *sde_kms)
{
int i, rc = 0;
@@ -825,7 +834,8 @@ static int _sde_rm_hw_blk_create_new(struct sde_rm *rm,
}
if (cat->hw_fence_rev) {
if (_init_hw_fences(rm, test_bit(SDE_FEATURE_HW_FENCE_IPCC, cat->features))) {
if (_init_hw_fences(rm, test_bit(SDE_FEATURE_HW_FENCE_IPCC, cat->features),
sde_kms)) {
SDE_INFO("failed to init hw-fences, disabling hw-fences\n");
cat->hw_fence_rev = 0;
}
@@ -910,11 +920,12 @@ void sde_rm_debugfs_init(struct sde_rm *rm, struct dentry *parent)
}
#endif /* CONFIG_DEBUG_FS */
int sde_rm_init(struct sde_rm *rm,
struct sde_mdss_cfg *cat,
void __iomem *mmio,
struct drm_device *dev)
int sde_rm_init(struct sde_rm *rm)
{
struct sde_kms *sde_kms = container_of(rm, struct sde_kms, rm);
struct sde_mdss_cfg *cat = sde_kms->catalog;
void __iomem *mmio = sde_kms->mmio;
struct drm_device *dev = sde_kms->dev;
int i, rc = 0;
enum sde_hw_blk_type type;
@@ -977,7 +988,7 @@ int sde_rm_init(struct sde_rm *rm,
}
}
rc = _sde_rm_hw_blk_create_new(rm, cat, mmio);
rc = _sde_rm_hw_blk_create_new(rm, cat, mmio, sde_kms);
if (!rc)
return 0;

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
@@ -239,15 +239,9 @@ void sde_rm_debugfs_init(struct sde_rm *rm, struct dentry *parent);
* sde_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
* @rm: SDE Resource Manager handle
* @cat: Pointer to hardware catalog
* @mmio: mapped register io address of MDP
* @dev: device handle for event logging purposes
* @Return: 0 on Success otherwise -ERROR
*/
int sde_rm_init(struct sde_rm *rm,
struct sde_mdss_cfg *cat,
void __iomem *mmio,
struct drm_device *dev);
int sde_rm_init(struct sde_rm *rm);
/**
* sde_rm_destroy - Free all memory allocated by sde_rm_init