msm: eva: synx v2 support

Propagated synx v2 support from 2.0 reference 4162025.

Change-Id: I3427657e21e7eda92088d828203a330ba3c86335
Signed-off-by: Yu SI <quic_ysi@quicinc.com>
Signed-off-by: George Shen <quic_sqiao@quicinc.com>
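Before the hunks, a minimal sketch of the synx v2 client flow this change migrates to, stitched together from the calls that appear in the diff (synx_initialize now returns a session pointer, handles are imported with SYNX_IMPORT_INDV_PARAMS before they are waited on or signaled). This is an illustration only, not code from the commit; the header name "synx_api.h", the example function, and the simplified error handling are assumptions.

/*
 * Sketch of the synx v2 client flow used by this change.
 * Assumption: "synx_api.h" is the client header from the synx-kernel
 * tree whose include path the Kbuild hunk below adds.
 */
#include <linux/err.h>
#include "synx_api.h"

static int eva_synx_v2_example(u32 h_in, u32 h_out)
{
	struct synx_initialization_params init_params = { 0 };
	struct synx_import_params import_params = { 0 };
	struct synx_session *ssid;
	u32 new_h_in = 0;
	int rc;

	/* v2: synx_initialize() returns the session instead of filling one in */
	init_params.name = "cvp-kernel-client";
	init_params.id = SYNX_CLIENT_EVA_CTX0;
	ssid = synx_initialize(&init_params);
	if (IS_ERR_OR_NULL(ssid))
		return -EFAULT;

	/* Import the user-supplied handle before operating on it */
	import_params.type = SYNX_IMPORT_INDV_PARAMS;
	import_params.indv.fence = &h_in;
	import_params.indv.flags = SYNX_IMPORT_SYNX_FENCE | SYNX_IMPORT_LOCAL_FENCE;
	import_params.indv.new_h_synx = &new_h_in;
	rc = synx_import(ssid, &import_params);
	if (rc)
		goto out;

	/* Wait on the input fence, then signal the output fence accordingly */
	rc = synx_wait(ssid, new_h_in, 2000);
	if (rc)
		synx_signal(ssid, h_out, SYNX_STATE_SIGNALED_CANCEL);
	else
		synx_signal(ssid, h_out, SYNX_STATE_SIGNALED_SUCCESS);

	synx_release(ssid, new_h_in);
out:
	synx_uninitialize(ssid);
	return rc;
}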
@@ -238,8 +238,12 @@ struct eva_kmd_hfi_fence_packet {
};

struct eva_kmd_fence {
#ifdef CVP_CONFIG_SYNX_V2
	__u32 h_synx;
#else
	__s32 h_synx;
	__u32 secure_key;
#endif
};

struct eva_kmd_fence_ctrl {
@@ -30,8 +30,8 @@ endif

ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
KBUILD_CPPFLAGS += -DCONFIG_EVA_PINEAPPLE=1
ccflags-y += -DCONFIG_EVA_PINEAPPLE=1
KBUILD_CPPFLAGS += -DCONFIG_EVA_PINEAPPLE=1 -DCVP_CONFIG_SYNX_V2=1
ccflags-y += -DCONFIG_EVA_PINEAPPLE=1 -DCVP_CONFIG_SYNX_V2=1
ccflags-y += -I$(EVA_ROOT)/../synx-kernel/msm/synx/ \
		-I$(EVA_ROOT)/../synx-kernel/include/uapi/synx/media/
endif
@@ -36,6 +36,7 @@ enum queue_state {

#ifdef CONFIG_EVA_PINEAPPLE
#define CVP_MMRM_ENABLED 1
#define CVP_CONFIG_SYNX_V2 1
#endif /* End of CONFIG_EVA_PINEAPPLE */

@@ -100,6 +100,8 @@ static void __noc_error_info_iris2(struct iris_hfi_device *device);
static int __enable_hw_power_collapse(struct iris_hfi_device *device);

static int __power_off_controller(struct iris_hfi_device *device);
static int __hwfence_regs_map(struct iris_hfi_device *device);
static int __hwfence_regs_unmap(struct iris_hfi_device *device);

static struct iris_hfi_vpu_ops iris2_ops = {
	.interrupt_init = interrupt_init_iris2,
@@ -136,6 +138,15 @@ static inline bool is_sys_cache_present(struct iris_hfi_device *device)
	return device->res->sys_cache_present;
}

static int cvp_synx_recover(void)
{
#ifdef CVP_SYNX_ENABLED
	return synx_recover(SYNX_CLIENT_EVA_CTX0);
#else
	return 0;
#endif /* End of CVP_SYNX_ENABLED */
}

#define ROW_SIZE 32

int get_hfi_version(void)
@@ -1891,6 +1902,60 @@ static int iris_pm_qos_update(void *device)
	return 0;
}

static int __hwfence_regs_map(struct iris_hfi_device *device)
{
	int rc = 0;
	struct context_bank_info *cb;

	cb = msm_cvp_smem_get_context_bank(device->res, 0);
	if (!cb) {
		dprintk(CVP_ERR, "%s: fail to get cb\n", __func__);
		return -EINVAL;
	}

	rc = iommu_map(cb->domain, device->res->ipclite_iova,
			device->res->ipclite_phyaddr,
			device->res->ipclite_size,
			IOMMU_READ | IOMMU_WRITE);
	if (rc) {
		dprintk(CVP_ERR, "map ipclite fail %d %#x %#x %#x\n",
			rc, device->res->ipclite_iova,
			device->res->ipclite_phyaddr,
			device->res->ipclite_size);
		return rc;
	}
	rc = iommu_map(cb->domain, device->res->hwmutex_iova,
			device->res->hwmutex_phyaddr,
			device->res->hwmutex_size,
			IOMMU_MMIO | IOMMU_READ | IOMMU_WRITE);
	if (rc) {
		dprintk(CVP_ERR, "map hwmutex fail %d %#x %#x %#x\n",
			rc, device->res->hwmutex_iova,
			device->res->hwmutex_phyaddr,
			device->res->hwmutex_size);
		return rc;
	}
	return rc;
}

static int __hwfence_regs_unmap(struct iris_hfi_device *device)
{
	int rc = 0;
	struct context_bank_info *cb;

	cb = msm_cvp_smem_get_context_bank(device->res, 0);
	if (!cb) {
		dprintk(CVP_ERR, "%s: fail to get cb\n", __func__);
		return -EINVAL;
	}

	iommu_unmap(cb->domain, device->res->ipclite_iova,
			device->res->ipclite_size);
	iommu_unmap(cb->domain, device->res->hwmutex_iova,
			device->res->hwmutex_size);
	return rc;
}

static int iris_hfi_core_init(void *device)
{
	int rc = 0;
@@ -1922,6 +1987,8 @@ static int iris_hfi_core_init(void *device)
	dev->bus_vote.data_count = 1;
	dev->bus_vote.data->power_mode = CVP_POWER_TURBO;

	__hwfence_regs_map(dev);

	rc = __load_fw(dev);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load Iris FW\n");
@@ -1997,6 +2064,14 @@ static int iris_hfi_core_init(void *device)
	__set_ubwc_config(device);
	__sys_set_idle_indicator(device, true);

#ifdef CVP_CONFIG_SYNX_V2
	rc = cvp_synx_recover();
	if (rc) {
		dprintk(CVP_ERR, "Failed to recover synx\n");
		goto err_core_init;
	}
#endif

	if (dev->res->pm_qos.latency_us) {
		int err = 0;
		u32 i, cpu;
@@ -2082,6 +2157,7 @@ static int iris_hfi_core_release(void *dev)

	__disable_subcaches(device);
	__unload_fw(device);
	__hwfence_regs_unmap(device);

	if (msm_cvp_mmrm_enabled) {
		rc = msm_cvp_mmrm_deregister(device);
@@ -3,6 +3,7 @@
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */

#include "cvp_comm_def.h"
#ifndef __H_CVP_HFI_HELPER_H__
#define __H_CVP_HFI_HELPER_H__

@@ -307,6 +308,19 @@ struct cvp_hfi_client {
	u32 reserved2;
} __packed;

#ifdef CVP_CONFIG_SYNX_V2
struct cvp_hfi_buf_type {
	u32 iova;
	u32 size;
	u32 offset;
	u32 flags;
	u32 reserved1;
	u32 reserved2;
	u32 fence_type;
	u32 input_handle;
	u32 output_handle;
};
#else
struct cvp_hfi_buf_type {
	u32 iova;
	u32 size;
@@ -315,6 +329,7 @@ struct cvp_hfi_buf_type {
	u32 reserved1;
	u32 reserved2;
};
#endif

struct cvp_hfi_cmd_session_set_buffers_packet {
	u32 size;
@@ -7,6 +7,7 @@
#include "cvp_hfi.h"
#include "cvp_core_hfi.h"
#include "msm_cvp_buf.h"
#include "cvp_comm_def.h"

struct cvp_power_level {
	unsigned long core_sum;
@@ -14,6 +15,14 @@ struct cvp_power_level {
	unsigned long bw_sum;
};

static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
	struct eva_kmd_hfi_packet *in_pkt,
	unsigned int in_offset,
	unsigned int in_buf_num);

static int cvp_check_clock(struct msm_cvp_inst *inst,
	struct cvp_hfi_msg_session_hdr_ext *hdr);

int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session)
{
	int rc = 0;
@@ -124,6 +133,38 @@ exit:
	return rc;
}

static bool check_clock_required(struct msm_cvp_inst *inst,
	struct eva_kmd_hfi_packet *hdr)
{
	struct cvp_hfi_msg_session_hdr_ext *ehdr =
		(struct cvp_hfi_msg_session_hdr_ext *)hdr;
	bool clock_check = false;

	if (!msm_cvp_dcvs_disable &&
			ehdr->packet_type == HFI_MSG_SESSION_CVP_FD) {
		if (ehdr->size == sizeof(struct cvp_hfi_msg_session_hdr_ext)
			+ sizeof(struct cvp_hfi_buf_type)) {
			struct msm_cvp_core *core = inst->core;

			dprintk(CVP_PWR, "busy cycle %d, total %d\n",
				ehdr->busy_cycles, ehdr->total_cycles);

			if (core->dyn_clk.sum_fps[HFI_HW_FDU] ||
					core->dyn_clk.sum_fps[HFI_HW_MPU] ||
					core->dyn_clk.sum_fps[HFI_HW_OD] ||
					core->dyn_clk.sum_fps[HFI_HW_ICA]) {
				clock_check = true;
			}
		} else {
			dprintk(CVP_WARN, "dcvs is disabled, %d != %d + %d\n",
				ehdr->size, sizeof(struct cvp_hfi_msg_session_hdr_ext),
				sizeof(struct cvp_hfi_buf_type));
		}
	}

	return clock_check;
}

static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
	struct eva_kmd_hfi_packet *out_pkt)
{
@@ -131,6 +172,7 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
	struct cvp_session_queue *sq;
	struct msm_cvp_inst *s;
	int rc = 0;
	bool clock_check = false;

	if (!inst) {
		dprintk(CVP_ERR, "%s invalid session\n", __func__);
@@ -146,6 +188,11 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,

	rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);

	clock_check = check_clock_required(inst, out_pkt);
	if (clock_check)
		cvp_check_clock(inst,
			(struct cvp_hfi_msg_session_hdr_ext *)out_pkt);

	cvp_put_inst(inst);
	return rc;
}
@@ -156,14 +203,13 @@ static int msm_cvp_session_process_hfi(
	unsigned int in_offset,
	unsigned int in_buf_num)
{
	int pkt_idx, pkt_type, rc = 0;
	struct cvp_hfi_device *hdev;
	int pkt_idx, rc = 0;

	unsigned int offset = 0, buf_num = 0, signal;
	struct cvp_session_queue *sq;
	struct msm_cvp_inst *s;
	bool is_config_pkt;
	enum buf_map_type map_type;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;


	if (!inst || !inst->core || !in_pkt) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -174,7 +220,6 @@ static int msm_cvp_session_process_hfi(
	if (!s)
		return -ECONNRESET;

	hdev = inst->core->device;

	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
	if (pkt_idx < 0) {
@@ -227,34 +272,7 @@ static int msm_cvp_session_process_hfi(
		goto exit;
	}

	pkt_type = in_pkt->pkt_data[1];
	map_type = cvp_find_map_type(pkt_type);

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	/* The kdata will be overriden by transaction ID if the cmd has buf */
	cmd_hdr->client_data.kdata = pkt_idx;

	if (map_type == MAP_PERSIST)
		rc = msm_cvp_map_user_persist(inst, in_pkt, offset, buf_num);
	else if (map_type == UNMAP_PERSIST)
		rc = msm_cvp_mark_user_persist(inst, in_pkt, offset, buf_num);
	else
		rc = msm_cvp_map_frame(inst, in_pkt, offset, buf_num);

	if (rc)
		goto exit;

	rc = call_hfi_op(hdev, session_send, (void *)inst->session, in_pkt);
	if (rc) {
		dprintk(CVP_ERR,
			"%s: Failed in call_hfi_op %d, %x\n",
			__func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
		goto exit;
	}

	if (signal != HAL_NO_RESP)
		dprintk(CVP_ERR, "%s signal %d from UMD is not HAL_NO_RESP\n",
			__func__, signal);
	cvp_enqueue_pkt(inst, in_pkt, offset, buf_num);

exit:
	cvp_put_inst(inst);
@@ -492,7 +510,7 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
	if (rc) {
		dprintk(CVP_ERR, "%s %s: Failed in call_hfi_op %d, %x\n",
			current->comm, __func__, pkt->size, pkt->packet_type);
		synx_state = SYNX_STATE_SIGNALED_ERROR;
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
		goto exit;
	}
@@ -501,34 +519,14 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
		(struct eva_kmd_hfi_packet *)&hdr);

	/* Only FD support dcvs at certain FW */
	if (!msm_cvp_dcvs_disable &&
			hdr.packet_type == HFI_MSG_SESSION_CVP_FD) {
		if (hdr.size == sizeof(struct cvp_hfi_msg_session_hdr_ext)
			+ sizeof(struct cvp_hfi_buf_type)) {
			struct cvp_hfi_msg_session_hdr_ext *fhdr =
				(struct cvp_hfi_msg_session_hdr_ext *)&hdr;
			struct msm_cvp_core *core = inst->core;
	clock_check = check_clock_required(inst,
			(struct eva_kmd_hfi_packet *)&hdr);

			dprintk(CVP_PWR, "busy cycle %d, total %d\n",
				fhdr->busy_cycles, fhdr->total_cycles);

			if (core->dyn_clk.sum_fps[HFI_HW_FDU] ||
					core->dyn_clk.sum_fps[HFI_HW_MPU] ||
					core->dyn_clk.sum_fps[HFI_HW_OD] ||
					core->dyn_clk.sum_fps[HFI_HW_ICA]) {
				clock_check = true;
			}
		} else {
			dprintk(CVP_WARN, "dcvs is disabled, %d != %d + %d\n",
				hdr.size, sizeof(struct cvp_hfi_msg_session_hdr_ext),
				sizeof(struct cvp_hfi_buf_type));
		}
	}
	hfi_err = hdr.error_type;
	if (rc) {
		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
			current->comm, __func__, rc);
		synx_state = SYNX_STATE_SIGNALED_ERROR;
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
		goto exit;
	}
	if (hfi_err == HFI_ERR_SESSION_FLUSHED) {
@@ -584,7 +582,7 @@ static void cvp_free_fence_data(struct cvp_fence_command *f)

static int cvp_fence_thread(void *data)
{
	int rc = 0;
	int rc = 0, num_fences;
	struct msm_cvp_inst *inst;
	struct cvp_fence_queue *q;
	enum queue_state state;
@@ -618,7 +616,14 @@ wait:
	pkt = f->pkt;
	synx = (u32 *)f->synx;

	ktid = pkt->client_data.kdata & (FENCE_BIT - 1);
	num_fences = f->num_fences - f->output_index;
	/*
	 * If there is output fence, go through fence path
	 * Otherwise, go through non-fenced path
	 */
	if (num_fences)
		ktid = pkt->client_data.kdata & (FENCE_BIT - 1);

	dprintk(CVP_SYNX, "%s pkt type %d on ktid %llu frameID %llu\n",
		current->comm, pkt->packet_type, ktid, f->frame_id);

@@ -751,7 +756,7 @@ static int msm_cvp_session_process_hfi_fence(struct msm_cvp_inst *inst,

	rc = inst->core->synx_ftbl->cvp_import_synx(inst, f, fence);
	if (rc) {
		kfree(f);
		cvp_free_fence_data(f);
		goto exit;
	}

@@ -766,6 +771,158 @@ exit:
	return rc;
}


static int cvp_populate_fences( struct eva_kmd_hfi_packet *in_pkt,
	unsigned int offset, unsigned int num, struct msm_cvp_inst *inst)
{
#ifdef CVP_CONFIG_SYNX_V2
	u32 i, buf_offset;
	struct eva_kmd_fence fences[MAX_HFI_FENCE_SIZE >> 2];
	struct cvp_fence_command *f;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct cvp_fence_queue *q;
	enum op_mode mode;
	struct cvp_buf_type *buf;

	int rc;
	rc = 0;

	q = &inst->fence_cmd_queue;

	mutex_lock(&q->lock);
	mode = q->mode;
	mutex_unlock(&q->lock);

	if (mode == OP_DRAINING) {
		dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
		rc = -EBUSY;
		goto exit;
	}

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	rc = cvp_alloc_fence_data((&f), cmd_hdr->size);
	if (rc)
		goto exit;

	f->type = cmd_hdr->packet_type;
	f->mode = OP_NORMAL;
	f->signature = 0xFEEDFACE;
	f->num_fences = 0;

	/* First pass to find INPUT synx handles */
	buf_offset = offset;
	for (i = 0; i < num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[buf_offset];
		buf_offset += sizeof(*buf) >> 2;

		if (buf->input_handle) {
			/* Check fence_type? */
			fences[f->num_fences].h_synx = buf->input_handle;
			f->num_fences++;
			buf->fence_type &= ~INPUT_FENCE_BITMASK;
			buf->input_handle = 0;
		}
	}
	f->output_index = f->num_fences;

	dprintk(CVP_SYNX, "%s:Input Fence passed - Number of Fences is %d\n",
		__func__, f->num_fences);

	/*
	 * Second pass to find OUTPUT synx handle
	 * If no of fences is 0 dont execute the below portion until line 911, return 0
	 */
	buf_offset = offset;
	for (i = 0; i < num; i++) {
		buf = (struct cvp_buf_type*)&in_pkt->pkt_data[buf_offset];
		buf_offset += sizeof(*buf) >> 2;

		if (buf->output_handle) {
			/* Check fence_type? */
			fences[f->num_fences].h_synx = buf->output_handle;
			f->num_fences++;
			buf->fence_type &= ~OUTPUT_FENCE_BITMASK;
			buf->output_handle = 0;
		}
	}
	dprintk(CVP_SYNX, "%s:Output Fence passed - Number of Fences is %d\n",
		__func__, f->num_fences);

	if (f->num_fences == 0)
		goto free_exit;

	rc = inst->core->synx_ftbl->cvp_import_synx(inst, f,
		(u32*)fences);

	if (rc)
		goto free_exit;

	memcpy(f->pkt, cmd_hdr, cmd_hdr->size);
	f->pkt->client_data.kdata |= FENCE_BIT;

	mutex_lock(&q->lock);
	list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
	mutex_unlock(&q->lock);

	wake_up(&inst->fence_cmd_queue.wq);

	return f->num_fences;

free_exit:
	cvp_free_fence_data(f);
exit:
#endif /* CVP_CONFIG_SYNX_V2 */
	return rc;
}


static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
	struct eva_kmd_hfi_packet *in_pkt,
	unsigned int in_offset,
	unsigned int in_buf_num)
{
	struct cvp_hfi_device *hdev;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	int pkt_type, rc = 0;
	enum buf_map_type map_type;

	hdev = inst->core->device;

	pkt_type = in_pkt->pkt_data[1];
	map_type = cvp_find_map_type(pkt_type);

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	/* The kdata will be overriden by transaction ID if the cmd has buf */
	cmd_hdr->client_data.kdata = 0;

	if (map_type == MAP_PERSIST)
		rc = msm_cvp_map_user_persist(inst, in_pkt, in_offset, in_buf_num);
	else if (map_type == UNMAP_PERSIST)
		rc = msm_cvp_mark_user_persist(inst, in_pkt, in_offset, in_buf_num);
	else
		rc = msm_cvp_map_frame(inst, in_pkt, in_offset, in_buf_num);

	if (cvp_populate_fences(in_pkt, in_offset, in_buf_num, inst) == 0) {
		rc = call_hfi_op(hdev, session_send, (void*)inst->session,
			in_pkt);
		if (rc) {
			dprintk(CVP_ERR,"%s: Failed in call_hfi_op %d, %x\n",
				__func__, in_pkt->pkt_data[0],
				in_pkt->pkt_data[1]);
			if (map_type == MAP_FRAME)
				msm_cvp_unmap_frame(inst,
					cmd_hdr->client_data.kdata);
		}
		goto exit;
	} else {
		dprintk(CVP_SYNX, "Going fenced path\n");
		goto exit;
	}

exit:
	return rc;
}

static inline int div_by_1dot5(unsigned int a)
{
	unsigned long i = a << 1;
@@ -1615,7 +1772,7 @@ int cvp_clean_session_queues(struct msm_cvp_inst *inst)
	mutex_lock(&q->lock);
	if (q->state == QUEUE_START) {
		mutex_unlock(&q->lock);
		cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_ERROR);
		cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
	} else {
		dprintk(CVP_WARN, "Incorrect fence cmd queue state %d\n",
			q->state);
@@ -1540,6 +1540,7 @@ int msm_cvp_map_frame(struct msm_cvp_inst *inst,
	if (!offset || !buf_num)
		return 0;


	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
	ktid &= (FENCE_BIT - 1);
@@ -12,6 +12,7 @@
#include <linux/dma-heap.h>
#include <linux/refcount.h>
#include <media/msm_eva_private.h>
#include "cvp_comm_def.h"

#define MAX_FRAME_BUFFER_NUMS 30
#define MAX_DMABUF_NUMS 64
@@ -108,6 +109,22 @@ static inline void DEINIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
	cache->nr = 0;
}

#define INPUT_FENCE_BITMASK 0x1
#define OUTPUT_FENCE_BITMASK 0x2

#ifdef CVP_CONFIG_SYNX_V2
struct cvp_buf_type {
	s32 fd;
	u32 size;
	u32 offset;
	u32 flags;
	u32 reserved1;
	u32 reserved2;
	u32 fence_type;
	u32 input_handle;
	u32 output_handle;
};
#else
struct cvp_buf_type {
	s32 fd;
	u32 size;
@@ -121,6 +138,7 @@ struct cvp_buf_type {
		};
	};
};
#endif

enum buffer_owner {
	DRIVER,
@@ -36,6 +36,7 @@ bool msm_cvp_mmrm_enabled = !true;
#endif
bool msm_cvp_dcvs_disable = !true;
int msm_cvp_minidump_enable = !1;
bool cvp_kernel_fence_enabled = true;

#define MAX_DBG_BUF_SIZE 4096

@@ -257,6 +258,7 @@ struct dentry *msm_cvp_debugfs_init_drv(void)
		&msm_cvp_minidump_enable);
	debugfs_create_bool("fw_coverage", 0644, dir, &msm_cvp_fw_coverage);
	debugfs_create_bool("auto_pil", 0644, dir, &msm_cvp_auto_pil);
	debugfs_create_bool("kernel_fence", 0644, dir, &cvp_kernel_fence_enabled);
	debugfs_create_bool("disable_thermal_mitigation", 0644, dir,
		&msm_cvp_thermal_mitigation_disabled);
	debugfs_create_bool("enable_cacheop", 0644, dir,
@@ -68,6 +68,7 @@ extern bool msm_cvp_dsp_disable;
extern bool msm_cvp_mmrm_enabled;
extern bool msm_cvp_dcvs_disable;
extern int msm_cvp_minidump_enable;
extern bool cvp_kernel_fence_enabled;

#define dprintk(__level, __fmt, arg...) \
	do { \
@@ -211,7 +211,7 @@ enum msm_cvp_modes {
};

#define MAX_NUM_MSGS_PER_SESSION 128
#define CVP_MAX_WAIT_TIME 2000
#define CVP_MAX_WAIT_TIME 10000

struct cvp_session_msg {
	struct list_head node;
@@ -417,7 +417,7 @@ struct msm_cvp_inst {
	u32 error_code;
	/* prev_error_code saves value of error_code before it's cleared */
	u32 prev_error_code;
	struct synx_session synx_session_id;
	struct synx_session *synx_session_id;
	struct cvp_fence_queue fence_cmd_queue;
	char proc_name[TASK_COMM_LEN];
};
@@ -141,6 +141,38 @@ static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
	return ret;
}

static int msm_cvp_load_regspace_mapping(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int ipclite_mapping_config[3];
	unsigned int hwmutex_mapping_config[3];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "ipclite_mappings",
		ipclite_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipclite reg: %d\n", ret);
		return ret;
	}
	res->ipclite_iova = ipclite_mapping_config[0];
	res->ipclite_size = ipclite_mapping_config[1];
	res->ipclite_phyaddr = ipclite_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node, "hwmutex_mappings",
		hwmutex_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read hwmutex reg: %d\n", ret);
		return ret;
	}
	res->hwmutex_iova = hwmutex_mapping_config[0];
	res->hwmutex_size = hwmutex_mapping_config[1];
	res->hwmutex_phyaddr = hwmutex_mapping_config[2];
	dprintk(CVP_CORE, "ipclite %#x %#x %#x hwmutex %#x %#x %#x\n",
		res->ipclite_iova, res->ipclite_phyaddr, res->ipclite_size,
		res->hwmutex_iova, res->hwmutex_phyaddr, res->hwmutex_size);
	return ret;
}

static int msm_cvp_load_gcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
@@ -859,6 +891,10 @@ int cvp_read_platform_resources_from_dt(
	if (rc)
		dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);

	rc = msm_cvp_load_regspace_mapping(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load reg space mapping: %d\n", rc);

	rc = msm_cvp_load_gcc_regs(res);

	rc = msm_cvp_load_regulator_table(res);
@@ -156,6 +156,12 @@ struct msm_cvp_platform_resources {
	uint32_t register_size;
	uint32_t ipcc_reg_size;
	uint32_t gcc_reg_size;
	phys_addr_t ipclite_iova;
	phys_addr_t ipclite_phyaddr;
	uint32_t ipclite_size;
	phys_addr_t hwmutex_iova;
	phys_addr_t hwmutex_phyaddr;
	uint32_t hwmutex_size;
	uint32_t irq;
	uint32_t sku_version;
	struct allowed_clock_rates_table *allowed_clks_tbl;
@@ -8,14 +8,21 @@
#include "msm_cvp_debug.h"
#include "msm_cvp_core.h"
#include "msm_cvp_dsp.h"
#include "cvp_comm_def.h"

#ifdef CVP_SYNX_ENABLED
static int cvp_sess_init_synx_v1(struct msm_cvp_inst *inst)

#ifdef CVP_CONFIG_SYNX_V2

static int cvp_sess_init_synx_v2(struct msm_cvp_inst *inst)
{
	struct synx_initialization_params params;

	struct synx_initialization_params params = { 0 };

	params.name = "cvp-kernel-client";
	if (synx_initialize(&inst->synx_session_id, &params)) {
	params.id = SYNX_CLIENT_EVA_CTX0;
	inst->synx_session_id = synx_initialize(&params);
	if (IS_ERR_OR_NULL(&inst->synx_session_id)) {
		dprintk(CVP_ERR, "%s synx_initialize failed\n", __func__);
		return -EFAULT;
	}
@@ -23,7 +30,272 @@ static int cvp_sess_init_synx_v1(struct msm_cvp_inst *inst)
	return 0;
}

static int cvp_sess_deinit_synx(struct msm_cvp_inst *inst)
static int cvp_sess_deinit_synx_v2(struct msm_cvp_inst *inst)
{
	if (!inst) {
		dprintk(CVP_ERR, "Used invalid sess in deinit_synx\n");
		return -EINVAL;
	}
	synx_uninitialize(inst->synx_session_id);
	return 0;
}

static void cvp_dump_fence_queue_v2(struct msm_cvp_inst *inst)
{
	struct cvp_fence_queue *q;
	struct cvp_fence_command *f;
	struct synx_session *ssid;
	int i;

	q = &inst->fence_cmd_queue;
	ssid = inst->synx_session_id;
	mutex_lock(&q->lock);
	dprintk(CVP_WARN, "inst %x fence q mode %d, ssid %pK\n",
		hash32_ptr(inst->session), q->mode, ssid);

	dprintk(CVP_WARN, "fence cmdq wait list:\n");
	list_for_each_entry(f, &q->wait_list, list) {
		dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
		for (i = 0; i < f->output_index; i++)
			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
				i, f->synx[i],
				synx_get_status(ssid, f->synx[i]));

	}

	dprintk(CVP_WARN, "fence cmdq schedule list:\n");
	list_for_each_entry(f, &q->sched_list, list) {
		dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
		for (i = 0; i < f->output_index; i++)
			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
				i, f->synx[i],
				synx_get_status(ssid, f->synx[i]));

	}
	mutex_unlock(&q->lock);
}

static int cvp_import_synx_v2(struct msm_cvp_inst *inst,
	struct cvp_fence_command *fc,
	u32 *fence)
{
	int rc = 0, rr = 0;
	int i;
	struct eva_kmd_fence *fs;
	struct synx_import_params params = {0};
	u32 h_synx;
	struct synx_session *ssid;

	fs = (struct eva_kmd_fence *)fence;
	ssid = inst->synx_session_id;

	for (i = 0; i < fc->num_fences; ++i) {
		h_synx = fs[i].h_synx;

		if (h_synx) {
			params.type = SYNX_IMPORT_INDV_PARAMS;
			params.indv.fence = &h_synx;
			params.indv.flags = SYNX_IMPORT_SYNX_FENCE
				| SYNX_IMPORT_LOCAL_FENCE;
			params.indv.new_h_synx = &fc->synx[i];

			rc = synx_import(ssid, &params);
			if (rc) {
				dprintk(CVP_ERR,
					"%s: %u synx_import failed\n",
					__func__, h_synx);
				rr = rc;
			}
		}
	}

	return rr;
}

static int cvp_release_synx_v2(struct msm_cvp_inst *inst,
	struct cvp_fence_command *fc)
{
	int rc = 0;
	int i;
	u32 h_synx;
	struct synx_session *ssid;

	ssid = inst->synx_session_id;
	for (i = 0; i < fc->num_fences; ++i) {
		h_synx = fc->synx[i];
		if (h_synx) {
			rc = synx_release(ssid, h_synx);
			if (rc)
				dprintk(CVP_ERR,
					"%s: synx_release %d, %d failed\n",
					__func__, h_synx, i);
		}
	}
	return rc;
}

static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
	enum cvp_synx_type type,
	struct cvp_fence_command *fc,
	int synx_state)
{
	int rc = 0;
	int i;
	u32 h_synx;
	struct synx_session *ssid;
	int start = 0, end = 0;

	ssid = inst->synx_session_id;

	if (type == CVP_INPUT_SYNX) {
		start = 0;
		end = fc->output_index;
	} else if (type == CVP_OUTPUT_SYNX) {
		start = fc->output_index;
		end = fc->num_fences;
	} else {
		dprintk(CVP_ERR, "%s Incorrect synx type\n", __func__);
		return -EINVAL;
	}

	for (i = start; i < end; ++i) {
		h_synx = fc->synx[i];
		if (h_synx) {
			rc = synx_signal(ssid, h_synx, synx_state);
			dprintk(CVP_SYNX, "Cancel synx %d session %llx\n",
				h_synx, inst);
			if (rc)
				dprintk(CVP_ERR,
					"%s: synx_signal %d %d %d failed\n",
					__func__, h_synx, i, synx_state);
		}
	}

	return rc;


}

static int cvp_cancel_synx_v2(struct msm_cvp_inst *inst, enum cvp_synx_type type,
	struct cvp_fence_command *fc, int synx_state)
{
	return cvp_cancel_synx_impl(inst, type, fc, synx_state);
}

static int cvp_wait_synx(struct synx_session *ssid, u32 *synx, u32 num_synx,
	u32 *synx_state)
{
	int i = 0, rc = 0;
	unsigned long timeout_ms = 2000;
	u32 h_synx;

	while (i < num_synx) {
		h_synx = synx[i];
		if (h_synx) {
			rc = synx_wait(ssid, h_synx, timeout_ms);
			if (rc) {
				*synx_state = synx_get_status(ssid, h_synx);
				if(*synx_state == SYNX_STATE_SIGNALED_SUCCESS)
				{
					dprintk(CVP_DBG, "%s: SYNX SIGNAl STATE SUCCESS \n", __func__);
					rc=0;
					i++;
					continue;
				}
				else if (*synx_state == SYNX_STATE_SIGNALED_CANCEL) {
					dprintk(CVP_SYNX,
						"%s: synx_wait %d cancel %d state %d\n",
						current->comm, i, rc, *synx_state);
				} else {
					dprintk(CVP_ERR,
						"%s: synx_wait %d failed %d state %d\n",
						current->comm, i, rc, *synx_state);
					*synx_state = SYNX_STATE_SIGNALED_CANCEL;
				}
				return rc;
			} else {
				rc = 0; /* SYNX_STATE_SIGNALED_SUCCESS = 2 */
			}

			dprintk(CVP_SYNX, "Wait synx %u returned succes\n",
				h_synx);
		}
		++i;
	}
	return rc;
}

static int cvp_signal_synx(struct synx_session *ssid, u32 *synx, u32 num_synx,
	u32 synx_state)
{
	int i = 0, rc = 0;
	u32 h_synx;

	while (i < num_synx) {
		h_synx = synx[i];
		if (h_synx) {
			rc = synx_signal(ssid, h_synx, synx_state);
			if (rc) {
				dprintk(CVP_ERR,
					"%s: synx_signal %u %d failed\n",
					current->comm, h_synx, i);
				synx_state = SYNX_STATE_SIGNALED_CANCEL;
			}
			dprintk(CVP_SYNX, "Signaled synx %u state %d\n",
				h_synx, synx_state);
		}
		++i;
	}
	return rc;
}

static int cvp_synx_ops_v2(struct msm_cvp_inst *inst, enum cvp_synx_type type,
	struct cvp_fence_command *fc, u32 *synx_state)
{
	struct synx_session *ssid;

	ssid = inst->synx_session_id;

	if (type == CVP_INPUT_SYNX) {
		return cvp_wait_synx(ssid, fc->synx, fc->output_index,
			synx_state);
	} else if (type == CVP_OUTPUT_SYNX) {
		return cvp_signal_synx(ssid, &fc->synx[fc->output_index],
			(fc->num_fences - fc->output_index),
			*synx_state);
	} else {
		dprintk(CVP_ERR, "%s Incorrect SYNX type\n", __func__);
		return -EINVAL;
	}
}

static struct msm_cvp_synx_ops cvp_synx = {
	.cvp_sess_init_synx = cvp_sess_init_synx_v2,
	.cvp_sess_deinit_synx = cvp_sess_deinit_synx_v2,
	.cvp_release_synx = cvp_release_synx_v2,
	.cvp_import_synx = cvp_import_synx_v2,
	.cvp_synx_ops = cvp_synx_ops_v2,
	.cvp_cancel_synx = cvp_cancel_synx_v2,
	.cvp_dump_fence_queue = cvp_dump_fence_queue_v2,
};


#else
static int cvp_sess_init_synx_v1(struct msm_cvp_inst *inst)
{
	struct synx_initialization_params params;

	params.name = "cvp-kernel-client";
	if (synx_initialize(&inst->synx_session_id, &params)) {

		dprintk(CVP_ERR, "%s synx_initialize failed\n", __func__);
		return -EFAULT;
	}

	return 0;
}

static int cvp_sess_deinit_synx_v1(struct msm_cvp_inst *inst)
{
	if (!inst) {
		dprintk(CVP_ERR, "Used invalid sess in deinit_synx\n");
@@ -53,7 +325,6 @@ static void cvp_dump_fence_queue_v1(struct msm_cvp_inst *inst)
			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
				i, f->synx[i],
				synx_get_status(ssid, f->synx[i]));

	}

	dprintk(CVP_WARN, "fence cmdq schedule list:\n");
@@ -63,7 +334,6 @@ static void cvp_dump_fence_queue_v1(struct msm_cvp_inst *inst)
			dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
				i, f->synx[i],
				synx_get_status(ssid, f->synx[i]));

	}
	mutex_unlock(&q->lock);
}
@@ -74,7 +344,7 @@ static int cvp_import_synx_v1(struct msm_cvp_inst *inst,
{
	int rc = 0, rr = 0;
	int i;
	struct cvp_fence_type *fs;
	struct eva_kmd_fence *fs;
	struct synx_import_params params;
	s32 h_synx;
	struct synx_session ssid;
@@ -84,7 +354,7 @@ static int cvp_import_synx_v1(struct msm_cvp_inst *inst,
		return -EINVAL;
	}

	fs = (struct cvp_fence_type *)fence;
	fs = (struct eva_kmd_fence *)fence;
	ssid = inst->synx_session_id;

	for (i = 0; i < fc->num_fences; ++i) {
@@ -177,7 +447,7 @@ static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,

}

static int cvp_cancel_synx(struct msm_cvp_inst *inst, enum cvp_synx_type type,
static int cvp_cancel_synx_v1(struct msm_cvp_inst *inst, enum cvp_synx_type type,
	struct cvp_fence_command *fc, int synx_state)
{
	if (fc->signature != 0xFEEDFACE) {
@@ -268,62 +538,76 @@ static int cvp_synx_ops_v1(struct msm_cvp_inst *inst, enum cvp_synx_type type,
		return -EINVAL;
	}
}

static struct msm_cvp_synx_ops cvp_synx = {
	.cvp_sess_init_synx = cvp_sess_init_synx_v1,
	.cvp_sess_deinit_synx = cvp_sess_deinit_synx_v1,
	.cvp_release_synx = cvp_release_synx_v1,
	.cvp_import_synx = cvp_import_synx_v1,
	.cvp_synx_ops = cvp_synx_ops_v1,
	.cvp_cancel_synx = cvp_cancel_synx_v1,
	.cvp_dump_fence_queue = cvp_dump_fence_queue_v1,
};

#endif /* End of CVP_CONFIG_SYNX_V2 */
#else
static int cvp_sess_init_synx_v1(struct msm_cvp_inst *inst)
static int cvp_sess_init_synx_stub(struct msm_cvp_inst *inst)
{
	return 0;
}

static int cvp_sess_deinit_synx(struct msm_cvp_inst *inst)
static int cvp_sess_deinit_synx_stub(struct msm_cvp_inst *inst)
{
	return 0;
}

static int cvp_release_synx_v1(struct msm_cvp_inst *inst,
static int cvp_release_synx_stub(struct msm_cvp_inst *inst,
	struct cvp_fence_command *fc)
{
	return 0;
}

static int cvp_import_synx_v1(struct msm_cvp_inst *inst,
static int cvp_import_synx_stub(struct msm_cvp_inst *inst,
	struct cvp_fence_command *fc,
	u32 *fence)
{
	return 0;
}

static int cvp_synx_ops_v1(struct msm_cvp_inst *inst, enum cvp_synx_type type,
static int cvp_synx_ops_stub(struct msm_cvp_inst *inst, enum cvp_synx_type type,
	struct cvp_fence_command *fc, u32 *synx_state)
{
	return 0;
}

static int cvp_cancel_synx(struct msm_cvp_inst *inst, enum cvp_synx_type type,
static int cvp_cancel_synx_stub(struct msm_cvp_inst *inst, enum cvp_synx_type type,
	struct cvp_fence_command *fc, int synx_state)
{
	return 0;
}

static void cvp_dump_fence_queue_v1(struct msm_cvp_inst *inst)
static void cvp_dump_fence_queue_stub(struct msm_cvp_inst *inst)
{
}
#endif

struct msm_cvp_synx_ops cvp_synx_v1 = {
	.cvp_sess_init_synx = cvp_sess_init_synx_v1,
	.cvp_sess_deinit_synx = cvp_sess_deinit_synx,
	.cvp_release_synx = cvp_release_synx_v1,
	.cvp_import_synx = cvp_import_synx_v1,
	.cvp_synx_ops = cvp_synx_ops_v1,
	.cvp_cancel_synx = cvp_cancel_synx,
	.cvp_dump_fence_queue = cvp_dump_fence_queue_v1,
static struct msm_cvp_synx_ops cvp_synx = {
	.cvp_sess_init_synx = cvp_sess_init_synx_stub,
	.cvp_sess_deinit_synx = cvp_sess_deinit_synx_stub,
	.cvp_release_synx = cvp_release_synx_stub,
	.cvp_import_synx = cvp_import_synx_stub,
	.cvp_synx_ops = cvp_synx_ops_stub,
	.cvp_cancel_synx = cvp_cancel_synx_stub,
	.cvp_dump_fence_queue = cvp_dump_fence_queue_stub,
};


#endif /* End of CVP_SYNX_ENABLED */

void cvp_synx_ftbl_init(struct msm_cvp_core *core)
{
	if (!core)
		return;

	/* Synx API version check below if needed */
	core->synx_ftbl = &cvp_synx_v1;
	core->synx_ftbl = &cvp_synx;
}
@@ -3,7 +3,7 @@
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#ifndef _MSM_CVP_SYNX_H_
//#ifndef _MSM_CVP_SYNX_H_
#define _MSM_CVP_SYNX_H_

#include <linux/types.h>
@@ -32,11 +32,6 @@ struct cvp_fence_queue {
	struct list_head sched_list;
};

struct cvp_fence_type {
	s32 h_synx;
	u32 secure_key;
};

struct cvp_fence_command {
	struct list_head list;
	u64 frame_id;
@@ -76,4 +71,4 @@ struct msm_cvp_synx_ops {
};

void cvp_synx_ftbl_init(struct msm_cvp_core *core);
#endif
//#endif