video: driver: Add support for Synx V2 fences

Add Synx V2 fence support to improve latency
in video decode use cases.

Change-Id: If21f8b65895364a804f8e03580b09c44b377c199
Signed-off-by: Akshata Sahukar <quic_asahukar@quicinc.com>
Author: Akshata Sahukar
Date:   2023-02-03 16:58:07 -08:00
Parent: 59a8ab6758
Commit: 33d0b20141
21 changed files with 678 additions and 51 deletions
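
The central change is routing every fence operation through a per-core ops table: platform data selects get_synx_fence_ops() where SUPPORTS_SYNX_FENCE is set and get_dma_fence_ops() otherwise, and callers dispatch through the call_fence_op() macro. Below is a minimal, self-contained sketch of that dispatch pattern, not driver code: the struct layout, the stub backends and main() are illustrative assumptions, while the macro shape and the fallback-to-dma-fence behaviour mirror the diff that follows.

/*
 * Simplified sketch of the fence-ops indirection introduced by this patch.
 * Types and backends are stand-ins; only the dispatch pattern is the point.
 */
#include <stdio.h>
#include <stdint.h>

struct core;

struct fence_ops {
        int (*fence_register)(struct core *core);
        int (*fence_signal)(struct core *core, uint64_t fence_id);
};

struct core {
        int supports_synx_fence;           /* SUPPORTS_SYNX_FENCE capability */
        const struct fence_ops *fence_ops; /* backend chosen at init time */
};

/* mirrors call_fence_op(): dispatch only when the backend provides the op */
#define call_fence_op(c, op, ...) \
        (((c) && (c)->fence_ops && (c)->fence_ops->op) ? \
         (c)->fence_ops->op(__VA_ARGS__) : 0)

static int dma_register(struct core *core) { (void)core; return 0; }
static int dma_signal(struct core *core, uint64_t id)
{
        (void)core;
        printf("dma-fence backend: signal fence %llu\n", (unsigned long long)id);
        return 0;
}
static const struct fence_ops dma_ops = {
        .fence_register = dma_register,
        .fence_signal = dma_signal,
};

static int synx_register(struct core *core) { (void)core; return 0; }
static int synx_signal(struct core *core, uint64_t id)
{
        (void)core;
        printf("synx backend: signal hw fence %llu\n", (unsigned long long)id);
        return 0;
}
static const struct fence_ops synx_ops = {
        .fence_register = synx_register,
        .fence_signal = synx_signal,
};

int main(void)
{
        struct core core = { .supports_synx_fence = 1 };

        /* platform data picks the backend; probe falls back to dma-fences */
        core.fence_ops = core.supports_synx_fence ? &synx_ops : &dma_ops;
        if (call_fence_op(&core, fence_register, &core))
                core.fence_ops = &dma_ops;

        call_fence_op(&core, fence_signal, &core, 42);
        return 0;
}

In the driver itself the same fallback appears in msm_vidc_component_master_bind(), where a failed synx registration clears SUPPORTS_SYNX_FENCE and re-points fence_ops at the dma-fence backend.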

View File

@@ -20,6 +20,7 @@ KBUILD_OPTIONS := VIDEO_ROOT=$(VIDEO_BLD_DIR)
KBUILD_OPTIONS += $(VIDEO_SELECT)
KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS=$(shell pwd)/$(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(shell pwd)/$(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers
###########################################################
DLKM_DIR := device/qcom/common/dlkm
@@ -36,7 +37,9 @@ LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_REQUIRED_MODULES := mmrm-module-symvers
+LOCAL_REQUIRED_MODULES += hw-fence-module-symvers
LOCAL_ADDITIONAL_DEPENDENCIES := $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif

View File

@@ -17,6 +17,7 @@
#include "msm_vidc_memory.h" #include "msm_vidc_memory.h"
#include "msm_vidc_control.h" #include "msm_vidc_control.h"
#include "msm_vidc_driver.h" #include "msm_vidc_driver.h"
#include "msm_vidc_fence.h"
#include "hfi_packet.h" #include "hfi_packet.h"
#include "hfi_property.h" #include "hfi_property.h"
#include "venus_hfi.h" #include "venus_hfi.h"
@@ -275,6 +276,11 @@ static int msm_vidc_init_ops(struct msm_vidc_core *core)
d_vpr_e("%s: invalid resource ops\n", __func__); d_vpr_e("%s: invalid resource ops\n", __func__);
return -EINVAL; return -EINVAL;
} }
core->fence_ops = get_dma_fence_ops();
if (!core->fence_ops) {
d_vpr_e("%s: invalid dma fence ops\n", __func__);
return -EINVAL;
}
return 0; return 0;
} }

View File

@@ -322,6 +322,7 @@ static struct msm_platform_core_capability core_data_kalama[] = {
        {ENC_AUTO_FRAMERATE, 1},
        {DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_META_CAPTURE |
                V4L2_CAP_STREAMING},
+       {SUPPORTS_SYNX_FENCE, 0},
        {SUPPORTS_REQUESTS, 1},
};

View File

@@ -17,6 +17,7 @@
#include "msm_vidc_internal.h" #include "msm_vidc_internal.h"
#include "msm_vidc_platform_ext.h" #include "msm_vidc_platform_ext.h"
#include "msm_vidc_memory_ext.h" #include "msm_vidc_memory_ext.h"
#include "msm_vidc_synx.h"
#include "resources_ext.h" #include "resources_ext.h"
#include "msm_vidc_iris33.h" #include "msm_vidc_iris33.h"
#include "hfi_property.h" #include "hfi_property.h"
@@ -323,6 +324,7 @@ static struct msm_platform_core_capability core_data_pineapple[] = {
        {ENC_AUTO_FRAMERATE, 1},
        {DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_META_CAPTURE |
                V4L2_CAP_STREAMING},
+       {SUPPORTS_SYNX_FENCE, 0}, /* disabled temporarily */
        {SUPPORTS_REQUESTS, 1},
};
@@ -2819,6 +2821,12 @@ static int msm_vidc_init_data(struct msm_vidc_core *core)
d_vpr_e("%s: invalid resource ext ops\n", __func__); d_vpr_e("%s: invalid resource ext ops\n", __func__);
return -EINVAL; return -EINVAL;
} }
core->fence_ops = get_synx_fence_ops();
if (!core->fence_ops) {
d_vpr_e("%s: invalid synx fence ops\n", __func__);
return -EINVAL;
}
rc = msm_vidc_pineapple_check_ddr_type(); rc = msm_vidc_pineapple_check_ddr_type();
if (rc) if (rc)
return rc; return rc;

View File

@@ -220,6 +220,7 @@ static struct msm_platform_core_capability core_data_pineapple[] = {
        {NON_FATAL_FAULTS, 1},
        {ENC_AUTO_FRAMERATE, 1},
        {DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING},
+       {SUPPORTS_SYNX_FENCE, 0},
        {SUPPORTS_REQUESTS, 0},
};

View File

@@ -222,6 +222,7 @@ static struct msm_platform_core_capability core_data_waipio[] = {
        {NON_FATAL_FAULTS, 1},
        {ENC_AUTO_FRAMERATE, 1},
        {DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING},
+       {SUPPORTS_SYNX_FENCE, 0},
        {SUPPORTS_REQUESTS, 0},
};

View File

@@ -33,6 +33,13 @@ struct msm_vidc_venus_ops {
        int (*noc_error_info)(struct msm_vidc_core *core);
};
+struct msm_vidc_synx_fence_data {
+       u32 client_id;
+       void *session;
+       u32 client_flags; /* not used */
+       struct msm_vidc_mem queue;
+};
struct msm_vidc_mem_addr {
        u32 align_device_addr;
        u8 *align_virtual_addr;
@@ -109,11 +116,13 @@ struct msm_vidc_core {
        struct msm_vidc_venus_ops *venus_ops;
        const struct msm_vidc_resources_ops *res_ops;
        struct msm_vidc_session_ops *session_ops;
-       struct msm_vidc_memory_ops *mem_ops;
+       const struct msm_vidc_memory_ops *mem_ops;
        struct media_device_ops *media_device_ops;
+       const struct msm_vidc_fence_ops *fence_ops;
        u32 header_id;
        u32 packet_id;
        u32 sys_init_id;
+       struct msm_vidc_synx_fence_data synx_fence_data;
};
#endif // _MSM_VIDC_CORE_H_

View File

@@ -9,18 +9,26 @@
#include "msm_vidc_inst.h" #include "msm_vidc_inst.h"
#include "msm_vidc_buffer.h" #include "msm_vidc_buffer.h"
struct msm_vidc_fence *msm_vidc_fence_create(
struct msm_vidc_inst *inst);
int msm_vidc_create_fence_fd(struct msm_vidc_inst *inst,
struct msm_vidc_fence *fence);
struct msm_vidc_fence *msm_vidc_get_fence_from_id(
struct msm_vidc_inst *inst, u32 fence_id);
int msm_vidc_fence_signal(struct msm_vidc_inst *inst,
u32 fence_id);
void msm_vidc_fence_destroy(struct msm_vidc_inst *inst,
u32 fence_id);
int msm_vidc_fence_init(struct msm_vidc_inst *inst); int msm_vidc_fence_init(struct msm_vidc_inst *inst);
void msm_vidc_fence_deinit(struct msm_vidc_inst *inst); void msm_vidc_fence_deinit(struct msm_vidc_inst *inst);
#define call_fence_op(c, op, ...) \
(((c) && (c)->fence_ops && (c)->fence_ops->op) ? \
((c)->fence_ops->op(__VA_ARGS__)) : 0)
struct msm_vidc_fence_ops {
int (*fence_register)(struct msm_vidc_core *core);
int (*fence_deregister)(struct msm_vidc_core *core);
struct msm_vidc_fence *(*fence_create)(struct msm_vidc_inst *inst);
int (*fence_create_fd)(struct msm_vidc_inst *inst,
struct msm_vidc_fence *fence);
void (*fence_destroy)(struct msm_vidc_inst *inst,
u64 fence_id);
int (*fence_signal)(struct msm_vidc_inst *inst,
u64 fence_id);
void (*fence_recover)(struct msm_vidc_core *core);
};
const struct msm_vidc_fence_ops *get_dma_fence_ops(void);
#endif // __H_MSM_VIDC_FENCE_H__ #endif // __H_MSM_VIDC_FENCE_H__

View File

@@ -598,6 +598,7 @@ enum msm_vidc_core_capability_type {
        ENC_AUTO_FRAMERATE,
        DEVICE_CAPS,
        SUPPORTS_REQUESTS,
+       SUPPORTS_SYNX_FENCE,
        CORE_CAP_MAX,
};
@@ -858,6 +859,8 @@ struct msm_vidc_fence {
        spinlock_t lock;
        struct sync_file *sync_file;
        int fd;
+       u64 fence_id;
+       void *session;
};
struct msm_vidc_mem {
@@ -884,6 +887,7 @@ struct msm_vidc_mem {
        struct sg_table *table;
        struct dma_buf_attachment *attach;
        phys_addr_t phys_addr;
+       enum dma_data_direction direction;
};
struct msm_vidc_mem_list {

View File

@@ -76,6 +76,10 @@ struct msm_vidc_memory_ops {
struct msm_vidc_mem *mem); struct msm_vidc_mem *mem);
int (*memory_unmap_free)(struct msm_vidc_core *core, int (*memory_unmap_free)(struct msm_vidc_core *core,
struct msm_vidc_mem *mem); struct msm_vidc_mem *mem);
int (*mem_dma_map_page)(struct msm_vidc_core *core,
struct msm_vidc_mem *mem);
int (*mem_dma_unmap_page)(struct msm_vidc_core *core,
struct msm_vidc_mem *mem);
u32 (*buffer_region)(struct msm_vidc_inst *inst, u32 (*buffer_region)(struct msm_vidc_inst *inst,
enum msm_vidc_buffer_type buffer_type); enum msm_vidc_buffer_type buffer_type);
int (*iommu_map)(struct msm_vidc_core *core, int (*iommu_map)(struct msm_vidc_core *core,
@@ -84,6 +88,6 @@ struct msm_vidc_memory_ops {
                struct msm_vidc_mem *mem);
};
-struct msm_vidc_memory_ops *get_mem_ops(void);
+const struct msm_vidc_memory_ops *get_mem_ops(void);
#endif // _MSM_VIDC_MEMORY_H_

View File

@@ -7,7 +7,8 @@
#ifndef _MSM_VIDC_MEMORY_EXT_H_
#define _MSM_VIDC_MEMORY_EXT_H_
-struct msm_vidc_memory_ops;
+#include "msm_vidc_memory.h"
-struct msm_vidc_memory_ops *get_mem_ops_ext(void);
+const struct msm_vidc_memory_ops *get_mem_ops_ext(void);
#endif // _MSM_VIDC_MEMORY_EXT_H_

View File

@@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _H_MSM_VIDC_SYNX_H_
#define _H_MSM_VIDC_SYNX_H_
#include "msm_vidc_fence.h"
const struct msm_vidc_fence_ops *get_synx_fence_ops(void);
#endif //_H_MSM_VIDC_SYNX_H_

View File

@@ -1682,17 +1682,19 @@ int msm_vidc_get_fence_fd(struct msm_vidc_inst *inst, int *fence_fd)
{
        int rc = 0;
        struct msm_vidc_fence *fence, *dummy_fence;
+       struct msm_vidc_core *core;
        bool found = false;
        *fence_fd = INVALID_FD;
-       if (!inst || !inst->capabilities) {
+       if (!inst || !inst->capabilities || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
+       core = inst->core;
        list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
-               if (fence->dma_fence.seqno ==
+               if (fence->fence_id ==
                        (u64)inst->capabilities->cap[FENCE_ID].value) {
                        found = true;
                        break;
@@ -1706,7 +1708,7 @@ int msm_vidc_get_fence_fd(struct msm_vidc_inst *inst, int *fence_fd)
        }
        if (fence->fd == INVALID_FD) {
-               rc = msm_vidc_create_fence_fd(inst, fence);
+               rc = call_fence_op(core, fence_create_fd, inst, fence);
                if (rc)
                        goto exit;
        }
@@ -2995,11 +2997,13 @@ int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *
        int rc = 0;
        struct msm_vidc_buffer *buf = NULL;
        struct msm_vidc_fence *fence = NULL;
+       struct msm_vidc_core *core = NULL;
-       if (!inst || !vb2 || !inst->capabilities) {
+       if (!inst || !vb2 || !inst->capabilities || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
+       core = inst->core;
        buf = msm_vidc_get_driver_buf(inst, vb2);
        if (!buf)
@@ -3007,10 +3011,10 @@ int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *
        if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE) &&
                is_output_buffer(buf->type)) {
-               fence = msm_vidc_fence_create(inst);
+               fence = call_fence_op(core, fence_create, inst);
                if (!fence)
-                       return rc;
+                       return -EINVAL;
-               buf->fence_id = fence->dma_fence.seqno;
+               buf->fence_id = fence->fence_id;
        }
        rc = inst->event_handle(inst, MSM_VIDC_BUF_QUEUE, buf);
@@ -3021,7 +3025,7 @@ exit:
        if (rc) {
                i_vpr_e(inst, "%s: qbuf failed\n", __func__);
                if (fence)
-                       msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
+                       call_fence_op(core, fence_destroy, inst, fence->fence_id);
        }
        return rc;
}
@@ -5088,7 +5092,7 @@ void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
        list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
                i_vpr_e(inst, "%s: destroying fence %s\n", __func__, fence->name);
-               msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
+               call_fence_op(core, fence_destroy, inst, fence->fence_id);
        }
        /* destroy buffers from pool */

View File

@@ -7,8 +7,6 @@
#include "msm_vidc_driver.h" #include "msm_vidc_driver.h"
#include "msm_vidc_debug.h" #include "msm_vidc_debug.h"
extern struct msm_vidc_core *g_core;
static const char *msm_vidc_dma_fence_get_driver_name(struct dma_fence *df) static const char *msm_vidc_dma_fence_get_driver_name(struct dma_fence *df)
{ {
struct msm_vidc_fence *fence; struct msm_vidc_fence *fence;
@@ -76,6 +74,8 @@ struct msm_vidc_fence *msm_vidc_fence_create(struct msm_vidc_inst *inst)
        if (inst->fence_context.seq_num >= INT_MAX)
                inst->fence_context.seq_num = 0;
+       fence->fence_id = fence->dma_fence.seqno;
        INIT_LIST_HEAD(&fence->list);
        list_add_tail(&fence->list, &inst->fence_list);
        i_vpr_l(inst, "%s: created %s\n", __func__, fence->name);
@@ -83,7 +83,7 @@ struct msm_vidc_fence *msm_vidc_fence_create(struct msm_vidc_inst *inst)
        return fence;
}
-int msm_vidc_create_fence_fd(struct msm_vidc_inst *inst,
+int msm_vidc_dma_fence_create_fd(struct msm_vidc_inst *inst,
        struct msm_vidc_fence *fence)
{
        int rc = 0;
@@ -119,8 +119,8 @@ err_fd:
        return rc;
}
-struct msm_vidc_fence *msm_vidc_get_fence_from_id(
-               struct msm_vidc_inst *inst, u32 fence_id)
+static struct msm_vidc_fence *msm_vidc_get_dma_fence_from_id(
+               struct msm_vidc_inst *inst, u64 fence_id)
{
        struct msm_vidc_fence *fence, *dummy_fence;
        bool found = false;
@@ -131,19 +131,22 @@ struct msm_vidc_fence *msm_vidc_get_fence_from_id(
        }
        list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
-               if (fence->dma_fence.seqno == (u64)fence_id) {
+               if (fence->fence_id == fence_id) {
                        found = true;
                        break;
                }
        }
-       if (!found)
+       if (!found) {
+               i_vpr_l(inst, "%s: no fence available for id: %u\n",
+                       __func__, fence_id);
                return NULL;
+       }
        return fence;
}
-int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u32 fence_id)
+static int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u64 fence_id)
{
        int rc = 0;
        struct msm_vidc_fence *fence;
@@ -153,7 +156,7 @@ int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u32 fence_id)
                return -EINVAL;
        }
-       fence = msm_vidc_get_fence_from_id(inst, fence_id);
+       fence = msm_vidc_get_dma_fence_from_id(inst, fence_id);
        if (!fence) {
                i_vpr_e(inst, "%s: no fence available to signal with id: %u\n",
                        __func__, fence_id);
@@ -163,6 +166,7 @@ int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u32 fence_id)
i_vpr_l(inst, "%s: fence %s\n", __func__, fence->name); i_vpr_l(inst, "%s: fence %s\n", __func__, fence->name);
list_del_init(&fence->list); list_del_init(&fence->list);
dma_fence_signal(&fence->dma_fence); dma_fence_signal(&fence->dma_fence);
dma_fence_put(&fence->dma_fence); dma_fence_put(&fence->dma_fence);
@@ -171,7 +175,7 @@ exit:
}
-void msm_vidc_fence_destroy(struct msm_vidc_inst *inst, u32 fence_id)
+static void msm_vidc_fence_destroy(struct msm_vidc_inst *inst, u64 fence_id)
{
        struct msm_vidc_fence *fence;
@@ -180,7 +184,7 @@ void msm_vidc_fence_destroy(struct msm_vidc_inst *inst, u32 fence_id)
                return;
        }
-       fence = msm_vidc_get_fence_from_id(inst, fence_id);
+       fence = msm_vidc_get_dma_fence_from_id(inst, fence_id);
        if (!fence) {
                return;
        }
@@ -221,3 +225,15 @@ void msm_vidc_fence_deinit(struct msm_vidc_inst *inst)
        snprintf(inst->fence_context.name, sizeof(inst->fence_context.name),
                "%s", "");
}
+static const struct msm_vidc_fence_ops msm_dma_fence_ops = {
+       .fence_create = msm_vidc_fence_create,
+       .fence_destroy = msm_vidc_fence_destroy,
+       .fence_signal = msm_vidc_fence_signal,
+       .fence_create_fd = msm_vidc_dma_fence_create_fd,
+};
+
+const struct msm_vidc_fence_ops *get_dma_fence_ops(void)
+{
+       return &msm_dma_fence_ops;
+}

View File

@@ -425,8 +425,7 @@ static int msm_vidc_memory_alloc_map(struct msm_vidc_core *core, struct msm_vidc
        cb = msm_vidc_get_context_bank_for_region(core, mem->region);
        if (!cb) {
-               d_vpr_e("%s: Failed to get context bank device\n",
-                       __func__);
+               d_vpr_e("%s: failed to get context bank device\n", __func__);
                return -EIO;
        }
@@ -457,18 +456,17 @@ static int msm_vidc_memory_unmap_free(struct msm_vidc_core *core, struct msm_vid
        d_vpr_h(
                "%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
-               __func__, mem->device_addr, mem->size, mem->kvaddr, buf_name(mem->type),
-               mem->secure, mem->region);
+               __func__, mem->device_addr, mem->size, mem->kvaddr,
+               buf_name(mem->type), mem->secure, mem->region);
        cb = msm_vidc_get_context_bank_for_region(core, mem->region);
        if (!cb) {
-               d_vpr_e("%s: Failed to get context bank device\n",
-                       __func__);
+               d_vpr_e("%s: failed to get context bank device\n", __func__);
                return -EIO;
        }
        dma_free_attrs(cb->dev, mem->size, mem->kvaddr, mem->device_addr,
                mem->attrs);
        mem->kvaddr = NULL;
        mem->device_addr = 0;
@@ -476,6 +474,99 @@ static int msm_vidc_memory_unmap_free(struct msm_vidc_core *core, struct msm_vid
        return rc;
}
+static int msm_vidc_dma_map_page(struct msm_vidc_core *core,
+       struct msm_vidc_mem *mem)
+{
+       int rc = 0;
+       struct context_bank_info *cb = NULL;
+       dma_addr_t dma_addr;
+
+       if (!core || !mem) {
+               d_vpr_e("%s: invalid params\n", __func__);
+               return -EINVAL;
+       }
+
+       if (mem->refcount) {
+               mem->refcount++;
+               goto exit;
+       }
+
+       cb = msm_vidc_get_context_bank_for_region(core, mem->region);
+       if (!cb) {
+               d_vpr_e("%s: Failed to get context bank device\n",
+                       __func__);
+               rc = -EIO;
+               goto error;
+       }
+
+       /* map and obtain dma address for physically contiguous memory */
+       dma_addr = dma_map_page(cb->dev, phys_to_page(mem->phys_addr),
+               0, (size_t)mem->size, mem->direction);
+       rc = dma_mapping_error(cb->dev, dma_addr);
+       if (rc) {
+               d_vpr_e("%s: Failed to map memory\n", __func__);
+               goto error;
+       }
+       mem->device_addr = dma_addr;
+       mem->refcount++;
+
+exit:
+       d_vpr_l(
+               "%s: type %11s, device_addr %#llx, size %u region %d, refcount %d\n",
+               __func__, buf_name(mem->type), mem->device_addr,
+               mem->size, mem->region, mem->refcount);
+       return 0;
+
+error:
+       return rc;
+}
+
+static int msm_vidc_dma_unmap_page(struct msm_vidc_core *core,
+       struct msm_vidc_mem *mem)
+{
+       int rc = 0;
+       struct context_bank_info *cb = NULL;
+
+       if (!core || !mem) {
+               d_vpr_e("%s: invalid params\n", __func__);
+               return -EINVAL;
+       }
+
+       if (mem->refcount) {
+               mem->refcount--;
+       } else {
+               d_vpr_e("unmap called while refcount is zero already\n");
+               return -EINVAL;
+       }
+
+       cb = msm_vidc_get_context_bank_for_region(core, mem->region);
+       if (!cb) {
+               d_vpr_e("%s: Failed to get context bank device\n",
+                       __func__);
+               rc = -EIO;
+               goto exit;
+       }
+
+       d_vpr_l(
+               "%s: type %11s, device_addr %#x, refcount %d, region %d\n",
+               __func__, buf_name(mem->type), mem->device_addr,
+               mem->refcount, mem->region);
+
+       if (mem->refcount)
+               goto exit;
+
+       dma_unmap_page(cb->dev, (dma_addr_t)(mem->device_addr),
+               mem->size, mem->direction);
+
+       mem->device_addr = 0x0;
+
+exit:
+       return rc;
+}
+
static u32 msm_vidc_buffer_region(struct msm_vidc_inst *inst,
        enum msm_vidc_buffer_type buffer_type)
{
@@ -494,7 +585,8 @@ static int msm_vidc_iommu_map(struct msm_vidc_core *core, struct msm_vidc_mem *m
        cb = msm_vidc_get_context_bank_for_region(core, mem->region);
        if (!cb) {
-               d_vpr_e("%s: Failed to get context bank device\n", __func__);
+               d_vpr_e("%s: failed to get context bank device for region: %d\n",
+                       __func__, mem->region);
                return -EIO;
        }
@@ -524,8 +616,8 @@ static int msm_vidc_iommu_unmap(struct msm_vidc_core *core, struct msm_vidc_mem
        cb = msm_vidc_get_context_bank_for_region(core, mem->region);
        if (!cb) {
-               d_vpr_e("%s: Failed to get context bank device\n",
-                       __func__);
+               d_vpr_e("%s: failed to get context bank device for region: %d\n",
+                       __func__, mem->region);
                return -EIO;
        }
@@ -540,7 +632,7 @@ static int msm_vidc_iommu_unmap(struct msm_vidc_core *core, struct msm_vidc_mem
        return rc;
}
-static struct msm_vidc_memory_ops msm_mem_ops = {
+static const struct msm_vidc_memory_ops msm_mem_ops = {
        .dma_buf_get = msm_vidc_dma_buf_get,
        .dma_buf_put = msm_vidc_dma_buf_put,
        .dma_buf_put_completely = msm_vidc_dma_buf_put_completely,
@@ -550,12 +642,14 @@ static struct msm_vidc_memory_ops msm_mem_ops = {
        .dma_buf_unmap_attachment = msm_vidc_dma_buf_unmap_attachment,
        .memory_alloc_map = msm_vidc_memory_alloc_map,
        .memory_unmap_free = msm_vidc_memory_unmap_free,
+       .mem_dma_map_page = msm_vidc_dma_map_page,
+       .mem_dma_unmap_page = msm_vidc_dma_unmap_page,
        .buffer_region = msm_vidc_buffer_region,
        .iommu_map = msm_vidc_iommu_map,
        .iommu_unmap = msm_vidc_iommu_unmap,
};
-struct msm_vidc_memory_ops *get_mem_ops(void)
+const struct msm_vidc_memory_ops *get_mem_ops(void)
{
        return &msm_mem_ops;
}

View File

@@ -452,9 +452,9 @@ static int msm_vidc_memory_unmap_free_ext(struct msm_vidc_core *core, struct msm
        return rc;
}
-struct msm_vidc_memory_ops *get_mem_ops_ext(void)
+const struct msm_vidc_memory_ops *get_mem_ops_ext(void)
{
-       struct msm_vidc_memory_ops *mem_ops = get_mem_ops();
+       const struct msm_vidc_memory_ops *mem_ops = get_mem_ops();
        static struct msm_vidc_memory_ops mem_ops_ext;
        memcpy(&mem_ops_ext, mem_ops, sizeof(struct msm_vidc_memory_ops));

View File

@@ -24,6 +24,7 @@
#include "msm_vidc_driver.h" #include "msm_vidc_driver.h"
#include "msm_vidc_debug.h" #include "msm_vidc_debug.h"
#include "msm_vidc_state.h" #include "msm_vidc_state.h"
#include "msm_vidc_fence.h"
#include "msm_vidc_platform.h" #include "msm_vidc_platform.h"
#include "msm_vidc_core.h" #include "msm_vidc_core.h"
#include "msm_vidc_memory.h" #include "msm_vidc_memory.h"
@@ -567,6 +568,41 @@ static int msm_vidc_component_master_bind(struct device *dev)
                return rc;
        }
+       /* register for synx fence */
+       if (core->capabilities[SUPPORTS_SYNX_FENCE].value) {
+               rc = call_fence_op(core, fence_register, core);
+               if (rc) {
+                       d_vpr_e("%s: failed to register synx fence\n",
+                               __func__);
+                       core->capabilities[SUPPORTS_SYNX_FENCE].value = 0;
+                       /*
+                        * - Bail out the session for time being for this
+                        *   case where synx fence register call returns error
+                        *   to help with debugging
+                        * - Re-initialize fence ops with dma_fence_ops.
+                        *   This is required once we start ignoring this
+                        *   synx fence register call error.
+                        */
+                       core->fence_ops = get_dma_fence_ops();
+                       if (!core->fence_ops) {
+                               d_vpr_e("%s: invalid dma fence ops\n", __func__);
+                               return -EINVAL;
+                       }
+                       return rc;
+               }
+       } else {
+               /*
+                * override synx fence ops with dma fence ops for
+                * time being until synx fence support is enabled
+                */
+               core->fence_ops = get_dma_fence_ops();
+               if (!core->fence_ops) {
+                       d_vpr_e("%s: invalid dma fence ops\n", __func__);
+                       return -EINVAL;
+               }
+       }
        rc = msm_vidc_initialize_media(core);
        if (rc) {
                d_vpr_e("%s: media initialization failed\n", __func__);
@@ -607,6 +643,7 @@ static void msm_vidc_component_master_unbind(struct device *dev)
        msm_vidc_core_deinit(core, true);
        venus_hfi_queue_deinit(core);
        msm_vidc_deinitialize_media(core);
+       call_fence_op(core, fence_deregister, core);
        component_unbind_all(dev, core);
        d_vpr_h("%s(): succssful\n", __func__);

View File

@@ -0,0 +1,394 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <msm_hw_fence_synx_translation.h>
#include "msm_vidc_core.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_fence.h"
#include "msm_vidc_debug.h"
#include <synx_api.h>
#define MSM_VIDC_SYNX_FENCE_CLIENT_ID SYNX_CLIENT_HW_FENCE_VID_CTX0
#define MSM_VIDC_SYNX_CREATE_DMA_FENCE SYNX_CREATE_DMA_FENCE
#define MAX_SYNX_FENCE_SESSION_NAME 64
static const char *msm_vidc_synx_dma_fence_get_driver_name(struct dma_fence *df)
{
struct msm_vidc_fence *fence;
if (df) {
fence = container_of(df, struct msm_vidc_fence, dma_fence);
return fence->name;
}
return "msm_vidc_synx_dma_fence_get_driver_name: invalid fence";
}
static const char *msm_vidc_synx_dma_fence_get_timeline_name(struct dma_fence *df)
{
struct msm_vidc_fence *fence;
if (df) {
fence = container_of(df, struct msm_vidc_fence, dma_fence);
return fence->name;
}
return "msm_vidc_synx_dma_fence_get_timeline_name: invalid fence";
}
static void msm_vidc_synx_fence_release(struct dma_fence *df)
{
struct msm_vidc_fence *fence;
int rc = 0;
if (!df) {
d_vpr_e("%s: invalid dma fence\n", __func__);
return;
}
fence = container_of(df, struct msm_vidc_fence, dma_fence);
if (!fence) {
d_vpr_e("%s: invalid fence\n", __func__);
return;
}
d_vpr_l("%s: name %s\n", __func__, fence->name);
/* destroy associated synx fence */
if (fence->session) {
rc = synx_hwfence_release((struct synx_session *)fence->session,
(u32)fence->fence_id);
if (rc)
d_vpr_e("%s: failed to destroy synx fence for %s\n",
__func__, fence->name);
}
msm_vidc_vmem_free((void **)&fence);
return;
}
static const struct dma_fence_ops msm_vidc_synx_dma_fence_ops = {
.get_driver_name = msm_vidc_synx_dma_fence_get_driver_name,
.get_timeline_name = msm_vidc_synx_dma_fence_get_timeline_name,
.release = msm_vidc_synx_fence_release,
};
static struct msm_vidc_fence *msm_vidc_get_synx_fence_from_id(
struct msm_vidc_inst *inst, u64 fence_id)
{
struct msm_vidc_fence *fence, *dummy_fence;
bool found = false;
if (!inst) {
d_vpr_e("%s: invalid params\n", __func__);
return NULL;
}
list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
if (fence->fence_id == fence_id) {
found = true;
break;
}
}
if (!found) {
i_vpr_l(inst, "%s: no fence available for id: %u\n",
__func__, fence_id);
return NULL;
}
return fence;
}
static void msm_vidc_synx_fence_destroy(struct msm_vidc_inst *inst, u64 fence_id)
{
struct msm_vidc_fence *fence;
if (!inst || !inst->core) {
d_vpr_e("%s: invalid params\n", __func__);
return;
}
fence = msm_vidc_get_synx_fence_from_id(inst, fence_id);
if (!fence) {
return;
}
i_vpr_e(inst, "%s: fence %s\n", __func__, fence->name);
list_del_init(&fence->list);
dma_fence_set_error(&fence->dma_fence, -EINVAL);
dma_fence_signal(&fence->dma_fence);
dma_fence_put(&fence->dma_fence);
}
static int msm_vidc_synx_fence_register(struct msm_vidc_core *core)
{
struct synx_initialization_params params;
struct synx_session *session = NULL;
char synx_session_name[MAX_SYNX_FENCE_SESSION_NAME];
struct synx_queue_desc queue_desc;
if (!core && !core->capabilities) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
if (!core->capabilities[SUPPORTS_SYNX_FENCE].value)
return 0;
/* fill synx_initialization_params */
memset(&params, 0, sizeof(struct synx_initialization_params));
memset(&queue_desc, 0, sizeof(struct synx_queue_desc));
params.id = (enum synx_client_id)MSM_VIDC_SYNX_FENCE_CLIENT_ID;
snprintf(synx_session_name, MAX_SYNX_FENCE_SESSION_NAME,
"video synx fence");
params.name = synx_session_name;
params.ptr = &queue_desc;
session =
(struct synx_session *)synx_hwfence_initialize(&params);
if (IS_ERR_OR_NULL(session)) {
d_vpr_e("%s: invalid synx fence session\n", __func__);
return -EINVAL;
}
/* fill core synx fence data */
core->synx_fence_data.client_id = (u32)params.id;
core->synx_fence_data.session = (void *)session;
core->synx_fence_data.queue.size = (u32)queue_desc.size;
core->synx_fence_data.queue.kvaddr = queue_desc.vaddr;
core->synx_fence_data.queue.phys_addr = (phys_addr_t)queue_desc.dev_addr;
core->synx_fence_data.queue.type = MSM_VIDC_BUF_INTERFACE_QUEUE;
core->synx_fence_data.queue.region = MSM_VIDC_NON_SECURE;
core->synx_fence_data.queue.direction = DMA_BIDIRECTIONAL;
d_vpr_h("%s: successfully registered synx fence\n", __func__);
return 0;
}
static int msm_vidc_synx_fence_deregister(struct msm_vidc_core *core)
{
int rc = 0;
if (!core || !core->capabilities) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
if (!core->capabilities[SUPPORTS_SYNX_FENCE].value)
return 0;
rc = synx_hwfence_uninitialize(
(struct synx_session *)core->synx_fence_data.session);
if (rc) {
d_vpr_e("%s: failed to deregister synx fence\n", __func__);
/* ignore error */
rc = 0;
} else {
d_vpr_l("%s: successfully deregistered synx fence\n", __func__);
}
return rc;
}
static struct msm_vidc_fence *msm_vidc_synx_dma_fence_create(struct msm_vidc_inst *inst)
{
struct msm_vidc_fence *fence = NULL;
int rc = 0;
if (!inst) {
d_vpr_e("%s: invalid params\n", __func__);
return NULL;
}
rc = msm_vidc_vmem_alloc(sizeof(*fence), (void **)&fence, __func__);
if (rc)
return NULL;
fence->fd = INVALID_FD;
spin_lock_init(&fence->lock);
dma_fence_init(&fence->dma_fence, &msm_vidc_synx_dma_fence_ops,
&fence->lock, inst->fence_context.ctx_num,
++inst->fence_context.seq_num);
snprintf(fence->name, sizeof(fence->name), "synx %s: %llu",
inst->fence_context.name, inst->fence_context.seq_num);
fence->fence_id = fence->dma_fence.seqno;
INIT_LIST_HEAD(&fence->list);
list_add_tail(&fence->list, &inst->fence_list);
i_vpr_l(inst, "%s: created %s\n", __func__, fence->name);
return fence;
}
static struct msm_vidc_fence *msm_vidc_synx_fence_create(struct msm_vidc_inst *inst)
{
int rc = 0;
struct msm_vidc_fence *fence = NULL;
struct msm_vidc_core *core = NULL;
struct synx_create_params params;
u32 fence_id = 0;
if (!inst || !inst->core) {
d_vpr_e("%s: invalid params\n", __func__);
return NULL;
}
core = inst->core;
if (!core->capabilities) {
d_vpr_e("%s: invalid core caps\n", __func__);
return NULL;
}
/* return if synx fence is not supported */
if (!core->capabilities[SUPPORTS_SYNX_FENCE].value)
return NULL;
/* create dma fence */
fence = msm_vidc_synx_dma_fence_create(inst);
if (!fence) {
i_vpr_e(inst, "%s: failed to create dma fence\n", __func__);
return NULL;
}
if (!core->synx_fence_data.session) {
i_vpr_e(inst, "%s: invalid synx fence session\n", __func__);
goto destroy_dma_fence;
}
/* fill synx fence params structure */
memset(&params, 0, sizeof(struct synx_create_params));
params.name = fence->name;
params.fence = (void *)&fence->dma_fence;
params.h_synx = &fence_id;
params.flags = MSM_VIDC_SYNX_CREATE_DMA_FENCE;
/* create hw fence */
rc = synx_hwfence_create(
(struct synx_session *)core->synx_fence_data.session,
&params);
if (rc) {
i_vpr_e(inst, "%s: failed to create hw fence for %s",
__func__, fence->name);
goto destroy_dma_fence;
}
fence->fence_id = (u64)(*(params.h_synx));
/* this copy of hw fence client handle is req. to destroy synx fence */
fence->session = core->synx_fence_data.session;
i_vpr_l(inst, "%s: successfully created synx fence with id: %llu",
__func__, fence->fence_id);
return fence;
destroy_dma_fence:
msm_vidc_synx_fence_destroy(inst, fence->fence_id);
return NULL;
}
int msm_vidc_synx_fence_create_fd(struct msm_vidc_inst *inst,
struct msm_vidc_fence *fence)
{
int rc = 0;
if (!inst || !fence) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
fence->fd = get_unused_fd_flags(0);
if (fence->fd < 0) {
i_vpr_e(inst, "%s: getting fd (%d) failed\n", __func__,
fence->fd);
rc = -EINVAL;
goto err_fd;
}
fence->sync_file = sync_file_create(&fence->dma_fence);
if (!fence->sync_file) {
i_vpr_e(inst, "%s: sync_file_create failed\n", __func__);
rc = -EINVAL;
goto err_sync_file;
}
fd_install(fence->fd, fence->sync_file->file);
i_vpr_l(inst, "%s: created fd %d for fence %s\n", __func__,
fence->fd, fence->name);
return 0;
err_sync_file:
put_unused_fd(fence->fd);
err_fd:
return rc;
}
static int msm_vidc_synx_fence_signal(struct msm_vidc_inst *inst, u64 fence_id)
{
int rc = 0;
struct msm_vidc_fence *fence;
struct msm_vidc_core *core;
if (!inst || !inst->core) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
core = inst->core;
if (!core->capabilities) {
d_vpr_e("%s: invalid core caps\n", __func__);
return -EINVAL;
}
fence = msm_vidc_get_synx_fence_from_id(inst, fence_id);
if (!fence) {
i_vpr_e(inst, "%s: no fence available to signal with id: %u\n",
__func__, fence_id);
rc = -EINVAL;
goto exit;
}
i_vpr_l(inst, "%s: fence %s\n", __func__, fence->name);
list_del_init(&fence->list);
dma_fence_signal(&fence->dma_fence);
dma_fence_put(&fence->dma_fence);
exit:
return rc;
}
static void msm_vidc_synx_fence_recover(struct msm_vidc_core *core)
{
int rc = 0;
if (!core) {
d_vpr_e("%s: invalid paras\n", __func__);
return;
}
rc = synx_hwfence_recover(
(enum synx_client_id)core->synx_fence_data.client_id);
if (rc)
d_vpr_e("%s: failed to recover synx fences for client id: %d",
__func__,
(enum synx_client_id)core->synx_fence_data.client_id);
return;
}
const struct msm_vidc_fence_ops *get_synx_fence_ops(void)
{
static struct msm_vidc_fence_ops synx_ops;
synx_ops.fence_register = msm_vidc_synx_fence_register;
synx_ops.fence_deregister = msm_vidc_synx_fence_deregister;
synx_ops.fence_create = msm_vidc_synx_fence_create;
synx_ops.fence_create_fd = msm_vidc_synx_fence_create_fd;
synx_ops.fence_destroy = msm_vidc_synx_fence_destroy;
synx_ops.fence_signal = msm_vidc_synx_fence_signal;
synx_ops.fence_recover = msm_vidc_synx_fence_recover;
return &synx_ops;
}

View File

@@ -429,6 +429,8 @@ void venus_hfi_queue_deinit(struct msm_vidc_core *core)
call_mem_op(core, memory_unmap_free, core, &core->sfr.mem); call_mem_op(core, memory_unmap_free, core, &core->sfr.mem);
call_mem_op(core, iommu_unmap, core, &core->aon.mem); call_mem_op(core, iommu_unmap, core, &core->aon.mem);
call_mem_op(core, memory_unmap_free, core, &core->mmap_buf.mem); call_mem_op(core, memory_unmap_free, core, &core->mmap_buf.mem);
call_mem_op(core, mem_dma_unmap_page, core,
&core->synx_fence_data.queue);
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) { for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
core->iface_queues[i].q_hdr = NULL; core->iface_queues[i].q_hdr = NULL;
@@ -588,6 +590,20 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
        /* write sfr buffer size in first word */
        *((u32 *)core->sfr.align_virtual_addr) = core->sfr.mem_size;
+       /* map synx fence tx/rx queue buffer */
+       if (core->capabilities[SUPPORTS_SYNX_FENCE].value) {
+               /*
+                * queue memory is already allocated by synx fence
+                * driver during msm_vidc_synx_fence_register(..) call
+                */
+               rc = call_mem_op(core, mem_dma_map_page, core,
+                       &core->synx_fence_data.queue);
+               if (rc) {
+                       d_vpr_e("%s: synx fence queue buffer map failed\n", __func__);
+                       goto fail_alloc_queue;
+               }
+       }
        /* map aon registers */
        memset(&mem, 0, sizeof(mem));
        dev_reg = venus_hfi_get_device_region_info(core, MSM_VIDC_AON_REGISTERS);
@@ -634,12 +650,15 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
         * payload[7-8] : address and size of HW mutex registers
         * payload[9-10] : address and size of IPCC registers
         * payload[11-12] : address and size of AON registers
+        * payload[13-14] : address and size of synx fence queue memory
         */
        memset(core->mmap_buf.align_virtual_addr, 0, ALIGNED_MMAP_BUF_SIZE);
        payload = ((u32 *)core->mmap_buf.align_virtual_addr);
        payload[0] = 1;
        payload[11] = core->aon.mem.device_addr;
        payload[12] = core->aon.mem.size;
+       payload[13] = core->synx_fence_data.queue.device_addr;
+       payload[14] = core->synx_fence_data.queue.size;
skip_mmap_buffer:
        return 0;

View File

@@ -1001,11 +1001,11 @@ static int handle_output_buffer(struct msm_vidc_inst *inst,
        if (inst->hfi_frame_info.fence_id) {
                if (buf->data_size) {
                        /* signal fence */
-                       msm_vidc_fence_signal(inst,
+                       call_fence_op(core, fence_signal, inst,
                                inst->hfi_frame_info.fence_id);
                } else {
                        /* destroy fence */
-                       msm_vidc_fence_destroy(inst,
+                       call_fence_op(core, fence_destroy, inst,
                                inst->hfi_frame_info.fence_id);
                }
        }

View File

@@ -25,7 +25,10 @@ endif
LINUXINCLUDE += -I$(VIDEO_DRIVER_ABS_PATH)/platform/common/inc \
                -I$(VIDEO_DRIVER_ABS_PATH)/variant/common/inc \
                -I$(VIDEO_DRIVER_ABS_PATH)/vidc/inc \
-               -I$(VIDEO_ROOT)/include/uapi/vidc
+               -I$(VIDEO_ROOT)/include/uapi/vidc \
+               -I$(VIDEO_ROOT)/../mm-drivers/hw_fence/include/ \
+               -I$(VIDEO_ROOT)/../synx-kernel/msm/synx/ \
+               -I$(VIDEO_ROOT)/../synx-kernel/include/uapi/synx/media/
USERINCLUDE += -I$(VIDEO_ROOT)/include/uapi/vidc/media \
                -I$(VIDEO_ROOT)/include/uapi/vidc
@@ -69,6 +72,7 @@ msm_video-objs += $(VIDEO_DRIVER_REL_PATH)/platform/common/src/msm_vidc_platform
                $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_memory.o \
                $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_memory_ext.o \
                $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_fence.o \
+               $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_synx.o \
                $(VIDEO_DRIVER_REL_PATH)/vidc/src/venus_hfi.o \
                $(VIDEO_DRIVER_REL_PATH)/vidc/src/venus_hfi_queue.o \
                $(VIDEO_DRIVER_REL_PATH)/vidc/src/hfi_packet.o \