
Merge "video: driver: Add debugfs support to disable synx v2 fence"

qctecmdr 2 years ago
parent
commit
0e06b9af89
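
This merge routes all driver fence handling through a per-core fence_ops table: dma fence ops by default, and synx v2 (hw fence) ops on platforms that set SUPPORTS_SYNX_FENCE. The new debugfs boolean forces the dma fence fallback even on those platforms. A minimal usage sketch follows, assuming the driver's debugfs directory is /sys/kernel/debug/msm_vidc (the root path is not part of this change); the flag is consulted once in msm_vidc_component_master_bind, so it must be set before the driver binds:

    # synx v2 fencing is disabled by default (msm_vidc_disable_synx_fence = true)
    # clear the flag to keep the synx fence ops selected by the platform data
    echo N > /sys/kernel/debug/msm_vidc/disable_synx_v2_fence
    # read back the current state (a debugfs bool reports Y or N)
    cat /sys/kernel/debug/msm_vidc/disable_synx_v2_fence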

+ 3 - 0
Android.mk

@@ -20,6 +20,7 @@ KBUILD_OPTIONS := VIDEO_ROOT=$(VIDEO_BLD_DIR)
 KBUILD_OPTIONS += $(VIDEO_SELECT)
 
 KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS=$(shell pwd)/$(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(shell pwd)/$(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers
 ###########################################################
 
 DLKM_DIR   := device/qcom/common/dlkm
@@ -36,7 +37,9 @@ LOCAL_MODULE_DEBUG_ENABLE := true
 LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
 
 LOCAL_REQUIRED_MODULES    := mmrm-module-symvers
+LOCAL_REQUIRED_MODULES    += hw-fence-module-symvers
 LOCAL_ADDITIONAL_DEPENDENCIES := $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers
 
 include $(DLKM_DIR)/Build_external_kernelmodule.mk
 endif

+ 6 - 0
driver/platform/common/src/msm_vidc_platform.c

@@ -17,6 +17,7 @@
 #include "msm_vidc_memory.h"
 #include "msm_vidc_control.h"
 #include "msm_vidc_driver.h"
+#include "msm_vidc_fence.h"
 #include "hfi_packet.h"
 #include "hfi_property.h"
 #include "venus_hfi.h"
@@ -275,6 +276,11 @@ static int msm_vidc_init_ops(struct msm_vidc_core *core)
 		d_vpr_e("%s: invalid resource ops\n", __func__);
 		return -EINVAL;
 	}
+	core->fence_ops = get_dma_fence_ops();
+	if (!core->fence_ops) {
+		d_vpr_e("%s: invalid dma fence ops\n", __func__);
+		return -EINVAL;
+	}
 
 	return 0;
 }

+ 1 - 0
driver/platform/kalama/src/msm_vidc_kalama.c

@@ -322,6 +322,7 @@ static struct msm_platform_core_capability core_data_kalama[] = {
 	{ENC_AUTO_FRAMERATE, 1},
 	{DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_META_CAPTURE |
 		V4L2_CAP_STREAMING},
+	{SUPPORTS_SYNX_FENCE, 0},
 	{SUPPORTS_REQUESTS, 1},
 };
 

+ 23 - 1
driver/platform/pineapple/src/msm_vidc_pineapple.c

@@ -17,6 +17,7 @@
 #include "msm_vidc_internal.h"
 #include "msm_vidc_platform_ext.h"
 #include "msm_vidc_memory_ext.h"
+#include "msm_vidc_synx.h"
 #include "resources_ext.h"
 #include "msm_vidc_iris33.h"
 #include "hfi_property.h"
@@ -323,6 +324,7 @@ static struct msm_platform_core_capability core_data_pineapple[] = {
 	{ENC_AUTO_FRAMERATE, 1},
 	{DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_META_CAPTURE |
 		V4L2_CAP_STREAMING},
+	{SUPPORTS_SYNX_FENCE, 1},
 	{SUPPORTS_REQUESTS, 1},
 };
 
@@ -2656,7 +2658,21 @@ static const struct reg_preset_table pineapple_reg_preset_table[] = {
 
 /* name, phys_addr, size, device_addr, device region type */
 static const struct device_region_table pineapple_device_region_table[] = {
-	{ "aon-registers", 0x0AAE0000, 0x1000, 0xFFAE0000, MSM_VIDC_AON_REGISTERS },
+	{
+		"aon-registers",
+		0x0AAE0000, 0x1000, 0xFFAE0000,
+		MSM_VIDC_AON
+	},
+	{
+		"ipc_protocol4_client8_version-registers",
+		0x00508000, 0x1000, 0xFFADD000,
+		MSM_VIDC_PROTOCOL_FENCE_CLIENT_VPU
+	},
+	{
+		"qtimer_f0v1_qtmr_v1_cntpct_lo",
+		0x17421000, 0x1000, 0xFFADC000,
+		MSM_VIDC_QTIMER
+	},
 };
 
 /* decoder properties */
@@ -2819,6 +2835,12 @@ static int msm_vidc_init_data(struct msm_vidc_core *core)
 		d_vpr_e("%s: invalid resource ext ops\n", __func__);
 		return -EINVAL;
 	}
+	core->fence_ops = get_synx_fence_ops();
+	if (!core->fence_ops) {
+		d_vpr_e("%s: invalid synx fence ops\n", __func__);
+		return -EINVAL;
+	}
+
 	rc = msm_vidc_pineapple_check_ddr_type();
 	if (rc)
 		return rc;

+ 1 - 0
driver/platform/pineapple/src/pineapple.c

@@ -220,6 +220,7 @@ static struct msm_platform_core_capability core_data_pineapple[] = {
 	{NON_FATAL_FAULTS, 1},
 	{ENC_AUTO_FRAMERATE, 1},
 	{DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING},
+	{SUPPORTS_SYNX_FENCE, 0},
 	{SUPPORTS_REQUESTS, 0},
 };
 

+ 1 - 0
driver/platform/waipio/src/waipio.c

@@ -222,6 +222,7 @@ static struct msm_platform_core_capability core_data_waipio[] = {
 	{NON_FATAL_FAULTS, 1},
 	{ENC_AUTO_FRAMERATE, 1},
 	{DEVICE_CAPS, V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING},
+	{SUPPORTS_SYNX_FENCE, 0},
 	{SUPPORTS_REQUESTS, 0},
 };
 

+ 13 - 2
driver/vidc/inc/msm_vidc_core.h

@@ -33,6 +33,13 @@ struct msm_vidc_venus_ops {
 	int (*noc_error_info)(struct msm_vidc_core *core);
 };
 
+struct msm_vidc_synx_fence_data {
+	u32                             client_id;
+	void                           *session;
+	u32                             client_flags; /* not used */
+	struct msm_vidc_mem             queue;
+};
+
 struct msm_vidc_mem_addr {
 	u32 align_device_addr;
 	u8 *align_virtual_addr;
@@ -85,7 +92,9 @@ struct msm_vidc_core {
 	struct msm_vidc_mem_addr               sfr;
 	struct msm_vidc_mem_addr               iface_q_table;
 	struct msm_vidc_mem_addr               mmap_buf;
-	struct msm_vidc_mem_addr               aon;
+	struct msm_vidc_mem_addr               aon_reg;
+	struct msm_vidc_mem_addr               fence_reg;
+	struct msm_vidc_mem_addr               qtimer_reg;
 	struct msm_vidc_iface_q_info           iface_queues[VIDC_IFACEQ_NUMQ];
 	struct delayed_work                    pm_work;
 	struct workqueue_struct               *pm_workq;
@@ -109,11 +118,13 @@ struct msm_vidc_core {
 	struct msm_vidc_venus_ops             *venus_ops;
 	const struct msm_vidc_resources_ops   *res_ops;
 	struct msm_vidc_session_ops           *session_ops;
-	struct msm_vidc_memory_ops            *mem_ops;
+	const struct msm_vidc_memory_ops      *mem_ops;
 	struct media_device_ops               *media_device_ops;
+	const struct msm_vidc_fence_ops       *fence_ops;
 	u32                                    header_id;
 	u32                                    packet_id;
 	u32                                    sys_init_id;
+	struct msm_vidc_synx_fence_data        synx_fence_data;
 };
 
 #endif // _MSM_VIDC_CORE_H_

+ 1 - 0
driver/vidc/inc/msm_vidc_debug.h

@@ -39,6 +39,7 @@ extern int msm_vidc_ddr_bw;
 extern int msm_vidc_llc_bw;
 extern bool msm_vidc_fw_dump;
 extern unsigned int msm_vidc_enable_bugon;
+extern bool msm_vidc_disable_synx_fence;
 
 /* do not modify the log message as it is used in test scripts */
 #define FMT_STRING_SET_CTRL \

+ 18 - 10
driver/vidc/inc/msm_vidc_fence.h

@@ -9,18 +9,26 @@
 #include "msm_vidc_inst.h"
 #include "msm_vidc_buffer.h"
 
-struct msm_vidc_fence *msm_vidc_fence_create(
-		struct msm_vidc_inst *inst);
-int msm_vidc_create_fence_fd(struct msm_vidc_inst *inst,
-		struct msm_vidc_fence *fence);
-struct msm_vidc_fence *msm_vidc_get_fence_from_id(
-	struct msm_vidc_inst *inst, u32 fence_id);
-int msm_vidc_fence_signal(struct msm_vidc_inst *inst,
-		u32 fence_id);
-void msm_vidc_fence_destroy(struct msm_vidc_inst *inst,
-		u32 fence_id);
 int msm_vidc_fence_init(struct msm_vidc_inst *inst);
 void msm_vidc_fence_deinit(struct msm_vidc_inst *inst);
 
+#define call_fence_op(c, op, ...)                  \
+	(((c) && (c)->fence_ops && (c)->fence_ops->op) ? \
+	((c)->fence_ops->op(__VA_ARGS__)) : 0)
+
+struct msm_vidc_fence_ops {
+	int (*fence_register)(struct msm_vidc_core *core);
+	int (*fence_deregister)(struct msm_vidc_core *core);
+	struct msm_vidc_fence *(*fence_create)(struct msm_vidc_inst *inst);
+	int (*fence_create_fd)(struct msm_vidc_inst *inst,
+		struct msm_vidc_fence *fence);
+	void (*fence_destroy)(struct msm_vidc_inst *inst,
+		u64 fence_id);
+	int (*fence_signal)(struct msm_vidc_inst *inst,
+		u64 fence_id);
+	void (*fence_recover)(struct msm_vidc_core *core);
+};
+
+const struct msm_vidc_fence_ops *get_dma_fence_ops(void);
 
 #endif // __H_MSM_VIDC_FENCE_H__

+ 7 - 1
driver/vidc/inc/msm_vidc_internal.h

@@ -464,7 +464,9 @@ enum msm_vidc_buffer_region {
 
 enum msm_vidc_device_region {
 	MSM_VIDC_DEVICE_REGION_NONE = 0,
-	MSM_VIDC_AON_REGISTERS,
+	MSM_VIDC_AON,
+	MSM_VIDC_PROTOCOL_FENCE_CLIENT_VPU,
+	MSM_VIDC_QTIMER,
 	MSM_VIDC_DEVICE_REGION_MAX,
 };
 
@@ -598,6 +600,7 @@ enum msm_vidc_core_capability_type {
 	ENC_AUTO_FRAMERATE,
 	DEVICE_CAPS,
 	SUPPORTS_REQUESTS,
+	SUPPORTS_SYNX_FENCE,
 	CORE_CAP_MAX,
 };
 
@@ -858,6 +861,8 @@ struct msm_vidc_fence {
 	spinlock_t                  lock;
 	struct sync_file            *sync_file;
 	int                         fd;
+	u64                         fence_id;
+	void                        *session;
 };
 
 struct msm_vidc_mem {
@@ -884,6 +889,7 @@ struct msm_vidc_mem {
 	struct sg_table            *table;
 	struct dma_buf_attachment  *attach;
 	phys_addr_t                 phys_addr;
+	enum dma_data_direction     direction;
 };
 
 struct msm_vidc_mem_list {

+ 5 - 1
driver/vidc/inc/msm_vidc_memory.h

@@ -76,6 +76,10 @@ struct msm_vidc_memory_ops {
 		struct msm_vidc_mem *mem);
 	int (*memory_unmap_free)(struct msm_vidc_core *core,
 		struct msm_vidc_mem *mem);
+	int (*mem_dma_map_page)(struct msm_vidc_core *core,
+		struct msm_vidc_mem *mem);
+	int (*mem_dma_unmap_page)(struct msm_vidc_core *core,
+		struct msm_vidc_mem *mem);
 	u32 (*buffer_region)(struct msm_vidc_inst *inst,
 		enum msm_vidc_buffer_type buffer_type);
 	int (*iommu_map)(struct msm_vidc_core *core,
@@ -84,6 +88,6 @@ struct msm_vidc_memory_ops {
 		struct msm_vidc_mem *mem);
 };
 
-struct msm_vidc_memory_ops *get_mem_ops(void);
+const struct msm_vidc_memory_ops *get_mem_ops(void);
 
 #endif // _MSM_VIDC_MEMORY_H_

+ 3 - 2
driver/vidc/inc/msm_vidc_memory_ext.h

@@ -7,7 +7,8 @@
 #ifndef _MSM_VIDC_MEMORY_EXT_H_
 #define _MSM_VIDC_MEMORY_EXT_H_
 
-struct msm_vidc_memory_ops;
-struct msm_vidc_memory_ops *get_mem_ops_ext(void);
+#include "msm_vidc_memory.h"
+
+const struct msm_vidc_memory_ops *get_mem_ops_ext(void);
 
 #endif // _MSM_VIDC_MEMORY_EXT_H_

+ 13 - 0
driver/vidc/inc/msm_vidc_synx.h

@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _H_MSM_VIDC_SYNX_H_
+#define _H_MSM_VIDC_SYNX_H_
+
+#include "msm_vidc_fence.h"
+
+const struct msm_vidc_fence_ops *get_synx_fence_ops(void);
+
+#endif //_H_MSM_VIDC_SYNX_H_

+ 6 - 0
driver/vidc/src/msm_vidc_debug.c

@@ -124,6 +124,10 @@ module_param_cb(msm_vidc_fw_dump, &msm_vidc_fw_dump_fops, &g_core, 0644);
 bool msm_vidc_lossless_encode = !true;
 EXPORT_SYMBOL(msm_vidc_lossless_encode);
 
+/* synx fence is disabled by default for now */
+bool msm_vidc_disable_synx_fence = !false;
+EXPORT_SYMBOL(msm_vidc_disable_synx_fence);
+
 bool msm_vidc_syscache_disable = !true;
 EXPORT_SYMBOL(msm_vidc_syscache_disable);
 
@@ -398,6 +402,8 @@ struct dentry* msm_vidc_debugfs_init_drv(void)
 			&msm_vidc_syscache_disable);
 	debugfs_create_bool("lossless_encoding", 0644, dir,
 			&msm_vidc_lossless_encode);
+	debugfs_create_bool("disable_synx_v2_fence", 0644, dir,
+			&msm_vidc_disable_synx_fence);
 	debugfs_create_u32("enable_bugon", 0644, dir,
 			&msm_vidc_enable_bugon);
 

+ 13 - 9
driver/vidc/src/msm_vidc_driver.c

@@ -1682,17 +1682,19 @@ int msm_vidc_get_fence_fd(struct msm_vidc_inst *inst, int *fence_fd)
 {
 	int rc = 0;
 	struct msm_vidc_fence *fence, *dummy_fence;
+	struct msm_vidc_core *core;
 	bool found = false;
 
 	*fence_fd = INVALID_FD;
 
-	if (!inst || !inst->capabilities) {
+	if (!inst || !inst->capabilities || !inst->core) {
 		d_vpr_e("%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+	core = inst->core;
 
 	list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
-		if (fence->dma_fence.seqno ==
+		if (fence->fence_id ==
 			(u64)inst->capabilities->cap[FENCE_ID].value) {
 			found = true;
 			break;
@@ -1706,7 +1708,7 @@ int msm_vidc_get_fence_fd(struct msm_vidc_inst *inst, int *fence_fd)
 	}
 
 	if (fence->fd == INVALID_FD) {
-		rc = msm_vidc_create_fence_fd(inst, fence);
+		rc = call_fence_op(core, fence_create_fd, inst, fence);
 		if (rc)
 			goto exit;
 	}
@@ -2995,11 +2997,13 @@ int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *
 	int rc = 0;
 	struct msm_vidc_buffer *buf = NULL;
 	struct msm_vidc_fence *fence = NULL;
+	struct msm_vidc_core *core = NULL;
 
-	if (!inst || !vb2 || !inst->capabilities) {
+	if (!inst || !vb2 || !inst->capabilities || !inst->core) {
 		d_vpr_e("%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+	core = inst->core;
 
 	buf = msm_vidc_get_driver_buf(inst, vb2);
 	if (!buf)
@@ -3007,10 +3011,10 @@ int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *
 
 	if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE) &&
 		is_output_buffer(buf->type)) {
-		fence = msm_vidc_fence_create(inst);
+		fence = call_fence_op(core, fence_create, inst);
 		if (!fence)
-			return rc;
-		buf->fence_id = fence->dma_fence.seqno;
+			return -EINVAL;
+		buf->fence_id = fence->fence_id;
 	}
 
 	rc = inst->event_handle(inst, MSM_VIDC_BUF_QUEUE, buf);
@@ -3021,7 +3025,7 @@ exit:
 	if (rc) {
 		i_vpr_e(inst, "%s: qbuf failed\n", __func__);
 		if (fence)
-			msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
+			call_fence_op(core, fence_destroy, inst, fence->fence_id);
 	}
 	return rc;
 }
@@ -5088,7 +5092,7 @@ void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
 
 	list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
 		i_vpr_e(inst, "%s: destroying fence %s\n", __func__, fence->name);
-		msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
+		call_fence_op(core, fence_destroy, inst, fence->fence_id);
 	}
 
 	/* destroy buffers from pool */

+ 27 - 11
driver/vidc/src/msm_vidc_fence.c

@@ -7,8 +7,6 @@
 #include "msm_vidc_driver.h"
 #include "msm_vidc_debug.h"
 
-extern struct msm_vidc_core *g_core;
-
 static const char *msm_vidc_dma_fence_get_driver_name(struct dma_fence *df)
 {
 	struct msm_vidc_fence *fence;
@@ -76,6 +74,8 @@ struct msm_vidc_fence *msm_vidc_fence_create(struct msm_vidc_inst *inst)
 	if (inst->fence_context.seq_num >= INT_MAX)
 		inst->fence_context.seq_num = 0;
 
+	fence->fence_id = fence->dma_fence.seqno;
+
 	INIT_LIST_HEAD(&fence->list);
 	list_add_tail(&fence->list, &inst->fence_list);
 	i_vpr_l(inst, "%s: created %s\n", __func__, fence->name);
@@ -83,7 +83,7 @@ struct msm_vidc_fence *msm_vidc_fence_create(struct msm_vidc_inst *inst)
 	return fence;
 }
 
-int msm_vidc_create_fence_fd(struct msm_vidc_inst *inst,
+int msm_vidc_dma_fence_create_fd(struct msm_vidc_inst *inst,
 	struct msm_vidc_fence *fence)
 {
 	int rc = 0;
@@ -119,8 +119,8 @@ err_fd:
 	return rc;
 }
 
-struct msm_vidc_fence *msm_vidc_get_fence_from_id(
-	struct msm_vidc_inst *inst, u32 fence_id)
+static struct msm_vidc_fence *msm_vidc_get_dma_fence_from_id(
+	struct msm_vidc_inst *inst, u64 fence_id)
 {
 	struct msm_vidc_fence *fence, *dummy_fence;
 	bool found = false;
@@ -131,19 +131,22 @@ struct msm_vidc_fence *msm_vidc_get_fence_from_id(
 	}
 
 	list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
-		if (fence->dma_fence.seqno == (u64)fence_id) {
+		if (fence->fence_id == fence_id) {
 			found = true;
 			break;
 		}
 	}
 
-	if (!found)
+	if (!found) {
+		i_vpr_l(inst, "%s: no fence available for id: %llu\n",
+			__func__, fence_id);
 		return NULL;
+	}
 
 	return fence;
 }
 
-int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u32 fence_id)
+static int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u64 fence_id)
 {
 	int rc = 0;
 	struct msm_vidc_fence *fence;
@@ -153,7 +156,7 @@ int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u32 fence_id)
 		return -EINVAL;
 	}
 
-	fence = msm_vidc_get_fence_from_id(inst, fence_id);
+	fence = msm_vidc_get_dma_fence_from_id(inst, fence_id);
 	if (!fence) {
 		i_vpr_e(inst, "%s: no fence available to signal with id: %u\n",
 			__func__, fence_id);
@@ -163,6 +166,7 @@ int msm_vidc_fence_signal(struct msm_vidc_inst *inst, u32 fence_id)
 
 	i_vpr_l(inst, "%s: fence %s\n", __func__, fence->name);
 	list_del_init(&fence->list);
+
 	dma_fence_signal(&fence->dma_fence);
 	dma_fence_put(&fence->dma_fence);
 
@@ -171,7 +175,7 @@ exit:
 }
 
 
-void msm_vidc_fence_destroy(struct msm_vidc_inst *inst, u32 fence_id)
+static void msm_vidc_fence_destroy(struct msm_vidc_inst *inst, u64 fence_id)
 {
 	struct msm_vidc_fence *fence;
 
@@ -180,7 +184,7 @@ void msm_vidc_fence_destroy(struct msm_vidc_inst *inst, u32 fence_id)
 		return;
 	}
 
-	fence = msm_vidc_get_fence_from_id(inst, fence_id);
+	fence = msm_vidc_get_dma_fence_from_id(inst, fence_id);
 	if (!fence) {
 		return;
 	}
@@ -221,3 +225,15 @@ void msm_vidc_fence_deinit(struct msm_vidc_inst *inst)
 	snprintf(inst->fence_context.name, sizeof(inst->fence_context.name),
 		"%s", "");
 }
+
+static const struct msm_vidc_fence_ops msm_dma_fence_ops = {
+	.fence_create             = msm_vidc_fence_create,
+	.fence_destroy            = msm_vidc_fence_destroy,
+	.fence_signal             = msm_vidc_fence_signal,
+	.fence_create_fd          = msm_vidc_dma_fence_create_fd,
+};
+
+const struct msm_vidc_fence_ops *get_dma_fence_ops(void)
+{
+	return &msm_dma_fence_ops;
+}

+ 106 - 12
driver/vidc/src/msm_vidc_memory.c

@@ -425,8 +425,7 @@ static int msm_vidc_memory_alloc_map(struct msm_vidc_core *core, struct msm_vidc
 
 	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
 	if (!cb) {
-		d_vpr_e("%s: Failed to get context bank device\n",
-			 __func__);
+		d_vpr_e("%s: failed to get context bank device\n", __func__);
 		return -EIO;
 	}
 
@@ -457,18 +456,17 @@ static int msm_vidc_memory_unmap_free(struct msm_vidc_core *core, struct msm_vid
 
 	d_vpr_h(
 		"%s: dmabuf %pK, size %d, kvaddr %pK, buffer_type %s, secure %d, region %d\n",
-		__func__, mem->device_addr, mem->size, mem->kvaddr, buf_name(mem->type),
-		mem->secure, mem->region);
+		__func__, mem->device_addr, mem->size, mem->kvaddr,
+		buf_name(mem->type), mem->secure, mem->region);
 
 	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
 	if (!cb) {
-		d_vpr_e("%s: Failed to get context bank device\n",
-			 __func__);
+		d_vpr_e("%s: failed to get context bank device\n", __func__);
 		return -EIO;
 	}
 
 	dma_free_attrs(cb->dev, mem->size, mem->kvaddr, mem->device_addr,
-		       mem->attrs);
+		mem->attrs);
 
 	mem->kvaddr = NULL;
 	mem->device_addr = 0;
@@ -476,6 +474,99 @@ static int msm_vidc_memory_unmap_free(struct msm_vidc_core *core, struct msm_vid
 	return rc;
 }
 
+static int msm_vidc_dma_map_page(struct msm_vidc_core *core,
+	struct msm_vidc_mem *mem)
+{
+	int rc = 0;
+	struct context_bank_info *cb = NULL;
+	dma_addr_t dma_addr;
+
+	if (!core || !mem) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (mem->refcount) {
+		mem->refcount++;
+		goto exit;
+	}
+
+	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
+	if (!cb) {
+		d_vpr_e("%s: Failed to get context bank device\n",
+			__func__);
+		rc = -EIO;
+		goto error;
+	}
+
+	/* map and obtain dma address for physically contiguous memory */
+	dma_addr = dma_map_page(cb->dev, phys_to_page(mem->phys_addr),
+		0, (size_t)mem->size, mem->direction);
+
+	rc = dma_mapping_error(cb->dev, dma_addr);
+	if (rc) {
+		d_vpr_e("%s: Failed to map memory\n", __func__);
+		goto error;
+	}
+
+	mem->device_addr = dma_addr;
+	mem->refcount++;
+
+exit:
+	d_vpr_l(
+		"%s: type %11s, device_addr %#llx, size %u region %d, refcount %d\n",
+		__func__, buf_name(mem->type), mem->device_addr,
+		mem->size, mem->region, mem->refcount);
+
+	return 0;
+
+error:
+	return rc;
+}
+
+static int msm_vidc_dma_unmap_page(struct msm_vidc_core *core,
+	struct msm_vidc_mem *mem)
+{
+	int rc = 0;
+	struct context_bank_info *cb = NULL;
+
+	if (!core || !mem) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (mem->refcount) {
+		mem->refcount--;
+	} else {
+		d_vpr_e("unmap called while refcount is already zero\n");
+		return -EINVAL;
+	}
+
+	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
+	if (!cb) {
+		d_vpr_e("%s: Failed to get context bank device\n",
+			__func__);
+		rc = -EIO;
+		goto exit;
+	}
+
+	d_vpr_l(
+		"%s: type %11s, device_addr %#llx, refcount %d, region %d\n",
+		__func__, buf_name(mem->type), mem->device_addr,
+		mem->refcount, mem->region);
+
+	if (mem->refcount)
+		goto exit;
+
+	dma_unmap_page(cb->dev, (dma_addr_t)(mem->device_addr),
+		mem->size, mem->direction);
+
+	mem->device_addr = 0x0;
+
+exit:
+	return rc;
+}
+
 static u32 msm_vidc_buffer_region(struct msm_vidc_inst *inst,
 	enum msm_vidc_buffer_type buffer_type)
 {
@@ -494,7 +585,8 @@ static int msm_vidc_iommu_map(struct msm_vidc_core *core, struct msm_vidc_mem *m
 
 	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
 	if (!cb) {
-		d_vpr_e("%s: Failed to get context bank device\n", __func__);
+		d_vpr_e("%s: failed to get context bank device for region: %d\n",
+			__func__, mem->region);
 		return -EIO;
 	}
 
@@ -524,8 +616,8 @@ static int msm_vidc_iommu_unmap(struct msm_vidc_core *core, struct msm_vidc_mem
 
 	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
 	if (!cb) {
-		d_vpr_e("%s: Failed to get context bank device\n",
-			__func__);
+		d_vpr_e("%s: failed to get context bank device for region: %d\n",
+			__func__, mem->region);
 		return -EIO;
 	}
 
@@ -540,7 +632,7 @@ static int msm_vidc_iommu_unmap(struct msm_vidc_core *core, struct msm_vidc_mem
 	return rc;
 }
 
-static struct msm_vidc_memory_ops msm_mem_ops = {
+static const struct msm_vidc_memory_ops msm_mem_ops = {
 	.dma_buf_get                    = msm_vidc_dma_buf_get,
 	.dma_buf_put                    = msm_vidc_dma_buf_put,
 	.dma_buf_put_completely         = msm_vidc_dma_buf_put_completely,
@@ -550,12 +642,14 @@ static struct msm_vidc_memory_ops msm_mem_ops = {
 	.dma_buf_unmap_attachment       = msm_vidc_dma_buf_unmap_attachment,
 	.memory_alloc_map               = msm_vidc_memory_alloc_map,
 	.memory_unmap_free              = msm_vidc_memory_unmap_free,
+	.mem_dma_map_page               = msm_vidc_dma_map_page,
+	.mem_dma_unmap_page             = msm_vidc_dma_unmap_page,
 	.buffer_region                  = msm_vidc_buffer_region,
 	.iommu_map                      = msm_vidc_iommu_map,
 	.iommu_unmap                    = msm_vidc_iommu_unmap,
 };
 
-struct msm_vidc_memory_ops *get_mem_ops(void)
+const struct msm_vidc_memory_ops *get_mem_ops(void)
 {
 	return &msm_mem_ops;
 }

+ 2 - 2
driver/vidc/src/msm_vidc_memory_ext.c

@@ -452,9 +452,9 @@ static int msm_vidc_memory_unmap_free_ext(struct msm_vidc_core *core, struct msm
 	return rc;
 }
 
-struct msm_vidc_memory_ops *get_mem_ops_ext(void)
+const struct msm_vidc_memory_ops *get_mem_ops_ext(void)
 {
-	struct msm_vidc_memory_ops *mem_ops = get_mem_ops();
+	const struct msm_vidc_memory_ops *mem_ops = get_mem_ops();
 	static struct msm_vidc_memory_ops mem_ops_ext;
 
 	memcpy(&mem_ops_ext, mem_ops, sizeof(struct msm_vidc_memory_ops));

+ 36 - 0
driver/vidc/src/msm_vidc_probe.c

@@ -24,6 +24,7 @@
 #include "msm_vidc_driver.h"
 #include "msm_vidc_debug.h"
 #include "msm_vidc_state.h"
+#include "msm_vidc_fence.h"
 #include "msm_vidc_platform.h"
 #include "msm_vidc_core.h"
 #include "msm_vidc_memory.h"
@@ -567,6 +568,40 @@ static int msm_vidc_component_master_bind(struct device *dev)
 		return rc;
 	}
 
+	if (core->capabilities[SUPPORTS_SYNX_FENCE].value) {
+		if (msm_vidc_disable_synx_fence) {
+			/* override synx fence ops with dma fence ops */
+			core->fence_ops = get_dma_fence_ops();
+			if (!core->fence_ops) {
+				d_vpr_e("%s: invalid dma fence ops\n", __func__);
+				return -EINVAL;
+			}
+			core->capabilities[SUPPORTS_SYNX_FENCE].value = 0;
+		} else {
+			/* register for synx fence */
+			rc = call_fence_op(core, fence_register, core);
+			if (rc) {
+				d_vpr_e("%s: failed to register synx fence\n",
+					__func__);
+				core->capabilities[SUPPORTS_SYNX_FENCE].value = 0;
+				/*
+				 * - Bail out for the time being when the synx
+				 *   fence register call returns an error, to
+				 *   help with debugging.
+				 * - Re-initialize fence ops with dma_fence_ops.
+				 *   This is required once we start ignoring
+				 *   the synx fence register error.
+				 */
+				core->fence_ops = get_dma_fence_ops();
+				if (!core->fence_ops) {
+					d_vpr_e("%s: invalid dma fence ops\n", __func__);
+					return -EINVAL;
+				}
+				return rc;
+			}
+		}
+	}
+
 	rc = msm_vidc_initialize_media(core);
 	if (rc) {
 		d_vpr_e("%s: media initialization failed\n", __func__);
@@ -607,6 +642,7 @@ static void msm_vidc_component_master_unbind(struct device *dev)
 	msm_vidc_core_deinit(core, true);
 	venus_hfi_queue_deinit(core);
 	msm_vidc_deinitialize_media(core);
+	call_fence_op(core, fence_deregister, core);
 	component_unbind_all(dev, core);
 
 	d_vpr_h("%s(): succssful\n", __func__);

+ 394 - 0
driver/vidc/src/msm_vidc_synx.c

@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <msm_hw_fence_synx_translation.h>
+#include "msm_vidc_core.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_fence.h"
+#include "msm_vidc_debug.h"
+#include <synx_api.h>
+
+#define MSM_VIDC_SYNX_FENCE_CLIENT_ID      SYNX_CLIENT_HW_FENCE_VID_CTX0
+#define MSM_VIDC_SYNX_CREATE_DMA_FENCE     SYNX_CREATE_DMA_FENCE
+#define MAX_SYNX_FENCE_SESSION_NAME        64
+
+static const char *msm_vidc_synx_dma_fence_get_driver_name(struct dma_fence *df)
+{
+	struct msm_vidc_fence *fence;
+
+	if (df) {
+		fence = container_of(df, struct msm_vidc_fence, dma_fence);
+		return fence->name;
+	}
+	return "msm_vidc_synx_dma_fence_get_driver_name: invalid fence";
+}
+
+static const char *msm_vidc_synx_dma_fence_get_timeline_name(struct dma_fence *df)
+{
+	struct msm_vidc_fence *fence;
+
+	if (df) {
+		fence = container_of(df, struct msm_vidc_fence, dma_fence);
+		return fence->name;
+	}
+	return "msm_vidc_synx_dma_fence_get_timeline_name: invalid fence";
+}
+
+static void msm_vidc_synx_fence_release(struct dma_fence *df)
+{
+	struct msm_vidc_fence *fence;
+	int rc = 0;
+
+	if (!df) {
+		d_vpr_e("%s: invalid dma fence\n", __func__);
+		return;
+	}
+
+	fence = container_of(df, struct msm_vidc_fence, dma_fence);
+	if (!fence) {
+		d_vpr_e("%s: invalid fence\n", __func__);
+		return;
+	}
+	d_vpr_l("%s: name %s\n", __func__, fence->name);
+
+	/* destroy associated synx fence */
+	if (fence->session) {
+		rc = synx_hwfence_release((struct synx_session *)fence->session,
+			(u32)fence->fence_id);
+		if (rc)
+			d_vpr_e("%s: failed to destroy synx fence for %s\n",
+				__func__, fence->name);
+	}
+
+	msm_vidc_vmem_free((void **)&fence);
+	return;
+}
+
+static const struct dma_fence_ops msm_vidc_synx_dma_fence_ops = {
+	.get_driver_name = msm_vidc_synx_dma_fence_get_driver_name,
+	.get_timeline_name = msm_vidc_synx_dma_fence_get_timeline_name,
+	.release = msm_vidc_synx_fence_release,
+};
+
+static struct msm_vidc_fence *msm_vidc_get_synx_fence_from_id(
+	struct msm_vidc_inst *inst, u64 fence_id)
+{
+	struct msm_vidc_fence *fence, *dummy_fence;
+	bool found = false;
+
+	if (!inst) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
+		if (fence->fence_id == fence_id) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		i_vpr_l(inst, "%s: no fence available for id: %llu\n",
+			__func__, fence_id);
+		return NULL;
+	}
+
+	return fence;
+}
+
+static void msm_vidc_synx_fence_destroy(struct msm_vidc_inst *inst, u64 fence_id)
+{
+	struct msm_vidc_fence *fence;
+
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+
+	fence = msm_vidc_get_synx_fence_from_id(inst, fence_id);
+	if (!fence) {
+		return;
+	}
+
+	i_vpr_e(inst, "%s: fence %s\n", __func__, fence->name);
+	list_del_init(&fence->list);
+
+	dma_fence_set_error(&fence->dma_fence, -EINVAL);
+	dma_fence_signal(&fence->dma_fence);
+	dma_fence_put(&fence->dma_fence);
+}
+
+static int msm_vidc_synx_fence_register(struct msm_vidc_core *core)
+{
+	struct synx_initialization_params params;
+	struct synx_session *session = NULL;
+	char synx_session_name[MAX_SYNX_FENCE_SESSION_NAME];
+	struct synx_queue_desc queue_desc;
+
+	if (!core || !core->capabilities) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!core->capabilities[SUPPORTS_SYNX_FENCE].value)
+		return 0;
+
+	/* fill synx_initialization_params */
+	memset(&params, 0, sizeof(struct synx_initialization_params));
+	memset(&queue_desc, 0, sizeof(struct synx_queue_desc));
+
+	params.id = (enum synx_client_id)MSM_VIDC_SYNX_FENCE_CLIENT_ID;
+	snprintf(synx_session_name, MAX_SYNX_FENCE_SESSION_NAME,
+		"video synx fence");
+	params.name = synx_session_name;
+	params.ptr = &queue_desc;
+
+	session =
+		(struct synx_session *)synx_hwfence_initialize(&params);
+	if (IS_ERR_OR_NULL(session)) {
+		d_vpr_e("%s: invalid synx fence session\n", __func__);
+		return -EINVAL;
+	}
+
+	/* fill core synx fence data */
+	core->synx_fence_data.client_id = (u32)params.id;
+	core->synx_fence_data.session = (void *)session;
+	core->synx_fence_data.queue.size = (u32)queue_desc.size;
+	core->synx_fence_data.queue.kvaddr = queue_desc.vaddr;
+	core->synx_fence_data.queue.phys_addr = (phys_addr_t)queue_desc.dev_addr;
+
+	core->synx_fence_data.queue.type = MSM_VIDC_BUF_INTERFACE_QUEUE;
+	core->synx_fence_data.queue.region = MSM_VIDC_NON_SECURE;
+	core->synx_fence_data.queue.direction = DMA_BIDIRECTIONAL;
+
+	d_vpr_h("%s: successfully registered synx fence\n", __func__);
+	return 0;
+}
+
+static int msm_vidc_synx_fence_deregister(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core || !core->capabilities) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!core->capabilities[SUPPORTS_SYNX_FENCE].value)
+		return 0;
+
+	rc = synx_hwfence_uninitialize(
+		(struct synx_session *)core->synx_fence_data.session);
+	if (rc) {
+		d_vpr_e("%s: failed to deregister synx fence\n", __func__);
+		/* ignore error */
+		rc = 0;
+	} else {
+		d_vpr_l("%s: successfully deregistered synx fence\n", __func__);
+	}
+
+	return rc;
+}
+
+static struct msm_vidc_fence *msm_vidc_synx_dma_fence_create(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_fence *fence = NULL;
+	int rc = 0;
+
+	if (!inst) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return NULL;
+	}
+
+	rc = msm_vidc_vmem_alloc(sizeof(*fence), (void **)&fence, __func__);
+	if (rc)
+		return NULL;
+
+	fence->fd = INVALID_FD;
+	spin_lock_init(&fence->lock);
+	dma_fence_init(&fence->dma_fence, &msm_vidc_synx_dma_fence_ops,
+		&fence->lock, inst->fence_context.ctx_num,
+		++inst->fence_context.seq_num);
+	snprintf(fence->name, sizeof(fence->name), "synx %s: %llu",
+		inst->fence_context.name, inst->fence_context.seq_num);
+
+	fence->fence_id = fence->dma_fence.seqno;
+
+	INIT_LIST_HEAD(&fence->list);
+	list_add_tail(&fence->list, &inst->fence_list);
+	i_vpr_l(inst, "%s: created %s\n", __func__, fence->name);
+
+	return fence;
+}
+
+static struct msm_vidc_fence *msm_vidc_synx_fence_create(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_fence *fence = NULL;
+	struct msm_vidc_core *core = NULL;
+	struct synx_create_params params;
+	u32 fence_id = 0;
+
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return NULL;
+	}
+	core = inst->core;
+	if (!core->capabilities) {
+		d_vpr_e("%s: invalid core caps\n", __func__);
+		return NULL;
+	}
+
+	/* return if synx fence is not supported */
+	if (!core->capabilities[SUPPORTS_SYNX_FENCE].value)
+		return NULL;
+
+	/* create dma fence */
+	fence = msm_vidc_synx_dma_fence_create(inst);
+	if (!fence) {
+		i_vpr_e(inst, "%s: failed to create dma fence\n", __func__);
+		return NULL;
+	}
+
+	if (!core->synx_fence_data.session) {
+		i_vpr_e(inst, "%s: invalid synx fence session\n", __func__);
+		goto destroy_dma_fence;
+	}
+
+	/* fill synx fence params structure */
+	memset(&params, 0, sizeof(struct synx_create_params));
+	params.name = fence->name;
+	params.fence = (void *)&fence->dma_fence;
+	params.h_synx = &fence_id;
+	params.flags = MSM_VIDC_SYNX_CREATE_DMA_FENCE;
+
+	/* create hw fence */
+	rc = synx_hwfence_create(
+		(struct synx_session *)core->synx_fence_data.session,
+		&params);
+	if (rc) {
+		i_vpr_e(inst, "%s: failed to create hw fence for %s\n",
+			__func__, fence->name);
+		goto destroy_dma_fence;
+	}
+
+	fence->fence_id = (u64)(*(params.h_synx));
+	/* this copy of hw fence client handle is req. to destroy synx fence */
+	fence->session = core->synx_fence_data.session;
+	i_vpr_l(inst, "%s: successfully created synx fence with id: %llu\n",
+		__func__, fence->fence_id);
+
+	return fence;
+
+destroy_dma_fence:
+	msm_vidc_synx_fence_destroy(inst, fence->fence_id);
+	return NULL;
+}
+
+int msm_vidc_synx_fence_create_fd(struct msm_vidc_inst *inst,
+	struct msm_vidc_fence *fence)
+{
+	int rc = 0;
+
+	if (!inst || !fence) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	fence->fd = get_unused_fd_flags(0);
+	if (fence->fd < 0) {
+		i_vpr_e(inst, "%s: getting fd (%d) failed\n", __func__,
+			fence->fd);
+		rc = -EINVAL;
+		goto err_fd;
+	}
+	fence->sync_file = sync_file_create(&fence->dma_fence);
+	if (!fence->sync_file) {
+		i_vpr_e(inst, "%s: sync_file_create failed\n", __func__);
+		rc = -EINVAL;
+		goto err_sync_file;
+	}
+	fd_install(fence->fd, fence->sync_file->file);
+
+	i_vpr_l(inst, "%s: created fd %d for fence %s\n", __func__,
+		fence->fd, fence->name);
+
+	return 0;
+
+err_sync_file:
+	put_unused_fd(fence->fd);
+err_fd:
+	return rc;
+}
+
+static int msm_vidc_synx_fence_signal(struct msm_vidc_inst *inst, u64 fence_id)
+{
+	int rc = 0;
+	struct msm_vidc_fence *fence;
+	struct msm_vidc_core *core;
+
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core = inst->core;
+	if (!core->capabilities) {
+		d_vpr_e("%s: invalid core caps\n", __func__);
+		return -EINVAL;
+	}
+
+	fence = msm_vidc_get_synx_fence_from_id(inst, fence_id);
+	if (!fence) {
+		i_vpr_e(inst, "%s: no fence available to signal with id: %llu\n",
+			__func__, fence_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	i_vpr_l(inst, "%s: fence %s\n", __func__, fence->name);
+	list_del_init(&fence->list);
+
+	dma_fence_signal(&fence->dma_fence);
+	dma_fence_put(&fence->dma_fence);
+
+exit:
+	return rc;
+}
+
+static void msm_vidc_synx_fence_recover(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+
+	rc = synx_hwfence_recover(
+		(enum synx_client_id)core->synx_fence_data.client_id);
+	if (rc)
+		d_vpr_e("%s: failed to recover synx fences for client id: %d",
+			__func__,
+			(enum synx_client_id)core->synx_fence_data.client_id);
+
+	return;
+}
+
+const struct msm_vidc_fence_ops *get_synx_fence_ops(void)
+{
+	static struct msm_vidc_fence_ops synx_ops;
+
+	synx_ops.fence_register      = msm_vidc_synx_fence_register;
+	synx_ops.fence_deregister    = msm_vidc_synx_fence_deregister;
+	synx_ops.fence_create        = msm_vidc_synx_fence_create;
+	synx_ops.fence_create_fd     = msm_vidc_synx_fence_create_fd;
+	synx_ops.fence_destroy       = msm_vidc_synx_fence_destroy;
+	synx_ops.fence_signal        = msm_vidc_synx_fence_signal;
+	synx_ops.fence_recover       = msm_vidc_synx_fence_recover;
+
+	return &synx_ops;
+}

+ 110 - 25
driver/vidc/src/venus_hfi_queue.c

@@ -427,8 +427,12 @@ void venus_hfi_queue_deinit(struct msm_vidc_core *core)
 
 	call_mem_op(core, memory_unmap_free, core, &core->iface_q_table.mem);
 	call_mem_op(core, memory_unmap_free, core, &core->sfr.mem);
-	call_mem_op(core, iommu_unmap, core, &core->aon.mem);
+	call_mem_op(core, iommu_unmap, core, &core->aon_reg.mem);
+	call_mem_op(core, iommu_unmap, core, &core->fence_reg.mem);
+	call_mem_op(core, iommu_unmap, core, &core->qtimer_reg.mem);
 	call_mem_op(core, memory_unmap_free, core, &core->mmap_buf.mem);
+	call_mem_op(core, mem_dma_unmap_page, core,
+		&core->synx_fence_data.queue);
 
 	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
 		core->iface_queues[i].q_hdr = NULL;
@@ -442,8 +446,14 @@ void venus_hfi_queue_deinit(struct msm_vidc_core *core)
 	core->sfr.align_virtual_addr = NULL;
 	core->sfr.align_device_addr = 0;
 
-	core->aon.align_virtual_addr = NULL;
-	core->aon.align_device_addr = 0;
+	core->aon_reg.align_virtual_addr = NULL;
+	core->aon_reg.align_device_addr = 0;
+
+	core->fence_reg.align_virtual_addr = NULL;
+	core->fence_reg.align_device_addr = 0;
+
+	core->qtimer_reg.align_virtual_addr = NULL;
+	core->qtimer_reg.align_device_addr = 0;
 
 	core->mmap_buf.align_virtual_addr = NULL;
 	core->mmap_buf.align_device_addr = 0;
@@ -488,13 +498,52 @@ int venus_hfi_reset_queue_header(struct msm_vidc_core *core)
 	return rc;
 }
 
+static int venus_hfi_iommu_map_registers(struct msm_vidc_core *core,
+	enum msm_vidc_device_region reg_region,
+	enum msm_vidc_buffer_region buf_region,
+	struct msm_vidc_mem_addr *core_mem)
+{
+	int rc = 0;
+	struct device_region_info *dev_reg;
+	struct msm_vidc_mem mem;
+
+	if (!core_mem) {
+		d_vpr_h("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&mem, 0, sizeof(mem));
+	dev_reg = venus_hfi_get_device_region_info(core, reg_region);
+	if (!dev_reg) {
+		d_vpr_h("%s: %u device region not available\n",
+			__func__, reg_region);
+		goto skip_mmap_buffer;
+	}
+
+	mem.region = buf_region;
+	mem.phys_addr = dev_reg->phy_addr;
+	mem.size = dev_reg->size;
+	mem.device_addr = dev_reg->dev_addr;
+	rc = call_mem_op(core, iommu_map, core, &mem);
+	if (rc) {
+		d_vpr_e("%s: %u map failed\n", __func__, reg_region);
+		goto fail_alloc_queue;
+	}
+	core_mem->align_device_addr = mem.device_addr;
+	core_mem->mem = mem;
+
+skip_mmap_buffer:
+	return 0;
+fail_alloc_queue:
+	return -ENOMEM;
+}
+
 int venus_hfi_queue_init(struct msm_vidc_core *core)
 {
 	int rc = 0;
 	struct hfi_queue_table_header *q_tbl_hdr;
 	struct hfi_queue_header *q_hdr;
 	struct msm_vidc_iface_q_info *iface_q;
-	struct device_region_info *dev_reg;
 	struct msm_vidc_mem mem;
 	int offset = 0;
 	u32 *payload;
@@ -502,6 +551,11 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
 
 	d_vpr_h("%s()\n", __func__);
 
+	if (!core || !core->capabilities) {
+		d_vpr_h("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
 	if (core->iface_q_table.align_virtual_addr) {
 		d_vpr_h("%s: queues already allocated\n", __func__);
 		venus_hfi_reset_queue_header(core);
@@ -588,25 +642,39 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
 	/* write sfr buffer size in first word */
 	*((u32 *)core->sfr.align_virtual_addr) = core->sfr.mem_size;
 
-	/* map aon registers */
-	memset(&mem, 0, sizeof(mem));
-	dev_reg = venus_hfi_get_device_region_info(core, MSM_VIDC_AON_REGISTERS);
-	if (!dev_reg) {
-		d_vpr_h("%s: aon device region not available\n", __func__);
-		goto skip_mmap_buffer;
-	}
-	mem.region = MSM_VIDC_NON_SECURE;
-	mem.phys_addr = dev_reg->phy_addr;
-	mem.size = dev_reg->size;
-	mem.device_addr = dev_reg->dev_addr;
-	rc = call_mem_op(core, iommu_map, core, &mem);
-	if (rc) {
-		d_vpr_e("%s: aon map failed\n", __func__);
-		goto fail_alloc_queue;
+	/* map synx fence tx/rx queue buffer */
+	if (core->capabilities[SUPPORTS_SYNX_FENCE].value) {
+		/*
+		 * queue memory is already allocated by synx fence
+		 * driver during msm_vidc_synx_fence_register(..) call
+		 */
+		rc = call_mem_op(core, mem_dma_map_page, core,
+			&core->synx_fence_data.queue);
+		if (rc) {
+			d_vpr_e("%s: synx fence queue buffer map failed\n", __func__);
+			goto fail_alloc_queue;
+		}
 	}
-	core->aon.align_virtual_addr = mem.kvaddr;
-	core->aon.align_device_addr = mem.device_addr;
-	core->aon.mem = mem;
+
+	/* map aon_reg registers */
+	rc = venus_hfi_iommu_map_registers(core, MSM_VIDC_AON,
+		MSM_VIDC_NON_SECURE, &core->aon_reg);
+	if (rc)
+		return rc;
+
+	/* map fence registers */
+	rc = venus_hfi_iommu_map_registers(core,
+		MSM_VIDC_PROTOCOL_FENCE_CLIENT_VPU,
+		MSM_VIDC_NON_SECURE, &core->fence_reg);
+	if (rc)
+		return rc;
+
+	/* map qtimer low registers */
+	rc = venus_hfi_iommu_map_registers(core,
+		MSM_VIDC_QTIMER,
+		MSM_VIDC_NON_SECURE, &core->qtimer_reg);
+	if (rc)
+		return rc;
 
 	/* allocate 4k buffer for HFI_MMAP_ADDR */
 	memset(&mem, 0, sizeof(mem));
@@ -634,15 +702,32 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
 	 *     payload[7-8]   : address and size of HW mutex registers
 	 *     payload[9-10]  : address and size of IPCC registers
 	 *     payload[11-12] : address and size of AON registers
+	 *     payload[13-14] : address and size of synx fence queue memory
+	 *     payload[19-20] : address and size of IPC_PROTOCOL4_CLIENT_VERSION registers
+	 *     payload[21-22] : address and size of FENCE QTIMER registers
 	 */
 	memset(core->mmap_buf.align_virtual_addr, 0, ALIGNED_MMAP_BUF_SIZE);
 	payload = ((u32 *)core->mmap_buf.align_virtual_addr);
 	payload[0] = 1;
-	payload[11] = core->aon.mem.device_addr;
-	payload[12] = core->aon.mem.size;
+	if (core->aon_reg.mem.device_addr) {
+		payload[11] = core->aon_reg.mem.device_addr;
+		payload[12] = core->aon_reg.mem.size;
+	}
+	if (core->synx_fence_data.queue.device_addr) {
+		payload[13] = core->synx_fence_data.queue.device_addr;
+		payload[14] = core->synx_fence_data.queue.size;
+	}
+	if (core->fence_reg.mem.device_addr) {
+		payload[19] = core->fence_reg.mem.device_addr;
+		payload[20] = core->fence_reg.mem.size;
+	}
+	if (core->qtimer_reg.mem.device_addr) {
+		payload[21] = core->qtimer_reg.mem.device_addr;
+		payload[22] = core->qtimer_reg.mem.size;
+	}
 
-skip_mmap_buffer:
 	return 0;
+
 fail_alloc_queue:
 	return -ENOMEM;
 }

+ 2 - 2
driver/vidc/src/venus_hfi_response.c

@@ -1001,11 +1001,11 @@ static int handle_output_buffer(struct msm_vidc_inst *inst,
 	if (inst->hfi_frame_info.fence_id) {
 		if (buf->data_size) {
 			/* signal fence */
-			msm_vidc_fence_signal(inst,
+			call_fence_op(core, fence_signal, inst,
 				inst->hfi_frame_info.fence_id);
 		} else {
 			/* destroy fence */
-			msm_vidc_fence_destroy(inst,
+			call_fence_op(core, fence_destroy, inst,
 				inst->hfi_frame_info.fence_id);
 		}
 	}

+ 5 - 1
msm_video/Kbuild

@@ -25,7 +25,10 @@ endif
 LINUXINCLUDE   += -I$(VIDEO_DRIVER_ABS_PATH)/platform/common/inc \
                   -I$(VIDEO_DRIVER_ABS_PATH)/variant/common/inc \
                   -I$(VIDEO_DRIVER_ABS_PATH)/vidc/inc \
-                  -I$(VIDEO_ROOT)/include/uapi/vidc
+                  -I$(VIDEO_ROOT)/include/uapi/vidc \
+                  -I$(VIDEO_ROOT)/../mm-drivers/hw_fence/include/ \
+                  -I$(VIDEO_ROOT)/../synx-kernel/msm/synx/ \
+                  -I$(VIDEO_ROOT)/../synx-kernel/include/uapi/synx/media/
 
 USERINCLUDE    += -I$(VIDEO_ROOT)/include/uapi/vidc/media \
                   -I$(VIDEO_ROOT)/include/uapi/vidc
@@ -69,6 +72,7 @@ msm_video-objs += $(VIDEO_DRIVER_REL_PATH)/platform/common/src/msm_vidc_platform
                   $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_memory.o \
                   $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_memory_ext.o \
                   $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_fence.o \
+                  $(VIDEO_DRIVER_REL_PATH)/vidc/src/msm_vidc_synx.o \
                   $(VIDEO_DRIVER_REL_PATH)/vidc/src/venus_hfi.o \
                   $(VIDEO_DRIVER_REL_PATH)/vidc/src/venus_hfi_queue.o \
                   $(VIDEO_DRIVER_REL_PATH)/vidc/src/hfi_packet.o \