
techpack: video: add video driver

Add initial video driver files.

Change-Id: Icd48bbf31e435cf36f149d6c3267cf3a4d7913b3
Signed-off-by: Maheshwar Ajja <[email protected]>
Maheshwar Ajja, 4 years ago
Parent
Current commit
6f107f7e35
39 files changed, 9337 insertions(+), 0 deletions(-)
  1. Makefile (+46, -0)
  2. config/waipio_video.conf (+1, -0)
  3. config/waipio_video.h (+8, -0)
  4. driver/platform/waipio/inc/msm_vidc_waipio.h (+13, -0)
  5. driver/platform/waipio/src/msm_vidc_waipio.c (+236, -0)
  6. driver/variant/iris2/inc/msm_vidc_iris2.h (+15, -0)
  7. driver/variant/iris2/src/msm_vidc_iris2.c (+558, -0)
  8. driver/vidc/inc/fixedpoint.h (+68, -0)
  9. driver/vidc/inc/hfi_packet.h (+53, -0)
  10. driver/vidc/inc/msm_vdec.h (+15, -0)
  11. driver/vidc/inc/msm_venc.h (+15, -0)
  12. driver/vidc/inc/msm_vidc.h (+48, -0)
  13. driver/vidc/inc/msm_vidc_bus.h (+248, -0)
  14. driver/vidc/inc/msm_vidc_core.h (+106, -0)
  15. driver/vidc/inc/msm_vidc_debug.h (+94, -0)
  16. driver/vidc/inc/msm_vidc_driver.h (+41, -0)
  17. driver/vidc/inc/msm_vidc_dt.h (+227, -0)
  18. driver/vidc/inc/msm_vidc_inst.h (+107, -0)
  19. driver/vidc/inc/msm_vidc_internal.h (+468, -0)
  20. driver/vidc/inc/msm_vidc_memory.h (+23, -0)
  21. driver/vidc/inc/msm_vidc_platform.h (+78, -0)
  22. driver/vidc/inc/msm_vidc_v4l2.h (+55, -0)
  23. driver/vidc/inc/msm_vidc_vb2.h (+25, -0)
  24. driver/vidc/inc/venus_hfi.h (+71, -0)
  25. driver/vidc/src/hfi_packet.c (+21, -0)
  26. driver/vidc/src/msm_vdec.c (+108, -0)
  27. driver/vidc/src/msm_venc.c (+106, -0)
  28. driver/vidc/src/msm_vidc.c (+457, -0)
  29. driver/vidc/src/msm_vidc_debug.c (+16, -0)
  30. driver/vidc/src/msm_vidc_driver.c (+151, -0)
  31. driver/vidc/src/msm_vidc_dt.c (+970, -0)
  32. driver/vidc/src/msm_vidc_memory.c (+407, -0)
  33. driver/vidc/src/msm_vidc_platform.c (+133, -0)
  34. driver/vidc/src/msm_vidc_probe.c (+367, -0)
  35. driver/vidc/src/msm_vidc_v4l2.c (+200, -0)
  36. driver/vidc/src/msm_vidc_vb2.c (+44, -0)
  37. driver/vidc/src/venus_hfi.c (+2351, -0)
  38. include/uapi/vidc/media/msm_media_info.h (+1355, -0)
  39. include/uapi/vidc/media/msm_vidc_utils.h (+32, -0)

+ 46 - 0
Makefile

@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# auto-detect subdirs
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+include $(srctree)/techpack/video/config/waipio_video.conf
+LINUXINCLUDE    += -include $(srctree)/techpack/video/config/waipio_video.h
+endif
+endif
+
+LINUXINCLUDE    += -I$(srctree)/techpack/video/include \
+                   -I$(srctree)/techpack/video/include/uapi \
+                   -I$(srctree)/techpack/video/include/uapi/vidc \
+                   -I$(srctree)/techpack/video/driver/vidc/inc \
+                   -I$(srctree)/techpack/video/driver/platform/waipio/inc \
+                   -I$(srctree)/techpack/video/driver/variant/iris2/inc
+
+USERINCLUDE     += -I$(srctree)/techpack/video/include/uapi
+
+ccflags-y       += -I$(srctree)/techpack/video/driver/vidc/src/ \
+                   -I$(srctree)/techpack/video/driver/platform/waipio/src/ \
+                   -I$(srctree)/techpack/video/driver/variant/iris2/src
+
+msm-vidc-objs   := driver/vidc/src/msm_vidc_v4l2.o \
+                   driver/vidc/src/msm_vidc_vb2.o \
+                   driver/vidc/src/msm_vidc.o \
+                   driver/vidc/src/msm_vdec.o \
+                   driver/vidc/src/msm_venc.o \
+                   driver/vidc/src/msm_vidc_driver.o \
+                   driver/vidc/src/msm_vidc_probe.o \
+                   driver/vidc/src/msm_vidc_dt.o \
+                   driver/vidc/src/msm_vidc_platform.o \
+                   driver/vidc/src/msm_vidc_debug.o \
+                   driver/vidc/src/msm_vidc_memory.o \
+                   driver/vidc/src/venus_hfi.o
+
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+msm-vidc-objs   += driver/platform/waipio/src/msm_vidc_waipio.o \
+                   driver/variant/iris2/src/msm_vidc_iris2.o
+endif
+endif
+
+obj-$(CONFIG_MSM_VIDC_V4L2) := msm-vidc.o
+

+ 1 - 0
config/waipio_video.conf

@@ -0,0 +1 @@
+export CONFIG_MSM_VIDC_V4L2=m

+ 8 - 0
config/waipio_video.h

@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_MSM_VIDC_V4L2 1
+#define CONFIG_MSM_VIDC_IRIS2 1
+#define CONFIG_MSM_VIDC_WAIPIO 1

+ 13 - 0
driver/platform/waipio/inc/msm_vidc_waipio.h

@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_WAIPIO_H_
+#define _MSM_VIDC_WAIPIO_H_
+
+#include "msm_vidc_core.h"
+
+int msm_vidc_init_platform_waipio(struct msm_vidc_core *core);
+
+#endif // _MSM_VIDC_WAIPIO_H_

+ 236 - 0
driver/platform/waipio/src/msm_vidc_waipio.c

@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+
+#include "msm_vidc_waipio.h"
+#include "msm_vidc_platform.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_core.h"
+
+#define DDR_TYPE_LPDDR4 0x6
+#define DDR_TYPE_LPDDR4X 0x7
+#define DDR_TYPE_LPDDR5 0x8
+#define DDR_TYPE_LPDDR5X 0x9
+
+#define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \
+{	\
+	.override_bit_info.max_channel_override = mco,	\
+	.override_bit_info.mal_length_override = mlo,	\
+	.override_bit_info.hb_override = hbo,	\
+	.override_bit_info.bank_swzl_level_override = bslo,	\
+	.override_bit_info.bank_spreading_override = bso,	\
+	.override_bit_info.reserved = rs,	\
+	.max_channels = mc,	\
+	.mal_length = ml,	\
+	.highest_bank_bit = hbb,	\
+	.bank_swzl_level = bsl,	\
+	.bank_spreading = bsp,	\
+}
+
+#define ENC     MSM_VIDC_ENCODER
+#define DEC     MSM_VIDC_DECODER
+#define H264    MSM_VIDC_H264
+#define HEVC    MSM_VIDC_HEVC
+#define VP9     MSM_VIDC_VP9
+#define MPEG2   MSM_VIDC_MPEG2
+#define CODECS_ALL     (MSM_VIDC_H264 | MSM_VIDC_HEVC | \
+			MSM_VIDC_VP9 | MSM_VIDC_MPEG2)
+
+static struct msm_vidc_core_data core_data_waipio[] = {
+	/* {type, value} */
+	{ENC_CODECS, H264|HEVC},
+	{DEC_CODECS, H264|HEVC|VP9|MPEG2},
+	{MAX_SESSION_COUNT, 16},
+	{MAX_SECURE_SESSION_COUNT, 3},
+	{MAX_MBPF, 173056},	/* (8192x4320)/256 + (4096x2176)/256 */
+	{MAX_MBPS, 7833600},	/* max_load
+					 * 7680x4320@60fps or 3840x2176@240fps
+					 * which is greater than 4096x2176@120fps,
+					 * 8192x4320@48fps
+					 */
+	{MAX_MBPF_HQ, 8160}, /* ((1920x1088)/256) */
+	{MAX_MBPS_HQ, 489600}, /* ((1920x1088)/256)@60fps */
+	{MAX_MBPF_B_FRAME, 32640}, /* 3840x2176/256 */
+	{MAX_MBPS_B_FRAME, 1958400}, /* 3840x2176/256 MBs@60fps */
+	{SW_PC, 1},
+	{SW_PC_DELAY, 1500}, /* 1500 ms */
+	{FW_UNLOAD, 0},
+	{FW_UNLOAD_DELAY, 1000}, /* 1000 ms */
+	{HW_RESPONSE_TIMEOUT, 1000}, /* 1000 ms */
+	{DEBUG_TIMEOUT, 0},
+	{PREFIX_BUF_COUNT_PIX, 18},
+	{PREFIX_BUF_SIZE_PIX, 13434880}, /* Calculated by VENUS_BUFFER_SIZE for 4096x2160 UBWC */
+	{PREFIX_BUF_COUNT_NON_PIX, 1},
+	{PREFIX_BUF_SIZE_NON_PIX, 209715200}, /*
+		 * Internal buffer size is calculated for secure decode session
+		 * of resolution 4k (4096x2160)
+		 * Internal buf size = calculate_scratch_size() +
+		 *	calculate_scratch1_size() + calculate_persist1_size()
+		 * Take maximum between VP9 10bit, HEVC 10bit, AVC, MPEG2 secure
+		 * decoder sessions
+		 */
+	{PAGEFAULT_NON_FATAL, 1},
+	{PAGETABLE_CACHING, 0},
+	{DCVS, 1},
+	{DECODE_BATCH, 1},
+	{DECODE_BATCH_TIMEOUT, 200},
+	{AV_SYNC_WINDOW_SIZE, 40},
+};
+
+static struct msm_vidc_instance_data instance_data_waipio[] = {
+	/* {type, domains, codecs, min, max, step_or_menu, value} */
+	{FRAME_WIDTH, ENC|DEC, CODECS_ALL, 128, 8192, 1, 1920},
+	{FRAME_HEIGHT, ENC|DEC, CODECS_ALL, 128, 8192, 1, 1080},
+	/* (8192 * 4320) / 256 */
+	{MBPF, ENC|DEC, CODECS_ALL, 64, 138240, 1, 138240},
+	/* ((1920 * 1088) / 256) * 960 fps */
+	{MBPS, ENC|DEC, CODECS_ALL, 64, 7833600, 1, 7833600},
+	{FRAME_RATE, ENC|DEC, CODECS_ALL, 1, 960, 1, 30},
+	{BIT_RATE, ENC|DEC, CODECS_ALL, 1, 220000000, 1, 20000000},
+	{BIT_RATE, ENC, HEVC, 1, 160000000, 1, 20000000},
+	{CABAC_BIT_RATE, ENC, H264, 1, 160000000, 1, 20000000},
+	{SCALE_X, ENC, CODECS_ALL, 8192, 65536, 1, 8192},
+	{SCALE_Y, ENC, CODECS_ALL, 8192, 65536, 1, 8192},
+	{SCALE_X, DEC, CODECS_ALL, 65536, 65536, 1, 65536},
+	{SCALE_Y, DEC, CODECS_ALL, 65536, 65536, 1, 65536},
+	{B_FRAME, ENC, H264|HEVC, 0, 1, 1, 0},
+	{HIER_P_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0},
+	{LTR_COUNT, ENC, H264|HEVC, 0, 2, 1, 0},
+	/* ((4096 * 2304) / 256) * 60 fps */
+	{POWER_SAVE_MBPS, ENC, CODECS_ALL,
+		0, 2211840, 1, 2211840},
+	{I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10},
+	{P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20},
+	{B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20},
+	{I_FRAME_QP, ENC, VP9, 0, 127, 1, 20},
+	{P_FRAME_QP, ENC, VP9, 0, 127, 1, 40},
+	{B_FRAME_QP, ENC, VP9, 0, 127, 1, 40},
+	/* 10 slices */
+	{SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10},
+	{SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10},
+
+	/* Mpeg2 decoder specific */
+	{FRAME_WIDTH, DEC, MPEG2, 128, 1920, 1, 1920},
+	{FRAME_HEIGHT, DEC, MPEG2, 128, 1920, 1, 1080},
+	/* (1920 * 1088) / 256 */
+	{MBPF, DEC, MPEG2, 64, 8160, 1, 8160},
+	/* ((1920 * 1088) / 256) * 30*/
+	{MBPS, DEC, MPEG2, 64, 244800, 1, 244800},
+	{FRAME_RATE, DEC, MPEG2, 1, 30, 1, 30},
+	{BIT_RATE, DEC, MPEG2, 1, 40000000, 1, 20000000},
+
+	/* Secure usecase specific */
+	{SECURE_FRAME_WIDTH, ENC|DEC, CODECS_ALL, 128, 4096, 1, 1920},
+	{SECURE_FRAME_HEIGHT, ENC|DEC, CODECS_ALL, 128, 4096, 1, 1080},
+	/* (4096 * 2304) / 256 */
+	{SECURE_MBPF, ENC|DEC, CODECS_ALL, 64, 36864, 1, 36864},
+	{SECURE_BIT_RATE, ENC|DEC, CODECS_ALL, 1, 40000000, 1, 20000000},
+
+	/* Batch Mode Decode */
+	{BATCH_MBPF, DEC, CODECS_ALL, 64, 34816, 1, 34816},
+	/* (4096 * 2176) / 256 */
+	{BATCH_FRAME_RATE, DEC, CODECS_ALL, 1, 120, 1, 120},
+
+	/* Lossless encoding usecase specific */
+	{LOSSLESS_FRAME_WIDTH, ENC, H264|HEVC, 128, 4096, 1, 1920},
+	{LOSSLESS_FRAME_HEIGHT, ENC, H264|HEVC, 128, 4096, 1, 1080},
+	/* (4096 * 2304) / 256 */
+	{LOSSLESS_MBPF, ENC, H264|HEVC, 64, 36864, 1, 36864},
+
+	/* All intra encoding usecase specific */
+	{ALL_INTRA_FRAME_RATE, ENC, H264|HEVC, 1, 240, 1, 30},
+
+	/* Image specific */
+	{HEVC_IMAGE_FRAME_WIDTH, ENC, HEVC, 128, 512, 1, 512},
+	{HEVC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 128, 512, 1, 512},
+	{HEIC_IMAGE_FRAME_WIDTH, ENC, HEVC, 512, 16384, 1, 16384},
+	{HEIC_IMAGE_FRAME_HEIGHT, ENC, HEVC, 512, 16384, 1, 16384},
+
+	{MB_CYCLES_VSP, ENC, CODECS_ALL, 25, 25, 1, 25},
+	{MB_CYCLES_VPP, ENC, CODECS_ALL, 675, 675, 1, 675},
+	{MB_CYCLES_LP, ENC, CODECS_ALL, 320, 320, 1, 320},
+	{MB_CYCLES_VSP, DEC, CODECS_ALL, 25, 25, 1, 25},
+	{MB_CYCLES_VSP, DEC, VP9, 60, 60, 1, 60},
+	{MB_CYCLES_VPP, DEC, CODECS_ALL, 200, 200, 1, 200},
+	{MB_CYCLES_LP, DEC, CODECS_ALL, 200, 200, 1, 200},
+	{MB_CYCLES_FW, ENC|DEC, CODECS_ALL, 326389, 326389, 1, 326389},
+	{MB_CYCLES_FW_VPP, ENC|DEC, CODECS_ALL, 44156, 44156, 1, 44156},
+};
+
+/*
+ * Custom conversion coefficients for resolution 176x144. Negative
+ * coefficients are converted to s4.9 format
+ * (e.g. -22 is converted to ((1 << 13) - 22)).
+ * 3x3 transformation matrix coefficients are in s4.9 fixed point format.
+ */
+static u32 vpe_csc_custom_matrix_coeff[MAX_MATRIX_COEFFS] = {
+	440, 8140, 8098, 0, 460, 52, 0, 34, 463
+};
+
+/* offset coefficients in s9 fixed point format */
+static u32 vpe_csc_custom_bias_coeff[MAX_BIAS_COEFFS] = {
+	53, 0, 4
+};
+
+/* clamping value for Y/U/V([min,max] for Y/U/V) */
+static u32 vpe_csc_custom_limit_coeff[MAX_LIMIT_COEFFS] = {
+	16, 235, 16, 240, 16, 240
+};
+
+/* Default UBWC config for LPDDR5 */
+static struct msm_vidc_ubwc_config_data ubwc_config_waipio[] = {
+	UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0),
+};
+
+static struct msm_vidc_platform_data waipio_data = {
+	.core_data = core_data_waipio,
+	.core_data_size = ARRAY_SIZE(core_data_waipio),
+	.instance_data = instance_data_waipio,
+	.instance_data_size = ARRAY_SIZE(instance_data_waipio),
+	.csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff,
+	.csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff,
+	.csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff,
+	.ubwc_config = ubwc_config_waipio,
+};
+
+static int msm_vidc_init_data(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct msm_vidc_ubwc_config_data *ubwc_config;
+	u32 ddr_type;
+
+	if (!core || !core->platform) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	d_vpr_h("%s: initialize waipio data\n", __func__);
+
+	ubwc_config = waipio_data.ubwc_config;
+	ddr_type = of_fdt_get_ddrtype();
+	if (ddr_type == -ENOENT)
+		d_vpr_e("Failed to get ddr type, use LPDDR5\n");
+
+	if (ddr_type == DDR_TYPE_LPDDR4 || ddr_type == DDR_TYPE_LPDDR4X)
+		ubwc_config->highest_bank_bit = 0xf;
+	d_vpr_h("%s: DDR Type 0x%x hbb 0x%x\n", __func__,
+		ddr_type, ubwc_config->highest_bank_bit);
+
+	core->platform->data = waipio_data;
+
+	return rc;
+}
+
+int msm_vidc_init_platform_waipio(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	rc = msm_vidc_init_data(core);
+	if (rc)
+		return rc;
+
+	return 0;
+}

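Note on the capability tables above: the {type, domains, codecs, min, max, step_or_menu, value} rows are consumed by lookup helpers that land in later patches. The sketch below shows one way such a lookup could work; the function, the struct field names (inferred from the initializer comment), and the "later rows override earlier ones" policy are all assumptions, not part of this commit.

static int example_get_max_cap(struct msm_vidc_instance_data *data, u32 size,
	u32 cap_type, enum msm_vidc_domain_type domain,
	enum msm_vidc_codec_type codec)
{
	int max = -EINVAL;
	u32 i;

	for (i = 0; i < size; i++) {
		if (data[i].type == cap_type &&
		    (data[i].domains & domain) &&
		    (data[i].codecs & codec))
			max = data[i].max; /* codec-specific rows later in the
					    * table override CODECS_ALL rows */
	}

	return max;
}
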
+ 15 - 0
driver/variant/iris2/inc/msm_vidc_iris2.h

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_vidc_core.h"
+
+//#if defined(CONFIG_MSM_VIDC_IRIS2)
+int msm_vidc_init_iris2(struct msm_vidc_core *core);
+//#else
+//static inline int msm_vidc_init_iris2(struct msm_vidc_core *core)
+//{
+//	return -EINVAL;
+//}
+//#endif

+ 558 - 0
driver/variant/iris2/src/msm_vidc_iris2.c

@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+
+#include "msm_vidc_iris2.h"
+#include "venus_hfi.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_dt.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_debug.h"
+
+
+#define VBIF_BASE_OFFS_IRIS2                   0x00080000
+#define CPU_BASE_OFFS_IRIS2                    0x000A0000
+#define AON_BASE_OFFS			               0x000E0000
+#define CPU_CS_BASE_OFFS_IRIS2		           (CPU_BASE_OFFS_IRIS2)
+#define CPU_IC_BASE_OFFS_IRIS2		           (CPU_BASE_OFFS_IRIS2)
+
+#define CPU_CS_A2HSOFTINTCLR_IRIS2             (CPU_CS_BASE_OFFS_IRIS2 + 0x1C)
+#define CPU_CS_VCICMD_IRIS2                    (CPU_CS_BASE_OFFS_IRIS2 + 0x20)
+#define CPU_CS_VCICMDARG0_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x24)
+#define CPU_CS_VCICMDARG1_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x28)
+#define CPU_CS_VCICMDARG2_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x2C)
+#define CPU_CS_VCICMDARG3_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x30)
+#define CPU_CS_VMIMSG_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x34)
+#define CPU_CS_VMIMSGAG0_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x38)
+#define CPU_CS_VMIMSGAG1_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x3C)
+#define CPU_CS_SCIACMD_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x48)
+#define CPU_CS_H2XSOFTINTEN_IRIS2	(CPU_CS_BASE_OFFS_IRIS2 + 0x148)
+
+/* HFI_CTRL_STATUS */
+#define CPU_CS_SCIACMDARG0_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x4C)
+#define CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK_IRIS2	0xfe
+#define CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY_IRIS2           0x100
+#define CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK_IRIS2     0x40000000
+
+/* HFI_QTBL_INFO */
+#define CPU_CS_SCIACMDARG1_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x50)
+
+/* HFI_QTBL_ADDR */
+#define CPU_CS_SCIACMDARG2_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x54)
+
+/* HFI_VERSION_INFO */
+#define CPU_CS_SCIACMDARG3_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x58)
+
+/* SFR_ADDR */
+#define CPU_CS_SCIBCMD_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x5C)
+
+/* MMAP_ADDR */
+#define CPU_CS_SCIBCMDARG0_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x60)
+
+/* UC_REGION_ADDR */
+#define CPU_CS_SCIBARG1_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x64)
+
+/* UC_REGION_ADDR */
+#define CPU_CS_SCIBARG2_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x68)
+
+/* FAL10 Feature Control */
+#define CPU_CS_X2RPMh_IRIS2		(CPU_CS_BASE_OFFS_IRIS2 + 0x168)
+#define CPU_CS_X2RPMh_MASK0_BMSK_IRIS2	0x1
+#define CPU_CS_X2RPMh_MASK0_SHFT_IRIS2	0x0
+#define CPU_CS_X2RPMh_MASK1_BMSK_IRIS2	0x2
+#define CPU_CS_X2RPMh_MASK1_SHFT_IRIS2	0x1
+#define CPU_CS_X2RPMh_SWOVERRIDE_BMSK_IRIS2	0x4
+#define CPU_CS_X2RPMh_SWOVERRIDE_SHFT_IRIS2	0x3
+
+#define CPU_IC_SOFTINT_IRIS2		(CPU_IC_BASE_OFFS_IRIS2 + 0x150)
+#define CPU_IC_SOFTINT_H2A_SHFT_IRIS2	0x0
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: wrapper
+ * --------------------------------------------------------------------------
+ */
+#define WRAPPER_BASE_OFFS_IRIS2		0x000B0000
+#define WRAPPER_INTR_STATUS_IRIS2	(WRAPPER_BASE_OFFS_IRIS2 + 0x0C)
+#define WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2	0x8
+#define WRAPPER_INTR_STATUS_A2H_BMSK_IRIS2	0x4
+
+#define WRAPPER_INTR_MASK_IRIS2		(WRAPPER_BASE_OFFS_IRIS2 + 0x10)
+#define WRAPPER_INTR_MASK_A2HWD_BMSK_IRIS2	0x8
+#define WRAPPER_INTR_MASK_A2HCPU_BMSK_IRIS2	0x4
+
+#define WRAPPER_CPU_CLOCK_CONFIG_IRIS2	(WRAPPER_BASE_OFFS_IRIS2 + 0x2000)
+#define WRAPPER_CPU_CGC_DIS_IRIS2	(WRAPPER_BASE_OFFS_IRIS2 + 0x2010)
+#define WRAPPER_CPU_STATUS_IRIS2	(WRAPPER_BASE_OFFS_IRIS2 + 0x2014)
+
+#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_IRIS2	(WRAPPER_BASE_OFFS_IRIS2 + 0x54)
+#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS_IRIS2	(WRAPPER_BASE_OFFS_IRIS2 + 0x58)
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: tz_wrapper
+ * --------------------------------------------------------------------------
+ */
+#define WRAPPER_TZ_BASE_OFFS	0x000C0000
+#define WRAPPER_TZ_CPU_CLOCK_CONFIG	(WRAPPER_TZ_BASE_OFFS)
+#define WRAPPER_TZ_CPU_STATUS	(WRAPPER_TZ_BASE_OFFS + 0x10)
+
+#define CTRL_INIT_IRIS2		CPU_CS_SCIACMD_IRIS2
+
+#define CTRL_STATUS_IRIS2	CPU_CS_SCIACMDARG0_IRIS2
+#define CTRL_ERROR_STATUS__M_IRIS2 \
+		CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK_IRIS2
+#define CTRL_INIT_IDLE_MSG_BMSK_IRIS2 \
+		CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK_IRIS2
+#define CTRL_STATUS_PC_READY_IRIS2 \
+		CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY_IRIS2
+
+
+#define QTBL_INFO_IRIS2		CPU_CS_SCIACMDARG1_IRIS2
+
+#define QTBL_ADDR_IRIS2		CPU_CS_SCIACMDARG2_IRIS2
+
+#define VERSION_INFO_IRIS2	    CPU_CS_SCIACMDARG3_IRIS2
+
+#define SFR_ADDR_IRIS2		    CPU_CS_SCIBCMD_IRIS2
+#define MMAP_ADDR_IRIS2		CPU_CS_SCIBCMDARG0_IRIS2
+#define UC_REGION_ADDR_IRIS2	CPU_CS_SCIBARG1_IRIS2
+#define UC_REGION_SIZE_IRIS2	CPU_CS_SCIBARG2_IRIS2
+
+#define AON_WRAPPER_MVP_NOC_LPI_CONTROL	(AON_BASE_OFFS)
+#define AON_WRAPPER_MVP_NOC_LPI_STATUS	(AON_BASE_OFFS + 0x4)
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers (iris2)
+ * --------------------------------------------------------------------------
+ */
+#define VCODEC_NOC_VIDEO_A_NOC_BASE_OFFS		0x00010000
+#define VCODEC_NOC_ERL_MAIN_SWID_LOW			0x00011200
+#define VCODEC_NOC_ERL_MAIN_SWID_HIGH			0x00011204
+#define VCODEC_NOC_ERL_MAIN_MAINCTL_LOW			0x00011208
+#define VCODEC_NOC_ERL_MAIN_ERRVLD_LOW			0x00011210
+#define VCODEC_NOC_ERL_MAIN_ERRCLR_LOW			0x00011218
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW			0x00011220
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH		0x00011224
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW			0x00011228
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH		0x0001122C
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW			0x00011230
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH		0x00011234
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW			0x00011238
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH		0x0001123C
+
+static int __interrupt_init_iris2(struct msm_vidc_core *vidc_core)
+{
+	u32 mask_val = 0;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	/* All interrupts are disabled initially; 0x1F6 is the reset value */
+	mask_val = __read_register(core, WRAPPER_INTR_MASK_IRIS2);
+
+	/* Write 0 to unmask CPU and WD interrupts */
+	mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK_IRIS2|
+			WRAPPER_INTR_MASK_A2HCPU_BMSK_IRIS2);
+	__write_register(core, WRAPPER_INTR_MASK_IRIS2, mask_val);
+
+	return 0;
+}
+
+static int __setup_ucregion_memory_map_iris2(struct msm_vidc_core *vidc_core)
+{
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	__write_register(core, UC_REGION_ADDR_IRIS2,
+			(u32)core->iface_q_table.align_device_addr);
+	__write_register(core, UC_REGION_SIZE_IRIS2, SHARED_QSIZE);
+	__write_register(core, QTBL_ADDR_IRIS2,
+			(u32)core->iface_q_table.align_device_addr);
+	__write_register(core, QTBL_INFO_IRIS2, 0x01);
+	if (core->sfr.align_device_addr)
+		__write_register(core, SFR_ADDR_IRIS2,
+				(u32)core->sfr.align_device_addr);
+	/* update queues vaddr for debug purpose */
+	__write_register(core, CPU_CS_VCICMDARG0_IRIS2,
+		(u32)core->iface_q_table.align_virtual_addr);
+	__write_register(core, CPU_CS_VCICMDARG1_IRIS2,
+		(u32)((u64)core->iface_q_table.align_virtual_addr >> 32));
+
+	return 0;
+}
+
+static int __power_off_iris2(struct msm_vidc_core *vidc_core)
+{
+	u32 lpi_status, reg_status = 0, count = 0, max_count = 10;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!core->power_enabled)
+		return 0;
+
+	if (!(core->intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2))
+		disable_irq_nosync(core->dt->irq);
+	core->intr_status = 0;
+
+	/* HPG 6.1.2 Step 1  */
+	__write_register(core, CPU_CS_X2RPMh_IRIS2, 0x3);
+
+	/* HPG 6.1.2 Step 2, noc to low power */
+	//if (core->res->vpu_ver == VPU_VERSION_IRIS2_1)
+	//	goto skip_aon_mvp_noc;
+
+	__write_register(core, AON_WRAPPER_MVP_NOC_LPI_CONTROL, 0x1);
+	while (!reg_status && count < max_count) {
+		lpi_status =
+			 __read_register(core,
+				AON_WRAPPER_MVP_NOC_LPI_STATUS);
+		reg_status = lpi_status & BIT(0);
+		d_vpr_h("Noc: lpi_status %d noc_status %d (count %d)\n",
+			lpi_status, reg_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count)
+		d_vpr_e("NOC not in qaccept status %d\n", reg_status);
+
+//skip_aon_mvp_noc:
+	/* HPG 6.1.2 Step 3, debug bridge to low power */
+	__write_register(core,
+		WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_IRIS2, 0x7);
+	reg_status = 0;
+	count = 0;
+	while ((reg_status != 0x7) && count < max_count) {
+		lpi_status = __read_register(core,
+				 WRAPPER_DEBUG_BRIDGE_LPI_STATUS_IRIS2);
+		reg_status = lpi_status & 0x7;
+		d_vpr_h("DBLP Set : lpi_status %d reg_status %d (count %d)\n",
+			lpi_status, reg_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count)
+		d_vpr_e("DBLP Set: status %d\n", reg_status);
+
+	/* HPG 6.1.2 Step 4, debug bridge to lpi release */
+	__write_register(core,
+		WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_IRIS2, 0x0);
+	lpi_status = 0x1;
+	count = 0;
+	while (lpi_status && count < max_count) {
+		lpi_status = __read_register(core,
+				 WRAPPER_DEBUG_BRIDGE_LPI_STATUS_IRIS2);
+		d_vpr_h("DBLP Release: lpi_status %d(count %d)\n",
+			lpi_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count)
+		d_vpr_e("DBLP Release: lpi_status %d\n", lpi_status);
+
+	/* HPG 6.1.2 Step 6 */
+	__disable_unprepare_clks(core);
+
+	/* HPG 6.1.2 Step 5 */
+	if (__disable_regulators(core))
+		d_vpr_e("%s: Failed to disable regulators\n", __func__);
+
+	if (__unvote_buses(core))
+		d_vpr_e("%s: Failed to unvote for buses\n", __func__);
+	core->power_enabled = false;
+
+	return 0;
+}
+
+static int __prepare_pc_iris2(struct msm_vidc_core *vidc_core)
+{
+	int rc = 0;
+	u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
+	u32 ctrl_status = 0;
+	int count = 0;
+	const int max_tries = 10;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_status = __read_register(core, CTRL_STATUS_IRIS2);
+	pc_ready = ctrl_status & CTRL_STATUS_PC_READY_IRIS2;
+	idle_status = ctrl_status & BIT(30);
+
+	if (pc_ready) {
+		d_vpr_h("Already in pc_ready state\n");
+		return 0;
+	}
+
+	wfi_status = BIT(0) & __read_register(core, WRAPPER_TZ_CPU_STATUS);
+	if (!wfi_status || !idle_status) {
+		d_vpr_e("Skipping PC, wfi status not set\n");
+		goto skip_power_off;
+	}
+
+	rc = __prepare_pc(core);
+	if (rc) {
+		d_vpr_e("Failed __prepare_pc %d\n", rc);
+		goto skip_power_off;
+	}
+
+	while (count < max_tries) {
+		wfi_status = BIT(0) & __read_register(core,
+				WRAPPER_TZ_CPU_STATUS);
+		ctrl_status = __read_register(core,
+				CTRL_STATUS_IRIS2);
+		if (wfi_status && (ctrl_status & CTRL_STATUS_PC_READY_IRIS2))
+			break;
+		usleep_range(150, 250);
+		count++;
+	}
+
+	if (count == max_tries) {
+		d_vpr_e("Skip PC. Core is not in right state\n");
+		goto skip_power_off;
+	}
+
+	return rc;
+
+skip_power_off:
+	d_vpr_e("Skip PC, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n",
+		wfi_status, idle_status, pc_ready, ctrl_status);
+	return -EAGAIN;
+}
+
+static int __raise_interrupt_iris2(struct msm_vidc_core *vidc_core)
+{
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	__write_register(core, CPU_IC_SOFTINT_IRIS2,
+				1 << CPU_IC_SOFTINT_H2A_SHFT_IRIS2);
+	return 0;
+}
+
+static int __watchdog_iris2(struct msm_vidc_core *vidc_core, u32 intr_status)
+{
+	int rc = 0;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2)
+		rc = 1;
+
+	return rc;
+}
+
+static int __noc_error_info_iris2(struct msm_vidc_core *vidc_core)
+{
+	u32 val = 0;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	//if (core->res->vpu_ver == VPU_VERSION_IRIS2_1)
+	//	return;
+
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_SWID_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_SWID_HIGH);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_MAINCTL_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRVLD_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRCLR_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(core, VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH);
+	d_vpr_e("VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+
+	return 0;
+}
+
+static int __clear_interrupt_iris2(struct msm_vidc_core *vidc_core)
+{
+	u32 intr_status = 0, mask = 0;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: NULL core\n", __func__);
+		return 0;
+	}
+
+	intr_status = __read_register(core, WRAPPER_INTR_STATUS_IRIS2);
+	mask = (WRAPPER_INTR_STATUS_A2H_BMSK_IRIS2|
+		WRAPPER_INTR_STATUS_A2HWD_BMSK_IRIS2|
+		CTRL_INIT_IDLE_MSG_BMSK_IRIS2);
+
+	if (intr_status & mask) {
+		core->intr_status |= intr_status;
+		core->reg_count++;
+		d_vpr_l("INTERRUPT: times: %d interrupt_status: %d\n",
+			core->reg_count, intr_status);
+	} else {
+		core->spur_count++;
+	}
+
+	__write_register(core, CPU_CS_A2HSOFTINTCLR_IRIS2, 1);
+
+	return 0;
+}
+
+static int __boot_firmware_iris2(struct msm_vidc_core *vidc_core)
+{
+	int rc = 0;
+	u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 1000;
+	struct msm_vidc_core *core = vidc_core;
+
+	if (!core) {
+		d_vpr_e("%s: NULL core\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_init_val = BIT(0);
+
+	__write_register(core, CTRL_INIT_IRIS2, ctrl_init_val);
+	while (!ctrl_status && count < max_tries) {
+		ctrl_status = __read_register(core, CTRL_STATUS_IRIS2);
+		if ((ctrl_status & CTRL_ERROR_STATUS__M_IRIS2) == 0x4) {
+			d_vpr_e("invalid setting for UC_REGION\n");
+			break;
+		}
+
+		usleep_range(50, 100);
+		count++;
+	}
+
+	if (count >= max_tries) {
+		d_vpr_e("Error booting up vidc firmware\n");
+		rc = -ETIME;
+	}
+
+	/* Enable interrupt before sending commands to venus */
+	__write_register(core, CPU_CS_H2XSOFTINTEN_IRIS2, 0x1);
+	__write_register(core, CPU_CS_X2RPMh_IRIS2, 0x0);
+
+	return rc;
+}
+
+static struct msm_vidc_venus_ops iris2_ops = {
+	.boot_firmware = __boot_firmware_iris2,
+	.interrupt_init = __interrupt_init_iris2,
+	.raise_interrupt = __raise_interrupt_iris2,
+	.clear_interrupt = __clear_interrupt_iris2,
+	.setup_ucregion_memmap = __setup_ucregion_memory_map_iris2,
+	.clock_config_on_enable = NULL,
+	.reset_ahb2axi_bridge = __reset_ahb2axi_bridge,
+	.power_off = __power_off_iris2,
+	.prepare_pc = __prepare_pc_iris2,
+	.watchdog = __watchdog_iris2,
+	.noc_error_info = __noc_error_info_iris2,
+};
+
+static int msm_vidc_buffer_size(struct msm_vidc_inst *inst,
+		enum msm_vidc_buffer_type type)
+{
+	int rc = 0;
+
+	if (!inst) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("%s()\n", __func__);
+	return rc;
+}
+
+static int msm_vidc_buffer_min_count(struct msm_vidc_inst *inst,
+		enum msm_vidc_buffer_type type)
+{
+	int rc = 0;
+
+	if (!inst) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("%s()\n", __func__);
+	return rc;
+}
+
+static int msm_vidc_buffer_extra_count(struct msm_vidc_inst *inst,
+		enum msm_vidc_buffer_type type)
+{
+	int rc = 0;
+
+	if (!inst) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("%s()\n", __func__);
+	return rc;
+}
+
+static struct msm_vidc_session_ops msm_session_ops = {
+	.buffer_size = msm_vidc_buffer_size,
+	.min_count = msm_vidc_buffer_min_count,
+	.extra_count = msm_vidc_buffer_extra_count,
+	.calc_freq = NULL,
+	.calc_bw = NULL,
+	.decide_work_route = NULL,
+	.decide_work_mode = NULL,
+	.decide_core_and_power_mode = NULL,
+};
+
+int msm_vidc_init_iris2(struct msm_vidc_core *core)
+{
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("%s()\n", __func__);
+	core->venus_ops = &iris2_ops;
+	core->session_ops = &msm_session_ops;
+
+	return 0;
+}

+ 68 - 0
driver/vidc/inc/fixedpoint.h

@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifdef _FIXP_ARITH_H
+#error "This implementation is meant to override fixp-arith.h, don't use both"
+#endif
+
+#ifndef _FIXEDPOINT_H_
+#define _FIXEDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+/*
+ * This would normally be a typedef, but checkpatch doesn't like typedefs.
+ * Ideally it would be typedef'ed to intmax_t, but that doesn't seem to be
+ * available in the kernel.
+ */
+#define fp_t size_t
+
+/* (Arbitrarily) make the first 25% of the bits to be the fractional bits */
+#define FP_FRACTIONAL_BITS ((sizeof(fp_t) * 8) / 4)
+
+#define FP(__i, __f_n, __f_d) \
+	((((fp_t)(__i)) << FP_FRACTIONAL_BITS) + \
+	(((__f_n) << FP_FRACTIONAL_BITS) / (__f_d)))
+
+#define FP_INT(__i) FP(__i, 0, 1)
+#define FP_ONE FP_INT(1)
+#define FP_ZERO FP_INT(0)
+
+static inline size_t fp_frac_base(void)
+{
+	return GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+static inline size_t fp_frac(fp_t a)
+{
+	return a & GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+static inline size_t fp_int(fp_t a)
+{
+	return a >> FP_FRACTIONAL_BITS;
+}
+
+static inline size_t fp_round(fp_t a)
+{
+	/* is the fractional part >= frac_max / 2? */
+	bool round_up = fp_frac(a) >= fp_frac_base() / 2;
+
+	return fp_int(a) + round_up;
+}
+
+static inline fp_t fp_mult(fp_t a, fp_t b)
+{
+	return (a * b) >> FP_FRACTIONAL_BITS;
+}
+
+
+static inline fp_t fp_div(fp_t a, fp_t b)
+{
+	return (a << FP_FRACTIONAL_BITS) / b;
+}
+
+#endif

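The helpers above implement an unsigned fixed-point type whose low quarter of the bits (16 on a 64-bit kernel) hold the fraction. A minimal usage sketch, not part of this commit, e.g. for the 1.28 compression ratio that msm_vidc_bus.h encodes as FP(1, 28, 100):

#include "fixedpoint.h"

static void fixedpoint_example(void)
{
	fp_t ratio = FP(1, 28, 100);	/* ~1.28, i.e. 1 + 28/100 */
	fp_t frames = FP_INT(2);

	/* 1.28 * 2 = 2.56; fp_round() rounds the fraction up, giving 3 */
	size_t rounded = fp_round(fp_mult(ratio, frames));

	(void)rounded;
}
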
+ 53 - 0
driver/vidc/inc/hfi_packet.h

@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _HFI_PACKET_H_
+#define _HFI_PACKET_H_
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_core.h"
+
+struct hfi_header {
+	u32 size;
+	u32 session_id;
+	u32 header_id;
+	u32 reserved[4];
+	u32 num_packets;
+};
+
+struct hfi_packet {
+	u32 size;
+	u32 type;
+	u32 flags;
+	u32 payload_info;
+	u32 port;
+	u32 packet_id;
+	u32 reserved[2];
+};
+
+struct hfi_buffer {
+	u32 type;
+	u32 index;
+	u64 base_address;
+	u32 addr_offset;
+	u32 buffer_size;
+	u32 data_offset;
+	u32 data_size;
+	u32 flags;
+	u64 timestamp;
+	u32 reserved[5];
+};
+
+int hfi_packet_sys_init(struct msm_vidc_core *core,
+		void *packet, u32 packet_size);
+int hfi_packet_image_version(struct msm_vidc_core *core,
+		void *packet, u32 packet_size);
+int hfi_packet_sys_debug_config(struct msm_vidc_core *core,
+		void *packet, u32 packet_size, u32 mode);
+int hfi_packet_sys_pc_prep(struct msm_vidc_core *core,
+		void *packet, u32 packet_size);
+
+#endif // _HFI_PACKET_H_

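The layout implied by these structs, and assumed here since the hfi_packet.c helpers are only stubbed in this patch, is a single command buffer holding one struct hfi_header followed by hfi_header.num_packets variable-sized struct hfi_packet entries. A sketch of initializing such a buffer:

#include <linux/string.h>
#include "hfi_packet.h"

static void example_init_hfi_header(u8 *buf, u32 session_id)
{
	struct hfi_header *hdr = (struct hfi_header *)buf;

	memset(hdr, 0, sizeof(*hdr));
	hdr->size = sizeof(*hdr);	/* grows as packets are appended */
	hdr->session_id = session_id;
	hdr->num_packets = 0;
}
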
+ 15 - 0
driver/vidc/inc/msm_vdec.h

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VDEC_H_
+#define _MSM_VDEC_H_
+
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+
+int msm_vdec_inst_init(struct msm_vidc_inst *inst);
+int msm_vdec_ctrl_init(struct msm_vidc_inst *inst);
+
+#endif // _MSM_VDEC_H_

+ 15 - 0
driver/vidc/inc/msm_venc.h

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VENC_H_
+#define _MSM_VENC_H_
+
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+
+int msm_venc_inst_init(struct msm_vidc_inst *inst);
+int msm_venc_ctrl_init(struct msm_vidc_inst *inst);
+
+#endif // _MSM_VENC_H_

+ 48 - 0
driver/vidc/inc/msm_vidc.h

@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_H_
+#define _MSM_VIDC_H_
+
+#include <linux/videodev2.h>
+#include <media/media-device.h>
+
+union msm_v4l2_cmd {
+	struct v4l2_decoder_cmd dec;
+	struct v4l2_encoder_cmd enc;
+};
+
+void *msm_vidc_open(void *core, u32 session_type);
+int msm_vidc_close(void *instance);
+int msm_vidc_suspend(int core_id);
+int msm_vidc_querycap(void *instance, struct v4l2_capability *cap);
+int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
+int msm_vidc_s_fmt(void *instance, struct v4l2_format *f);
+int msm_vidc_g_fmt(void *instance, struct v4l2_format *f);
+int msm_vidc_s_ctrl(void *instance, struct v4l2_control *a);
+int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
+int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
+int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
+int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index);
+int msm_vidc_qbuf(void *instance, struct media_device *mdev,
+		struct v4l2_buffer *b);
+int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_streamon(void *instance, enum v4l2_buf_type i);
+int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl);
+int msm_vidc_query_menu(void *instance, struct v4l2_querymenu *qmenu);
+int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i);
+int msm_vidc_cmd(void *instance, union msm_v4l2_cmd *cmd);
+int msm_vidc_poll(void *instance, struct file *filp,
+		struct poll_table_struct *pt);
+int msm_vidc_subscribe_event(void *instance,
+		const struct v4l2_event_subscription *sub);
+int msm_vidc_unsubscribe_event(void *instance,
+		const struct v4l2_event_subscription *sub);
+int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *a);
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
+#endif

+ 248 - 0
driver/vidc/inc/msm_vidc_bus.h

@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __H_MSM_VIDC_BUS_DEFS_H__
+#define __H_MSM_VIDC_BUS_DEFS_H__
+
+#include "fixedpoint.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_internal.h"
+
+#define COMPRESSION_RATIO_MAX 5
+
+enum vidc_bus_type {
+	PERF,
+	DDR,
+	LLCC,
+};
+
+/*
+ * Minimum dimensions for which to calculate bandwidth.
+ * This means that anything bandwidth(0, 0) ==
+ * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height)
+ */
+static const struct {
+	int height, width;
+} BASELINE_DIMENSIONS = {
+	.width = 1280,
+	.height = 720,
+};
+
+/* converts Mbps to bps (the "b" part can be bits or bytes based on context) */
+#define kbps(__mbps) ((__mbps) * 1000)
+#define bps(__mbps) (kbps(__mbps) * 1000)
+
+#define GENERATE_COMPRESSION_PROFILE(__bpp, __worst) {              \
+	.bpp = __bpp,                                                          \
+	.ratio = __worst,                \
+}
+
+/*
+ * The below table is a structural representation of the following table:
+ *  Resolution |    Bitrate |              Compression Ratio          |
+ * ............|............|.........................................|
+ * Width Height|Average High|Avg_8bpc Worst_8bpc Avg_10bpc Worst_10bpc|
+ *  1280    720|      7   14|    1.69       1.28      1.49        1.23|
+ *  1920   1080|     20   40|    1.69       1.28      1.49        1.23|
+ *  2560   1440|     32   64|     2.2       1.26      1.97        1.22|
+ *  3840   2160|     42   84|     2.2       1.26      1.97        1.22|
+ *  4096   2160|     44   88|     2.2       1.26      1.97        1.22|
+ *  4096   2304|     48   96|     2.2       1.26      1.97        1.22|
+ */
+static struct lut {
+	int frame_size; /* width x height */
+	int frame_rate;
+	unsigned long bitrate;
+	struct {
+		int bpp;
+		fp_t ratio;
+	} compression_ratio[COMPRESSION_RATIO_MAX];
+} const LUT[] = {
+	{
+		.frame_size = 1280 * 720,
+		.frame_rate = 30,
+		.bitrate = 14,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 1280 * 720,
+		.frame_rate = 60,
+		.bitrate = 22,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 1920 * 1088,
+		.frame_rate = 30,
+		.bitrate = 40,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 1920 * 1088,
+		.frame_rate = 60,
+		.bitrate = 64,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 2560 * 1440,
+		.frame_rate = 30,
+		.bitrate = 64,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 2560 * 1440,
+		.frame_rate = 60,
+		.bitrate = 102,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 3840 * 2160,
+		.frame_rate = 30,
+		.bitrate = 84,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 3840 * 2160,
+		.frame_rate = 60,
+		.bitrate = 134,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2160,
+		.frame_rate = 30,
+		.bitrate = 88,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2160,
+		.frame_rate = 60,
+		.bitrate = 141,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2304,
+		.frame_rate = 30,
+		.bitrate = 96,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2304,
+		.frame_rate = 60,
+		.bitrate = 154,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
+					FP(1, 22, 100)),
+		}
+	},
+};
+
+static inline u32 get_type_frm_name(const char *name)
+{
+	if (!strcmp(name, "venus-llcc"))
+		return LLCC;
+	else if (!strcmp(name, "venus-ddr"))
+		return DDR;
+	else
+		return PERF;
+}
+
+#define DUMP_HEADER_MAGIC 0xdeadbeef
+#define DUMP_FP_FMT "%FP" /* special format for fp_t */
+
+struct dump {
+	char *key;
+	char *format;
+	size_t val;
+};
+
+struct lut const *__lut(int width, int height, int fps);
+fp_t __compression_ratio(struct lut const *entry, int bpp);
+void __dump(struct dump dump[], int len);
+
+static inline bool __ubwc(enum msm_vidc_colorformat_type f)
+{
+	switch (f) {
+	case MSM_VIDC_FMT_NV12_UBWC:
+	case MSM_VIDC_FMT_NV12_TP10_UBWC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline int __bpp(enum msm_vidc_colorformat_type f)
+{
+	switch (f) {
+	case MSM_VIDC_FMT_NV12:
+	case MSM_VIDC_FMT_NV21:
+	case MSM_VIDC_FMT_NV12_UBWC:
+	case MSM_VIDC_FMT_RGBA8888_UBWC:
+		return 8;
+	case MSM_VIDC_FMT_NV12_P010_UBWC:
+	case MSM_VIDC_FMT_NV12_TP10_UBWC:
+		return 10;
+	default:
+		d_vpr_e("Unsupported colorformat (%x)", f);
+		return INT_MAX;
+	}
+}
+
+#endif // __H_MSM_VIDC_BUS_DEFS_H__

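__lut() and __compression_ratio() are declared above but defined elsewhere in the series; one plausible implementation against LUT[] looks like the sketch below. The selection policy (smallest profile that covers the load, falling back to the largest) is an assumption.

static inline struct lut const *example_lut(int width, int height, int fps)
{
	int frame_size = width * height;
	int c;

	/* return the first profile large enough for the requested load */
	for (c = 0; c < ARRAY_SIZE(LUT); c++) {
		if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps)
			return &LUT[c];
	}

	/* fall back to the largest profile */
	return &LUT[ARRAY_SIZE(LUT) - 1];
}

static inline fp_t example_compression_ratio(struct lut const *entry, int bpp)
{
	int c;

	for (c = 0; c < COMPRESSION_RATIO_MAX; c++) {
		if (entry->compression_ratio[c].bpp == bpp)
			return entry->compression_ratio[c].ratio;
	}

	return FP_ONE;	/* unknown bpp: assume no compression */
}
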
+ 106 - 0
driver/vidc/inc/msm_vidc_core.h

@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_CORE_H_
+#define _MSM_VIDC_CORE_H_
+
+#include <linux/platform_device.h>
+
+#include "msm_vidc_internal.h"
+
+struct msm_vidc_core;
+
+#define call_venus_op(d, op, ...)			\
+	(((d) && (d)->venus_ops && (d)->venus_ops->op) ? \
+	((d)->venus_ops->op(__VA_ARGS__)):0)
+
+struct msm_vidc_venus_ops {
+	int (*boot_firmware)(struct msm_vidc_core *core);
+	int (*reset_ahb2axi_bridge)(struct msm_vidc_core *core);
+	int (*clock_config_on_enable)(struct msm_vidc_core *core);
+	int (*interrupt_init)(struct msm_vidc_core *core);
+	int (*setup_ucregion_memmap)(struct msm_vidc_core *core);
+	int (*raise_interrupt)(struct msm_vidc_core *core);
+	int (*clear_interrupt)(struct msm_vidc_core *core);
+	int (*prepare_pc)(struct msm_vidc_core *core);
+	int (*power_off)(struct msm_vidc_core *core);
+	int (*watchdog)(struct msm_vidc_core *core, u32 intr_status);
+	int (*noc_error_info)(struct msm_vidc_core *core);
+};
+
+struct msm_vidc_mem_addr {
+	u32 align_device_addr;
+	u8 *align_virtual_addr;
+	u32 mem_size;
+	struct msm_vidc_map   map;
+	struct msm_vidc_alloc alloc;
+};
+
+struct msm_vidc_iface_q_info {
+	void *q_hdr;
+	struct msm_vidc_mem_addr q_array;
+};
+
+struct msm_video_device {
+	enum msm_vidc_domain_type              type;
+	struct video_device                    vdev;
+};
+
+struct msm_vidc_core_power {
+	u64 clk_freq;
+	u64 bw_ddr;
+	u64 bw_llcc;
+};
+
+enum msm_vidc_core_state {
+	MSM_VIDC_CORE_DEINIT       = 0,
+	MSM_VIDC_CORE_INIT         = 1,
+	MSM_VIDC_CORE_ERROR        = 2,
+};
+
+struct msm_vidc_core {
+	struct platform_device                *pdev;
+	struct msm_video_device                vdev[2];
+	struct v4l2_device                     v4l2_dev;
+	struct list_head                       instances;
+	struct list_head                       dangling_instances;
+	struct dentry                         *debugfs_root;
+	enum msm_vidc_core_state               state;
+	struct mutex                           lock;
+	struct msm_vidc_dt                    *dt;
+	struct msm_vidc_platform              *platform;
+	u8 __iomem                            *register_base_addr;
+	u32                                    intr_status;
+	u32                                    spur_count;
+	u32                                    reg_count;
+	bool                                   power_enabled;
+	struct msm_vidc_mem_addr               sfr;
+	struct msm_vidc_mem_addr               iface_q_table;
+	struct msm_vidc_iface_q_info           iface_queues[VIDC_IFACEQ_NUMQ];
+	struct work_struct                     device_work;
+	struct workqueue_struct               *device_workq;
+	struct delayed_work                    pm_work;
+	struct workqueue_struct               *pm_workq;
+	struct delayed_work                    fw_unload_work;
+	struct delayed_work                    batch_work;
+	struct work_struct                     ssr_work;
+	struct msm_vidc_core_power             power;
+	struct msm_vidc_ssr                    ssr;
+	bool                                   smmu_fault_handled;
+	u32                                    skip_pc_count;
+	u32                                    last_packet_type;
+	u8                                    *packet;
+	u32                                    packet_size;
+	struct v4l2_file_operations           *v4l2_file_ops;
+	struct v4l2_ioctl_ops                 *v4l2_ioctl_ops;
+	struct v4l2_ctrl_ops                  *v4l2_ctrl_ops;
+	struct vb2_ops                        *vb2_ops;
+	struct vb2_mem_ops                    *vb2_mem_ops;
+	struct msm_vidc_venus_ops             *venus_ops;
+	struct msm_vidc_session_ops           *session_ops;
+	struct msm_vidc_memory_ops            *mem_ops;
+};
+
+#endif // _MSM_VIDC_CORE_H_

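call_venus_op() NULL-checks the ops table and the hook, so variant hooks left NULL (e.g. clock_config_on_enable in iris2_ops) simply evaluate to 0. A usage sketch with an assumed caller, not part of this commit:

static int example_boot(struct msm_vidc_core *core)
{
	int rc;

	/* the core is passed again as the hook's own argument */
	rc = call_venus_op(core, boot_firmware, core);
	if (rc)
		return rc;

	return call_venus_op(core, raise_interrupt, core);
}
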
+ 94 - 0
driver/vidc/inc/msm_vidc_debug.h

@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_VIDC_DEBUG__
+#define __MSM_VIDC_DEBUG__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/time.h>
+
+#ifndef VIDC_DBG_LABEL
+#define VIDC_DBG_LABEL "msm_vidc"
+#endif
+
+#define VIDC_DBG_TAG VIDC_DBG_LABEL ": %6s: %08x: %5s: "
+#define FW_DBG_TAG VIDC_DBG_LABEL ": %6s: "
+#define DEFAULT_SID ((u32)-1)
+
+extern int msm_vidc_debug;
+extern bool msm_vidc_lossless_encode;
+extern bool msm_vidc_syscache_disable;
+
+/* To enable messages, OR these values together and
+ * echo the result to the debugfs file.
+ *
+ * To enable all messages, set debug_level = 0x101F
+ */
+
+enum vidc_msg_prio {
+	VIDC_ERR        = 0x00000001,
+	VIDC_INFO       = 0x00000001,
+	VIDC_HIGH       = 0x00000002,
+	VIDC_LOW        = 0x00000004,
+	VIDC_PERF       = 0x00000008,
+	VIDC_PKT        = 0x00000010,
+	VIDC_BUS        = 0x00000020,
+	VIDC_ENCODER    = 0x00000100,
+	VIDC_DECODER    = 0x00000200,
+	VIDC_PRINTK     = 0x00001000,
+	VIDC_FTRACE     = 0x00002000,
+	FW_LOW          = 0x00010000,
+	FW_MEDIUM       = 0x00020000,
+	FW_HIGH         = 0x00040000,
+	FW_ERROR        = 0x00080000,
+	FW_FATAL        = 0x00100000,
+	FW_PERF         = 0x00200000,
+	FW_PRINTK       = 0x10000000,
+	FW_FTRACE       = 0x20000000,
+};
+#define FW_LOGSHIFT    16
+#define FW_LOGMASK     0x0FFF0000
+
+#define dprintk(__level, sid, __fmt, ...)	\
+	do { \
+		pr_err(VIDC_DBG_TAG __fmt, \
+			"level", \
+			sid, \
+			"codec", \
+			##__VA_ARGS__); \
+	} while (0)
+
+#define s_vpr_e(sid, __fmt, ...) dprintk(VIDC_ERR, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_i(sid, __fmt, ...) dprintk(VIDC_INFO, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_h(sid, __fmt, ...) dprintk(VIDC_HIGH, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_l(sid, __fmt, ...) dprintk(VIDC_LOW, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_p(sid, __fmt, ...) dprintk(VIDC_PERF, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_t(sid, __fmt, ...) dprintk(VIDC_PKT, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_b(sid, __fmt, ...) dprintk(VIDC_BUS, sid, __fmt, ##__VA_ARGS__)
+#define s_vpr_hp(sid, __fmt, ...) \
+			dprintk(VIDC_HIGH|VIDC_PERF, sid, __fmt, ##__VA_ARGS__)
+
+#define d_vpr_e(__fmt, ...)	\
+			dprintk(VIDC_ERR, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+#define d_vpr_i(__fmt, ...) \
+			dprintk(VIDC_INFO, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+#define d_vpr_h(__fmt, ...) \
+			dprintk(VIDC_HIGH, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+#define d_vpr_l(__fmt, ...) \
+			dprintk(VIDC_LOW, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+#define d_vpr_p(__fmt, ...) \
+			dprintk(VIDC_PERF, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+#define d_vpr_t(__fmt, ...) \
+			dprintk(VIDC_PKT, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+#define d_vpr_b(__fmt, ...) \
+			dprintk(VIDC_BUS, DEFAULT_SID, __fmt, ##__VA_ARGS__)
+
+#define MSM_VIDC_ERROR(value)					\
+	do {	if (value)					\
+			d_vpr_e("BugOn");		\
+	} while (0)
+#endif

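Driver message bits live in the low half of the mask, while the bits covered by FW_LOGMASK carry the firmware log level, which is presumably extracted by shifting them down, along the lines of this sketch (not part of this commit):

#include "msm_vidc_debug.h"

static inline u32 example_fw_log_level(u32 debug_mask)
{
	/* e.g. debug_mask = VIDC_ERR | VIDC_HIGH | FW_ERROR | FW_PRINTK */
	return (debug_mask & FW_LOGMASK) >> FW_LOGSHIFT;
}
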
+ 41 - 0
driver/vidc/inc/msm_vidc_driver.h

@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_DRIVER_H_
+#define _MSM_VIDC_DRIVER_H_
+
+#include <linux/workqueue.h>
+#include <linux/iommu.h>
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+
+static inline bool is_decode_session(struct msm_vidc_inst *inst)
+{
+	return inst->domain == MSM_VIDC_DECODER;
+}
+
+static inline bool is_encode_session(struct msm_vidc_inst *inst)
+{
+	return inst->domain == MSM_VIDC_ENCODER;
+}
+
+int msm_vidc_add_session(struct msm_vidc_inst *inst);
+int msm_vidc_core_init(struct msm_vidc_core *core);
+int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *data);
+int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
+		enum msm_vidc_ssr_trigger_type type);
+void msm_vidc_ssr_handler(struct work_struct *work);
+void msm_vidc_pm_work_handler(struct work_struct *work);
+void msm_vidc_fw_unload_handler(struct work_struct *work);
+void msm_vidc_batch_handler(struct work_struct *work);
+int msm_vidc_setup_event_queue(struct msm_vidc_inst *inst);
+int msm_vidc_queue_init(struct msm_vidc_inst *inst);
+u32 msm_vidc_convert_color_fmt(u32 v4l2_fmt);
+
+#endif // _MSM_VIDC_DRIVER_H_
+

+ 227 - 0
driver/vidc/inc/msm_vidc_dt.h

@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_DT_H_
+#define _MSM_VIDC_DT_H_
+
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#include "msm_vidc_internal.h"
+
+/*
+ * These are helper macros to iterate over various lists within
+ * msm_vidc_core->dt.  The intention is to cut down on a lot of boiler-plate
+ * code
+ */
+
+/* Read as "for each 'thing' in a set of 'thingies'" */
+#define venus_hfi_for_each_thing(__device, __thing, __thingy) \
+	venus_hfi_for_each_thing_continue(__device, __thing, __thingy, 0)
+
+#define venus_hfi_for_each_thing_reverse(__device, __thing, __thingy) \
+	venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+			(__device)->dt->__thingy##_set.count - 1)
+
+/* TODO: the __from parameter is technically not required since we can figure
+ * it out with some pointer magic (i.e. __thing - __thing##_tbl[0]). If this
+ * macro sees extensive use, it is probably worth cleaning up, but for now the
+ * cleanup is omitted since it introduces unnecessary complexity.
+ */
+ */
+#define venus_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \
+	for (__thing = &(__device)->dt->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing < &(__device)->dt->__thingy##_set.__thingy##_tbl[0] + \
+			((__device)->dt->__thingy##_set.count - __from); \
+		++__thing)
+
+#define venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+		__from) \
+	for (__thing = &(__device)->dt->\
+			__thingy##_set.__thingy##_tbl[__from]; \
+		__thing >= &(__device)->dt->__thingy##_set.__thingy##_tbl[0]; \
+		--__thing)
+
+/* Regular set helpers */
+#define venus_hfi_for_each_regulator(__device, __rinfo) \
+	venus_hfi_for_each_thing(__device, __rinfo, regulator)
+
+#define venus_hfi_for_each_regulator_reverse(__device, __rinfo) \
+	venus_hfi_for_each_thing_reverse(__device, __rinfo, regulator)
+
+#define venus_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \
+		__from) \
+	venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			regulator, __from)
+
+/* Clock set helpers */
+#define venus_hfi_for_each_clock(__device, __cinfo) \
+	venus_hfi_for_each_thing(__device, __cinfo, clock)
+
+#define venus_hfi_for_each_clock_reverse(__device, __cinfo) \
+	venus_hfi_for_each_thing_reverse(__device, __cinfo, clock)
+
+#define venus_hfi_for_each_clock_reverse_continue(__device, __rinfo, \
+		__from) \
+	venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+			clock, __from)
+
+/* Bus set helpers */
+#define venus_hfi_for_each_bus(__device, __binfo) \
+	venus_hfi_for_each_thing(__device, __binfo, bus)
+#define venus_hfi_for_each_bus_reverse(__device, __binfo) \
+	venus_hfi_for_each_thing_reverse(__device, __binfo, bus)
+
+/* Subcache set helpers */
+#define venus_hfi_for_each_subcache(__device, __sinfo) \
+	venus_hfi_for_each_thing(__device, __sinfo, subcache)
+#define venus_hfi_for_each_subcache_reverse(__device, __sinfo) \
+	venus_hfi_for_each_thing_reverse(__device, __sinfo, subcache)
+
+struct reg_value_pair {
+	u32 reg;
+	u32 value;
+	u32 mask;
+};
+
+struct reg_set {
+	struct reg_value_pair *reg_tbl;
+	u32 count;
+};
+
+struct addr_range {
+	u32 start;
+	u32 size;
+};
+
+struct addr_set {
+	struct addr_range *addr_tbl;
+	u32 count;
+};
+
+struct context_bank_info {
+	struct list_head list;
+	const char *name;
+	u32 buffer_type;
+	bool is_secure;
+	struct addr_range addr_range;
+	struct device *dev;
+	struct iommu_domain *domain;
+};
+
+struct buffer_usage_table {
+	u32 buffer_type;
+	u32 tz_usage;
+};
+
+struct buffer_usage_set {
+	struct buffer_usage_table *buffer_usage_tbl;
+	u32 count;
+};
+
+struct regulator_info {
+	struct regulator *regulator;
+	bool has_hw_power_collapse;
+	char *name;
+};
+
+struct regulator_set {
+	struct regulator_info *regulator_tbl;
+	u32 count;
+};
+
+struct clock_info {
+	const char *name;
+	struct clk *clk;
+	u32 count;
+	bool has_scaling;
+	bool has_mem_retention;
+};
+
+struct clock_set {
+	struct clock_info *clock_tbl;
+	u32 count;
+};
+
+struct bus_info {
+	const char *name;
+	u32 range[2];
+	struct device *dev;
+	struct icc_path *path;
+};
+
+struct bus_set {
+	struct bus_info *bus_tbl;
+	u32 count;
+};
+
+struct reset_info {
+	struct reset_control *rst;
+	const char *name;
+};
+
+struct reset_set {
+	struct reset_info *reset_tbl;
+	u32 count;
+};
+
+struct allowed_clock_rates_table {
+	u32 clock_rate;
+};
+
+struct clock_profile_entry {
+	u32 codec_mask;
+	u32 vpp_cycles;
+	u32 vsp_cycles;
+	u32 low_power_cycles;
+};
+
+struct clock_freq_table {
+	struct clock_profile_entry *clk_prof_entries;
+	u32 count;
+};
+
+struct subcache_info {
+	const char *name;
+	bool isactive;
+	bool isset;
+	struct llcc_slice_desc *subcache;
+};
+
+struct subcache_set {
+	struct subcache_info *subcache_tbl;
+	u32 count;
+};
+
+struct msm_vidc_dt {
+	void *core;
+	phys_addr_t register_base;
+	u32 register_size;
+	u32 irq;
+	u32 sku_version;
+	struct allowed_clock_rates_table *allowed_clks_tbl;
+	u32 allowed_clks_tbl_size;
+	struct clock_freq_table clock_freq_tbl;
+	bool sys_cache_present;
+	bool sys_cache_res_set;
+	struct subcache_set subcache_set;
+	struct reg_set reg_set;
+	struct addr_set qdss_addr_set;
+	struct buffer_usage_set buffer_usage_set;
+	struct regulator_set regulator_set;
+	struct clock_set clock_set;
+	struct bus_set bus_set;
+	struct reset_set reset_set;
+	struct list_head context_banks;
+	struct mutex cb_lock;
+	const char *fw_name;
+	void *fw_cookie;
+};
+
+int msm_vidc_init_dt(struct platform_device *pdev);
+int msm_vidc_read_context_bank_resources_from_dt(struct platform_device *pdev);
+void msm_vidc_deinit_dt(struct platform_device *pdev);
+
+#endif // _MSM_VIDC_DT_H_

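An illustration of the iterator macros above (assumed usage, mirroring what venus_hfi.c in this series is expected to do): venus_hfi_for_each_regulator() expands to a for-loop over core->dt->regulator_set.regulator_tbl.

#include "msm_vidc_core.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_dt.h"

static void example_list_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo = NULL;

	venus_hfi_for_each_regulator(core, rinfo) {
		d_vpr_h("regulator %s, hw power collapse %d\n",
			rinfo->name, rinfo->has_hw_power_collapse);
	}
}
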
+ 107 - 0
driver/vidc/inc/msm_vidc_inst.h

@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_INST_H_
+#define _MSM_VIDC_INST_H_
+
+#include "msm_vidc_internal.h"
+
+struct msm_vidc_inst;
+
+#define call_session_op(c, op, ...)			\
+	(((c) && (c)->session_ops && (c)->session_ops->op) ? \
+	((c)->session_ops->op(__VA_ARGS__)) : 0)
+
+struct msm_vidc_session_ops {
+	int (*calc_freq)(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+	int (*calc_bw)(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+	int (*decide_work_route)(struct msm_vidc_inst *inst);
+	int (*decide_work_mode)(struct msm_vidc_inst *inst);
+	int (*decide_core_and_power_mode)(struct msm_vidc_inst *inst);
+	int (*buffer_size)(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type type);
+	int (*min_count)(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type type);
+	int (*extra_count)(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type type);
+};
+
+struct msm_vidc_allocations {
+	struct msm_vidc_alloc_info        scratch;
+	struct msm_vidc_alloc_info        scratch_1;
+	struct msm_vidc_alloc_info        scratch_2;
+	struct msm_vidc_alloc_info        persist;
+	struct msm_vidc_alloc_info        persist_1;
+};
+
+struct msm_vidc_maps {
+	struct msm_vidc_map_info        input;
+	struct msm_vidc_map_info        output;
+	struct msm_vidc_map_info        input_meta;
+	struct msm_vidc_map_info        output_meta;
+	struct msm_vidc_map_info        scratch;
+	struct msm_vidc_map_info        scratch_1;
+	struct msm_vidc_map_info        scratch_2;
+	struct msm_vidc_map_info        persist;
+	struct msm_vidc_map_info        persist_1;
+};
+
+struct msm_vidc_buffers {
+	struct msm_vidc_buffer_info        input;
+	struct msm_vidc_buffer_info        output;
+	struct msm_vidc_buffer_info        input_meta;
+	struct msm_vidc_buffer_info        output_meta;
+	struct msm_vidc_buffer_info        scratch;
+	struct msm_vidc_buffer_info        scratch_1;
+	struct msm_vidc_buffer_info        scratch_2;
+	struct msm_vidc_buffer_info        persist;
+	struct msm_vidc_buffer_info        persist_1;
+};
+
+enum msm_vidc_inst_state {
+	MSM_VIDC_OPEN                      = 1,
+	MSM_VIDC_START_INPUT               = 2,
+	MSM_VIDC_START_OUTPUT              = 3,
+	MSM_VIDC_START                     = 4,
+	MSM_VIDC_DRC                       = 5,
+	MSM_VIDC_DRC_LAST_FLAG             = 6,
+	MSM_VIDC_DRAIN                     = 7,
+	MSM_VIDC_DRAIN_LAST_FLAG           = 8,
+	MSM_VIDC_DRC_DRAIN                 = 9,
+	MSM_VIDC_DRC_DRAIN_LAST_FLAG       = 10,
+	MSM_VIDC_DRAIN_START_INPUT         = 11,
+	MSM_VIDC_ERROR                     = 12,
+};
+
+struct msm_vidc_inst {
+	struct list_head                   list;
+	struct mutex                       lock;
+	enum msm_vidc_inst_state           state;
+	enum msm_vidc_domain_type          domain;
+	enum msm_vidc_codec_type           codec;
+	void                              *core;
+	struct kref                        kref;
+	u32                                session_id;
+	u32                                sid;
+	struct v4l2_format                 fmts[MAX_PORT];
+	struct v4l2_ctrl_handler           ctrl_handler;
+	struct v4l2_fh                     event_handler;
+	struct v4l2_ctrl                 **ctrls;
+	u32                                num_ctrls;
+	struct vb2_queue                   vb2q[MAX_PORT];
+	struct msm_vidc_properties         prop;
+	struct msm_vidc_power              power;
+	struct msm_vidc_buffers            buffers;
+	struct msm_vidc_maps               maps;
+	struct msm_vidc_allocations        allocations;
+	struct msm_vidc_port_settings      port_settings[MAX_PORT];
+	struct msm_vidc_decode_batch       decode_batch;
+	struct msm_vidc_decode_vpp_delay   decode_vpp_delay;
+	struct msm_vidc_session_idle       session_idle;
+	struct list_head                   input_ts;
+	struct list_head                   enc_input_crs;
+	struct list_head                   decode_bitrate_data;
+	struct dentry                     *debugfs_root;
+	struct msm_vidc_debug              debug;
+};
+
+#endif // _MSM_VIDC_INST_H_
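
The call_session_op() macro above is a NULL-tolerant indirect call into the per-variant msm_vidc_session_ops table: it only invokes the callback when the core, its session_ops pointer and the specific op are all populated, and evaluates to 0 otherwise. A minimal sketch of both sides of that contract is shown below; the variant implementation and the returned clock value are hypothetical and exist only to illustrate the dispatch, and the core is assumed to carry a session_ops pointer as the macro requires.

/* Hypothetical variant side (illustration only). */
static int example_calc_freq(struct msm_vidc_inst *inst,
			     struct msm_vidc_buffer *mbuf)
{
	return 240000000; /* made-up frequency in Hz */
}

static const struct msm_vidc_session_ops example_session_ops = {
	.calc_freq = example_calc_freq,
	/* ops left NULL are tolerated: call_session_op() yields 0 for them */
};

/* Caller side: no per-callsite NULL checks needed. */
static int example_scale_clocks(struct msm_vidc_core *core,
				struct msm_vidc_inst *inst,
				struct msm_vidc_buffer *mbuf)
{
	int freq;

	/* expands to example_calc_freq(inst, mbuf) once session_ops is set */
	freq = call_session_op(core, calc_freq, inst, mbuf);
	if (!freq)
		return -EINVAL; /* op missing or reported no rate */
	return 0;
}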

+ 468 - 0
driver/vidc/inc/msm_vidc_internal.h

@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_INTERNAL_H_
+#define _MSM_VIDC_INTERNAL_H_
+
+#include <linux/bits.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define MAX_NAME_LENGTH   128
+#define MAX_MATRIX_COEFFS 9
+#define MAX_BIAS_COEFFS   3
+#define MAX_LIMIT_COEFFS  6
+#define MAX_DEBUGFS_NAME  50
+#define DEFAULT_TIMEOUT   3
+#define DEFAULT_HEIGHT    240
+#define DEFAULT_WIDTH     320
+#define MIN_SUPPORTED_WIDTH   32
+#define MIN_SUPPORTED_HEIGHT  32
+#define DEFAULT_FPS       30
+#define MINIMUM_FPS       1
+#define MAXIMUM_FPS       960
+#define SINGLE_INPUT_BUFFER   1
+#define SINGLE_OUTPUT_BUFFER  1
+#define MAX_NUM_INPUT_BUFFERS    VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
+#define MAX_NUM_OUTPUT_BUFFERS   VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
+#define MAX_SUPPORTED_INSTANCES  16
+#define MAX_BSE_VPP_DELAY        6
+#define DEFAULT_BSE_VPP_DELAY    2
+
+/* Maintains the number of FTB's between each FBD over a window */
+#define DCVS_FTB_WINDOW 16
+/* Superframe can have maximum of 32 frames */
+#define VIDC_SUPERFRAME_MAX 32
+#define COLOR_RANGE_UNSPECIFIED (-1)
+
+#define V4L2_EVENT_VIDC_BASE  10
+#define INPUT_PLANE V4L2_BUF_TYPE_VIDEO_OUTPUT
+#define OUTPUT_PLANE V4L2_BUF_TYPE_VIDEO_CAPTURE
+#define INPUT_META_PLANE V4L2_BUF_TYPE_META_OUTPUT
+#define OUTPUT_META_PLANE V4L2_BUF_TYPE_META_CAPTURE
+
+#define VIDC_IFACEQ_MAX_PKT_SIZE                1024
+#define VIDC_IFACEQ_MED_PKT_SIZE                768
+#define VIDC_IFACEQ_MIN_PKT_SIZE                8
+#define VIDC_IFACEQ_VAR_SMALL_PKT_SIZE          100
+#define VIDC_IFACEQ_VAR_LARGE_PKT_SIZE          512
+#define VIDC_IFACEQ_VAR_HUGE_PKT_SIZE          (1024*12)
+
+#define NUM_MBS_PER_SEC(__height, __width, __fps) \
+	(NUM_MBS_PER_FRAME(__height, __width) * __fps)
+
+#define NUM_MBS_PER_FRAME(__height, __width) \
+	((ALIGN(__height, 16) / 16) * (ALIGN(__width, 16) / 16))
+
+/*
+ * Convert a Q16 number into its integer part and a fractional part up to
+ * two decimal places.
+ * Ex : 105752 / 65536 = 1.61; 1.61 in Q16 = 105752;
+ * Integer part = 105752 / 65536 = 1;
+ * Remainder = 105752 & 0xFFFF = 40216; last 16 bits.
+ * Fractional part = 40216 * 100 / 65536 = 61;
+ * Now convert to FP(1, 61, 100).
+ */
+#define Q16_INT(q) ((q) >> 16)
+#define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16)
+
+enum msm_vidc_domain_type {
+	MSM_VIDC_ENCODER           = BIT(0),
+	MSM_VIDC_DECODER           = BIT(1),
+};
+
+enum msm_vidc_codec_type {
+	MSM_VIDC_H264              = BIT(0),
+	MSM_VIDC_HEVC              = BIT(1),
+	MSM_VIDC_VP9               = BIT(2),
+	MSM_VIDC_MPEG2             = BIT(3),
+};
+
+enum msm_vidc_colorformat_type {
+	MSM_VIDC_FMT_NV12                       = BIT(0),
+	MSM_VIDC_FMT_NV21                       = BIT(1),
+	MSM_VIDC_FMT_NV12_UBWC                  = BIT(2),
+	MSM_VIDC_FMT_NV12_P010_UBWC             = BIT(3),
+	MSM_VIDC_FMT_NV12_TP10_UBWC             = BIT(4),
+	MSM_VIDC_FMT_RGBA8888_UBWC              = BIT(5),
+	MSM_VIDC_FMT_SDE_Y_CBCR_H2V2_P010_VENUS = BIT(6),
+
+enum msm_vidc_buffer_type {
+	MSM_VIDC_QUEUE             = BIT(0),
+	MSM_VIDC_INPUT             = BIT(1),
+	MSM_VIDC_OUTPUT            = BIT(2),
+	MSM_VIDC_INPUT_META        = BIT(3),
+	MSM_VIDC_OUTPUT_META       = BIT(4),
+	MSM_VIDC_DPB               = BIT(5),
+	MSM_VIDC_ARP               = BIT(6),
+	MSM_VIDC_LINE              = BIT(7),
+	MSM_VIDC_BIN               = BIT(8),
+};
+
+enum msm_vidc_buffer_attributes {
+	MSM_VIDC_DEFERRED_SUBMISSION       = BIT(0),
+	MSM_VIDC_READ_ONLY                 = BIT(1),
+	MSM_VIDC_PENDING_RELEASE           = BIT(2),
+};
+
+enum msm_vidc_buffer_region {
+	MSM_VIDC_NON_SECURE                = BIT(0),
+	MSM_VIDC_SECURE_PIXEL              = BIT(1),
+	MSM_VIDC_SECURE_NONPIXEL           = BIT(2),
+	MSM_VIDC_SECURE_BITSTREAM          = BIT(3),
+};
+
+enum msm_vidc_port_type {
+	INPUT_PORT,
+	OUTPUT_PORT,
+	INPUT_META_PORT,
+	OUTPUT_META_PORT,
+	MAX_PORT,
+};
+
+enum msm_vidc_core_data_type {
+	ENC_CODECS = 0,
+	DEC_CODECS,
+	MAX_SESSION_COUNT,
+	MAX_SECURE_SESSION_COUNT,
+	MAX_LOAD,
+	MAX_MBPF,
+	MAX_MBPS,
+	MAX_MBPF_HQ,
+	MAX_MBPS_HQ,
+	MAX_MBPF_B_FRAME,
+	MAX_MBPS_B_FRAME,
+	SW_PC,
+	SW_PC_DELAY,
+	FW_UNLOAD,
+	FW_UNLOAD_DELAY,
+	HW_RESPONSE_TIMEOUT,
+	DEBUG_TIMEOUT,
+	PREFIX_BUF_COUNT_PIX,
+	PREFIX_BUF_SIZE_PIX,
+	PREFIX_BUF_COUNT_NON_PIX,
+	PREFIX_BUF_SIZE_NON_PIX,
+	PAGEFAULT_NON_FATAL,
+	PAGETABLE_CACHING,
+	DCVS,
+	DECODE_BATCH,
+	DECODE_BATCH_TIMEOUT,
+	AV_SYNC_WINDOW_SIZE,
+	CLK_FREQ_THRESHOLD,
+};
+
+enum msm_vidc_instance_data_type {
+	FRAME_WIDTH,
+	FRAME_HEIGHT,
+	MBPF,
+	MBPS,
+	FRAME_RATE,
+	BIT_RATE,
+	CABAC_BIT_RATE,
+	LTR_COUNT,
+	LCU_SIZE,
+	POWER_SAVE_MBPS,
+	SCALE_X,
+	SCALE_Y,
+	PROFILE,
+	LEVEL,
+	I_FRAME_QP,
+	P_FRAME_QP,
+	B_FRAME_QP,
+	B_FRAME,
+	HIER_P_LAYERS,
+	BLUR_WIDTH,
+	BLUR_HEIGHT,
+	SLICE_BYTE,
+	SLICE_MB,
+	SECURE,
+	SECURE_FRAME_WIDTH,
+	SECURE_FRAME_HEIGHT,
+	SECURE_MBPF,
+	SECURE_BIT_RATE,
+	BATCH_MBPF,
+	BATCH_FRAME_RATE,
+	LOSSLESS_FRAME_WIDTH,
+	LOSSLESS_FRAME_HEIGHT,
+	LOSSLESS_MBPF,
+	ALL_INTRA_FRAME_RATE,
+	HEVC_IMAGE_FRAME_WIDTH,
+	HEVC_IMAGE_FRAME_HEIGHT,
+	HEIC_IMAGE_FRAME_WIDTH,
+	HEIC_IMAGE_FRAME_HEIGHT,
+	MB_CYCLES_VSP,
+	MB_CYCLES_VPP,
+	MB_CYCLES_LP,
+	MB_CYCLES_FW,
+	MB_CYCLES_FW_VPP,
+};
+
+enum efuse_purpose {
+	SKU_VERSION = 0,
+};
+
+enum sku_version {
+	SKU_VERSION_0 = 0,
+	SKU_VERSION_1,
+	SKU_VERSION_2,
+};
+
+enum msm_vidc_ssr_trigger_type {
+	SSR_ERR_FATAL = 1,
+	SSR_SW_DIV_BY_ZERO,
+	SSR_HW_WDOG_IRQ,
+};
+
+enum msm_vidc_cache_op {
+	MSM_VIDC_CACHE_CLEAN,
+	MSM_VIDC_CACHE_INVALIDATE,
+	MSM_VIDC_CACHE_CLEAN_INVALIDATE,
+};
+
+enum msm_vidc_dcvs_flags {
+	MSM_VIDC_DCVS_INCR               = BIT(0),
+	MSM_VIDC_DCVS_DECR               = BIT(1),
+};
+
+enum msm_vidc_clock_properties {
+	CLOCK_PROP_HAS_SCALING           = BIT(0),
+	CLOCK_PROP_HAS_MEM_RETENTION     = BIT(1),
+};
+
+enum profiling_points {
+	FRAME_PROCESSING       = 0,
+	MAX_PROFILING_POINTS,
+};
+
+#define HFI_MASK_QHDR_TX_TYPE			0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE			0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE			0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE			0x000000FF
+#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q		0x00
+#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q		0x01
+#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q	0x02
+#define HFI_MASK_QHDR_STATUS			0x000000FF
+
+#define VIDC_IFACEQ_NUMQ					3
+#define VIDC_IFACEQ_CMDQ_IDX				0
+#define VIDC_IFACEQ_MSGQ_IDX				1
+#define VIDC_IFACEQ_DBGQ_IDX				2
+#define VIDC_IFACEQ_MAX_BUF_COUNT			50
+#define VIDC_IFACE_MAX_PARALLEL_CLNTS		16
+#define VIDC_IFACEQ_DFLT_QHDR				0x01010000
+
+struct hfi_queue_table_header {
+	u32 qtbl_version;
+	u32 qtbl_size;
+	u32 qtbl_qhdr0_offset;
+	u32 qtbl_qhdr_size;
+	u32 qtbl_num_q;
+	u32 qtbl_num_active_q;
+	void *device_addr;
+	char name[256];
+};
+
+struct hfi_queue_header {
+	u32 qhdr_status;
+	u32 qhdr_start_addr;
+	u32 qhdr_type;
+	u32 qhdr_q_size;
+	u32 qhdr_pkt_size;
+	u32 qhdr_pkt_drop_cnt;
+	u32 qhdr_rx_wm;
+	u32 qhdr_tx_wm;
+	u32 qhdr_rx_req;
+	u32 qhdr_tx_req;
+	u32 qhdr_rx_irq_status;
+	u32 qhdr_tx_irq_status;
+	u32 qhdr_read_idx;
+	u32 qhdr_write_idx;
+};
+
+#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
+	+ sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
+
+#define VIDC_IFACEQ_QUEUE_SIZE	(VIDC_IFACEQ_MAX_PKT_SIZE *  \
+	VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
+
+#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i)     \
+	(void *)((ptr + sizeof(struct hfi_queue_table_header)) + \
+		(i * sizeof(struct hfi_queue_header)))
+
+#define QDSS_SIZE 4096
+#define SFR_SIZE 4096
+
+#define QUEUE_SIZE (VIDC_IFACEQ_TABLE_SIZE + \
+	(VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ))
+
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+			ALIGNED_QDSS_SIZE, SZ_1M)
+
+struct buf_count {
+	u32                    etb;
+	u32                    ftb;
+	u32                    fbd;
+	u32                    ebd;
+};
+
+struct profile_data {
+	u32                    start;
+	u32                    stop;
+	u32                    cumulative;
+	char                   name[64];
+	u32                    sampling;
+	u32                    average;
+};
+
+struct msm_vidc_debug {
+	struct profile_data    pdata[MAX_PROFILING_POINTS];
+	u32                    profile;
+	u32                    samples;
+	struct buf_count       count;
+};
+
+struct msm_vidc_input_cr_data {
+	struct list_head       list;
+	u32                    index;
+	u32                    input_cr;
+};
+
+struct msm_vidc_timestamps {
+	struct list_head       list;
+	u64                    timestamp_us;
+	u32                    framerate;
+	bool                   is_valid;
+};
+
+struct msm_vidc_session_idle {
+	bool                   idle;
+	u64                    last_activity_time_ns;
+};
+
+struct msm_vidc_port_settings {
+	u32                    aligned_width;
+	u32                    aligned_height;
+	u32                    crop_width;
+	u32                    crop_height;
+	u32                    min_count;
+	u32                    poc;
+};
+
+struct msm_vidc_decode_vpp_delay {
+	bool                   enable;
+	u32                    size;
+};
+
+struct msm_vidc_decode_batch {
+	bool                   enable;
+	u32                    size;
+	struct delayed_work    work;
+};
+
+struct msm_vidc_power {
+	u32                    buffer_counter;
+	u32                    min_threshold;
+	u32                    nom_threshold;
+	u32                    max_threshold;
+	bool                   dcvs_mode;
+	u32                    dcvs_window;
+	u64                    min_freq;
+	u64                    curr_freq;
+	u32                    ddr_bw;
+	u32                    sys_cache_bw;
+	u32                    dcvs_flags;
+};
+
+struct msm_vidc_alloc {
+	enum msm_vidc_buffer_type   buffer_type;
+	enum msm_vidc_buffer_region region;
+	u32                         size;
+	u8                          cached:1;
+	u8                          secure:1;
+	u8                          map_kernel:1;
+	struct dma_buf             *dmabuf;
+	void                       *kvaddr;
+};
+
+struct msm_vidc_alloc_info {
+	struct list_head            list; // list of "struct msm_vidc_alloc"
+};
+
+struct msm_vidc_map {
+	bool                        valid;
+	enum msm_vidc_buffer_type   buffer_type;
+	enum msm_vidc_buffer_region region;
+	struct dma_buf             *dmabuf;
+	u32                         refcount;
+	u64                         device_addr;
+	struct sg_table            *table;
+	struct dma_buf_attachment  *attach;
+};
+
+struct msm_vidc_map_info {
+	struct list_head            list; // list of "struct msm_vidc_map"
+};
+
+struct msm_vidc_buffer {
+	bool                               valid;
+	enum msm_vidc_buffer_type          type;
+	u32                                index;
+	int                                fd;
+	u32                                buffer_size;
+	u32                                data_offset;
+	u32                                data_size;
+	u64                                device_addr;
+	void                              *dmabuf;
+	u32                                flags;
+	u64                                timestamp;
+	enum msm_vidc_buffer_attributes    attr;
+};
+
+struct msm_vidc_buffer_info {
+	struct list_head       list; // list of "struct msm_vidc_buffer"
+	u32                    min_count;
+	u32                    extra_count;
+	u32                    actual_count;
+	u32                    size;
+};
+
+struct msm_vidc_properties {
+	u32                    frame_rate;
+	u32                    operating_rate;
+	u32                    bit_rate;
+	u32                    profile;
+	u32                    level;
+	u32                    entropy_mode;
+	u32                    rc_type;
+};
+
+struct msm_vidc_ssr {
+	bool                               trigger;
+	enum msm_vidc_ssr_trigger_type     ssr_type;
+};
+
+#define call_mem_op(c, op, ...)			\
+	(((c) && (c)->mem_ops && (c)->mem_ops->op) ? \
+	((c)->mem_ops->op(__VA_ARGS__)) : 0)
+
+struct msm_vidc_memory_ops {
+	int (*allocate)(void *inst, struct msm_vidc_buffer *mbuf);
+	int (*dma_map)(void *inst, struct msm_vidc_buffer *mbuf);
+	int (*dma_unmap)(void *inst, struct msm_vidc_buffer *mbuf);
+	int (*free)(void *inst, struct msm_vidc_buffer *mbuf);
+	int (*cache_op)(void *inst, struct msm_vidc_buffer *mbuf,
+				enum msm_vidc_cache_op cache_op);
+};
+
+#endif // _MSM_VIDC_INTERNAL_H_
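
Two of the helpers in this header benefit from a worked example. Q16_INT()/Q16_FRAC() split a Q16 fixed-point value (16 fractional bits) into its integer part and a fraction scaled to two decimal places, exactly as the comment walks through for 105752 (~1.61), and the driver stores frame_rate/operating_rate in that Q16 form. A self-contained userspace sketch of the arithmetic, assuming only the two macros as defined above:

#include <stdio.h>

#define Q16_INT(q)  ((q) >> 16)
#define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16)

int main(void)
{
	unsigned int frame_rate_q16 = 30 << 16;	/* DEFAULT_FPS in Q16 */
	unsigned int example_q16 = 105752;	/* the 1.61 case from the comment */

	/* prints "30.00" and "1.61" */
	printf("%u.%02u\n", Q16_INT(frame_rate_q16), Q16_FRAC(frame_rate_q16));
	printf("%u.%02u\n", Q16_INT(example_q16), Q16_FRAC(example_q16));
	return 0;
}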

+ 23 - 0
driver/vidc/inc/msm_vidc_memory.h

@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_MEMORY_H_
+#define _MSM_VIDC_MEMORY_H_
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_core.h"
+
+int msm_vidc_memory_alloc(struct msm_vidc_core *core,
+	struct msm_vidc_alloc *alloc);
+int msm_vidc_memory_free(struct msm_vidc_core *core,
+	struct msm_vidc_alloc *alloc);
+int msm_vidc_memory_map(struct msm_vidc_core *core,
+	struct msm_vidc_map *map);
+int msm_vidc_memory_unmap(struct msm_vidc_core *core,
+	struct msm_vidc_map *map);
+struct dma_buf *msm_vidc_memory_get_dmabuf(int fd);
+void msm_vidc_memory_put_dmabuf(void *dmabuf);
+
+#endif // _MSM_VIDC_MEMORY_H_

+ 78 - 0
driver/vidc/inc/msm_vidc_platform.h

@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_PLATFORM_H_
+#define _MSM_VIDC_PLATFORM_H_
+
+#include <linux/platform_device.h>
+
+#include "msm_vidc_internal.h"
+
+struct msm_vidc_core_data {
+	enum msm_vidc_core_data_type type;
+	u32 value;
+};
+
+struct msm_vidc_instance_data {
+	enum msm_vidc_instance_data_type type;
+	enum msm_vidc_domain_type domains;
+	enum msm_vidc_codec_type codecs;
+	u32 min;
+	u32 max;
+	u32 step_or_menu;
+	u32 value;
+};
+
+struct msm_vidc_csc_coeff {
+	u32 *vpe_csc_custom_matrix_coeff;
+	u32 *vpe_csc_custom_bias_coeff;
+	u32 *vpe_csc_custom_limit_coeff;
+};
+
+struct msm_vidc_efuse_data {
+	u32 start_address;
+	u32 size;
+	u32 mask;
+	u32 shift;
+	enum efuse_purpose purpose;
+};
+
+struct msm_vidc_ubwc_config_data {
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+};
+
+struct msm_vidc_platform_data {
+	struct msm_vidc_core_data *core_data;
+	u32 core_data_size;
+	struct msm_vidc_instance_data *instance_data;
+	u32 instance_data_size;
+	struct allowed_clock_rates_table *allowed_clks_tbl;
+	u32 allowed_clks_tbl_size;
+	struct msm_vidc_csc_coeff csc_data;
+	struct msm_vidc_ubwc_config_data *ubwc_config;
+};
+
+struct msm_vidc_platform {
+	void *core;
+	struct msm_vidc_platform_data data;
+};
+
+int msm_vidc_init_platform(struct platform_device *pdev);
+int msm_vidc_deinit_platform(struct platform_device *pdev);
+
+#endif // _MSM_VIDC_PLATFORM_H_
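
struct msm_vidc_platform_data is the container a per-target file (such as the waipio platform sources added in this change) fills in: core-level capabilities, per-instance capability ranges, allowed clock rates and the UBWC configuration. The trimmed table below only sketches the shape of that data; every value in it is hypothetical and not taken from the real waipio tables.

/* Illustrative values only - not the actual waipio configuration. */
static struct msm_vidc_core_data example_core_data[] = {
	{ENC_CODECS,          MSM_VIDC_H264 | MSM_VIDC_HEVC},
	{DEC_CODECS,          MSM_VIDC_H264 | MSM_VIDC_HEVC | MSM_VIDC_VP9},
	{MAX_SESSION_COUNT,   16},
	{HW_RESPONSE_TIMEOUT, 1000},
};

static struct msm_vidc_instance_data example_instance_data[] = {
	/* type, domains, codecs, min, max, step_or_menu, default value */
	{FRAME_WIDTH, MSM_VIDC_ENCODER | MSM_VIDC_DECODER,
	 MSM_VIDC_H264 | MSM_VIDC_HEVC, 32, 8192, 1, 1920},
	{FRAME_RATE, MSM_VIDC_ENCODER,
	 MSM_VIDC_H264 | MSM_VIDC_HEVC, 1, 960, 1, 30},
};

static struct msm_vidc_platform_data example_platform_data = {
	.core_data          = example_core_data,
	.core_data_size     = ARRAY_SIZE(example_core_data),
	.instance_data      = example_instance_data,
	.instance_data_size = ARRAY_SIZE(example_instance_data),
};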

+ 55 - 0
driver/vidc/inc/msm_vidc_v4l2.h

@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_V4L2_H_
+#define _MSM_VIDC_V4L2_H_
+
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+
+int msm_v4l2_open(struct file *filp);
+int msm_v4l2_close(struct file *filp);
+int msm_v4l2_querycap(struct file *filp, void *fh,
+		struct v4l2_capability *cap);
+int msm_v4l2_enum_fmt(struct file *file, void *fh,
+		struct v4l2_fmtdesc *f);
+int msm_v4l2_s_fmt(struct file *file, void *fh,
+		struct v4l2_format *f);
+int msm_v4l2_g_fmt(struct file *file, void *fh,
+		struct v4l2_format *f);
+int msm_v4l2_s_ctrl(struct file *file, void *fh,
+		struct v4l2_control *a);
+int msm_v4l2_g_ctrl(struct file *file, void *fh,
+		struct v4l2_control *a);
+int msm_v4l2_reqbufs(struct file *file, void *fh,
+		struct v4l2_requestbuffers *b);
+int msm_v4l2_qbuf(struct file *file, void *fh,
+		struct v4l2_buffer *b);
+int msm_v4l2_dqbuf(struct file *file, void *fh,
+		struct v4l2_buffer *b);
+int msm_v4l2_streamon(struct file *file, void *fh,
+		enum v4l2_buf_type i);
+int msm_v4l2_streamoff(struct file *file, void *fh,
+		enum v4l2_buf_type i);
+int msm_v4l2_subscribe_event(struct v4l2_fh *fh,
+		const struct v4l2_event_subscription *sub);
+int msm_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+		const struct v4l2_event_subscription *sub);
+int msm_v4l2_decoder_cmd(struct file *file, void *fh,
+		struct v4l2_decoder_cmd *dec);
+int msm_v4l2_encoder_cmd(struct file *file, void *fh,
+		struct v4l2_encoder_cmd *enc);
+int msm_v4l2_enum_framesizes(struct file *file, void *fh,
+		struct v4l2_frmsizeenum *fsize);
+int msm_v4l2_queryctrl(struct file *file, void *fh,
+		struct v4l2_queryctrl *ctrl);
+int msm_v4l2_querymenu(struct file *file, void *fh,
+		struct v4l2_querymenu *qmenu);
+unsigned int msm_v4l2_poll(struct file *filp,
+	struct poll_table_struct *pt);
+#endif // _MSM_VIDC_V4L2_H_
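
These handlers follow the stock V4L2 ioctl and file-operation prototypes, so the natural next step (not part of this header) is to register them through a struct v4l2_ioctl_ops / struct v4l2_file_operations pair when the video device is created. The mapping below is a plausible sketch only; the real wiring, expected in msm_vidc_v4l2.c or the probe code, may differ.

static const struct v4l2_file_operations example_v4l2_fops = {
	.owner          = THIS_MODULE,
	.open           = msm_v4l2_open,
	.release        = msm_v4l2_close,
	.unlocked_ioctl = video_ioctl2,
	.poll           = msm_v4l2_poll,
};

static const struct v4l2_ioctl_ops example_v4l2_ioctl_ops = {
	.vidioc_querycap          = msm_v4l2_querycap,
	.vidioc_s_fmt_vid_cap     = msm_v4l2_s_fmt,
	.vidioc_s_fmt_vid_out     = msm_v4l2_s_fmt,
	.vidioc_g_fmt_vid_cap     = msm_v4l2_g_fmt,
	.vidioc_g_fmt_vid_out     = msm_v4l2_g_fmt,
	.vidioc_reqbufs           = msm_v4l2_reqbufs,
	.vidioc_qbuf              = msm_v4l2_qbuf,
	.vidioc_dqbuf             = msm_v4l2_dqbuf,
	.vidioc_streamon          = msm_v4l2_streamon,
	.vidioc_streamoff         = msm_v4l2_streamoff,
	.vidioc_subscribe_event   = msm_v4l2_subscribe_event,
	.vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event,
	.vidioc_decoder_cmd       = msm_v4l2_decoder_cmd,
	.vidioc_encoder_cmd       = msm_v4l2_encoder_cmd,
	.vidioc_enum_framesizes   = msm_v4l2_enum_framesizes,
};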

+ 25 - 0
driver/vidc/inc/msm_vidc_vb2.h

@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_VIDC_VB2_H_
+#define _MSM_VIDC_VB2_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+/* vb2_mem_ops */
+void *msm_vb2_get_userptr(struct device *dev, unsigned long vaddr,
+			unsigned long size, enum dma_data_direction dma_dir);
+void msm_vb2_put_userptr(void *buf_priv);
+
+/* vb2_ops */
+int msm_vidc_queue_setup(struct vb2_queue *q,
+		unsigned int *num_buffers, unsigned int *num_planes,
+		unsigned int sizes[], struct device *alloc_devs[]);
+int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count);
+void msm_vidc_stop_streaming(struct vb2_queue *q);
+void msm_vidc_buf_queue(struct vb2_buffer *vb2);
+void msm_vidc_buf_cleanup(struct vb2_buffer *vb);
+#endif // _MSM_VIDC_VB2_H_
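
Similarly, the callbacks here match the standard videobuf2 vb2_ops / vb2_mem_ops prototypes and would normally be attached to each vb2_queue before vb2_queue_init(). A minimal hypothetical hookup (only the field names are the standard videobuf2 ones; where the driver actually assigns them is not shown here):

static const struct vb2_ops example_vb2_ops = {
	.queue_setup     = msm_vidc_queue_setup,
	.start_streaming = msm_vidc_start_streaming,
	.stop_streaming  = msm_vidc_stop_streaming,
	.buf_queue       = msm_vidc_buf_queue,
	.buf_cleanup     = msm_vidc_buf_cleanup,
};

static const struct vb2_mem_ops example_vb2_mem_ops = {
	.get_userptr = msm_vb2_get_userptr,
	.put_userptr = msm_vb2_put_userptr,
};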

+ 71 - 0
driver/vidc/inc/venus_hfi.h

@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _VENUS_HFI_H_
+#define _VENUS_HFI_H_
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_core.h"
+
+#define VIDC_MAX_NAME_LENGTH		64
+#define VIDC_MAX_PC_SKIP_COUNT		10
+#define VIDC_MAX_SUBCACHES		4
+#define VIDC_MAX_SUBCACHE_SIZE		52
+
+enum vidc_resource_id {
+	VIDC_RESOURCE_NONE,
+	VIDC_RESOURCE_SYSCACHE,
+	VIDC_UNUSED_RESOURCE = 0x10000000,
+};
+
+struct vidc_resource_hdr {
+	enum vidc_resource_id resource_id;
+	void *resource_handle;
+};
+
+struct vidc_buffer_addr_info {
+	enum msm_vidc_buffer_type buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	u32 align_device_addr;
+	u32 extradata_addr;
+	u32 extradata_size;
+	u32 response_required;
+};
+
+struct hfi_resource_subcache_type {
+	u32 size;
+	u32 sc_id;
+};
+
+struct hfi_resource_syscache_info_type {
+	u32 num_entries;
+	struct hfi_resource_subcache_type rg_subcache_entries[1];
+};
+
+int venus_hfi_core_init(struct msm_vidc_core *core);
+int venus_hfi_core_release(struct msm_vidc_core *core);
+int venus_hfi_suspend(struct msm_vidc_core *core);
+int venus_hfi_session_open(struct msm_vidc_core *core, struct msm_vidc_inst *inst);
+void venus_hfi_work_handler(struct work_struct *work);
+void venus_hfi_pm_work_handler(struct work_struct *work);
+
+void __write_register(struct msm_vidc_core *core,
+		u32 reg, u32 value);
+int __read_register(struct msm_vidc_core *core, u32 reg);
+void __disable_unprepare_clks(struct msm_vidc_core *core);
+int __disable_regulators(struct msm_vidc_core *core);
+int __unvote_buses(struct msm_vidc_core *core);
+int __prepare_pc(struct msm_vidc_core *core);
+
+int __reset_ahb2axi_bridge(struct msm_vidc_core *core);
+int __clock_config_on_enable(struct msm_vidc_core *core);
+int __interrupt_init(struct msm_vidc_core *core);
+int __setup_ucregion_memmap(struct msm_vidc_core *core);
+int __raise_interrupt(struct msm_vidc_core *core);
+int __power_off(struct msm_vidc_core *core);
+
+#endif // _VENUS_HFI_H_
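
hfi_resource_syscache_info_type uses the old length-one-array idiom to carry a variable number of subcache entries, bounded by VIDC_MAX_SUBCACHES. The helper below is a hedged sketch of how the host could size and populate such a blob from the DT subcache table; the allocation scheme, ids and sizes are assumptions for illustration, not the actual firmware-interface code.

/* Illustrative only: build a syscache-info blob for 'count' subcaches. */
static struct hfi_resource_syscache_info_type *
example_build_syscache_info(u32 count)
{
	struct hfi_resource_syscache_info_type *info;
	size_t size;
	u32 i;

	if (!count || count > VIDC_MAX_SUBCACHES)
		return NULL;

	/* one subcache entry is already embedded in the struct */
	size = sizeof(*info) +
		(count - 1) * sizeof(struct hfi_resource_subcache_type);
	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return NULL;

	info->num_entries = count;
	for (i = 0; i < count; i++) {
		info->rg_subcache_entries[i].sc_id = i; /* placeholder id */
		info->rg_subcache_entries[i].size = 0;  /* filled from LLCC */
	}
	return info;
}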

+ 21 - 0
driver/vidc/src/hfi_packet.c

@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "hfi_packet.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_debug.h"
+
+int hfi_packet_sys_init(struct msm_vidc_core *core, void *pkt, u32 pkt_size)
+{
+	d_vpr_h("%s()\n", __func__);
+	return 0;
+}
+
+int hfi_packet_sys_pc_prep(struct msm_vidc_core *core, void *pkt, u32 pkt_size)
+{
+	d_vpr_h("%s()\n", __func__);
+	return 0;
+}

+ 108 - 0
driver/vidc/src/msm_vdec.c

@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <media/msm_vidc_utils.h>
+#include <media/msm_media_info.h>
+
+#include "msm_vdec.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_driver.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_platform.h"
+#include "msm_vidc_debug.h"
+
+
+int msm_vdec_inst_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+	struct v4l2_format *f;
+
+	d_vpr_h("%s()\n", __func__);
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	INIT_DELAYED_WORK(&inst->decode_batch.work, msm_vidc_batch_handler);
+
+	f = &inst->fmts[INPUT_PORT];
+	f->type = INPUT_PLANE;
+	f->fmt.pix.width = DEFAULT_WIDTH;
+	f->fmt.pix.height = DEFAULT_HEIGHT;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+	f->fmt.pix.bytesperline = 0;
+	f->fmt.pix.sizeimage = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_INPUT);
+	inst->buffers.input.min_count =
+			call_session_op(core, min_count, inst, MSM_VIDC_INPUT);
+	inst->buffers.input.extra_count =
+			call_session_op(core, extra_count, inst, MSM_VIDC_INPUT);
+	inst->buffers.input.actual_count =
+			inst->buffers.input.min_count +
+			inst->buffers.input.extra_count;
+	inst->buffers.input.size = f->fmt.pix.sizeimage;
+
+	f = &inst->fmts[INPUT_META_PORT];
+	f->type = INPUT_META_PLANE;
+	f->fmt.meta.dataformat = V4L2_PIX_FMT_VIDC_META;
+	f->fmt.meta.buffersize = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_INPUT_META);
+	inst->buffers.input_meta.min_count = inst->buffers.input.min_count;
+	inst->buffers.input_meta.extra_count = inst->buffers.input.extra_count;
+	inst->buffers.input_meta.actual_count = inst->buffers.input.actual_count;
+	inst->buffers.input_meta.size = f->fmt.meta.buffersize;
+
+	f = &inst->fmts[OUTPUT_PORT];
+	f->type = OUTPUT_PLANE;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12_UBWC;
+	f->fmt.pix.width = VENUS_Y_STRIDE(
+		msm_vidc_convert_color_fmt(f->fmt.pix.pixelformat), DEFAULT_WIDTH);
+	f->fmt.pix.height = VENUS_Y_SCANLINES(
+		msm_vidc_convert_color_fmt(f->fmt.pix.pixelformat), DEFAULT_HEIGHT);
+	f->fmt.pix.bytesperline = f->fmt.pix.width;
+	f->fmt.pix.sizeimage = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_OUTPUT);
+	inst->buffers.output.min_count =
+			call_session_op(core, min_count, inst, MSM_VIDC_OUTPUT);
+	inst->buffers.output.extra_count =
+			call_session_op(core, extra_count, inst, MSM_VIDC_OUTPUT);
+	inst->buffers.output.actual_count =
+			inst->buffers.output.min_count +
+			inst->buffers.output.extra_count;
+	inst->buffers.output.size = f->fmt.pix.sizeimage;
+
+	f = &inst->fmts[OUTPUT_META_PORT];
+	f->type = OUTPUT_META_PLANE;
+	f->fmt.meta.dataformat = V4L2_PIX_FMT_VIDC_META;
+	f->fmt.meta.buffersize = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_OUTPUT_META);
+	inst->buffers.output_meta.min_count = inst->buffers.output.min_count;
+	inst->buffers.output_meta.extra_count = inst->buffers.output.extra_count;
+	inst->buffers.output_meta.actual_count = inst->buffers.output.actual_count;
+	inst->buffers.output_meta.size = f->fmt.meta.buffersize;
+
+	inst->prop.frame_rate = DEFAULT_FPS << 16;
+	inst->prop.operating_rate = DEFAULT_FPS << 16;
+
+	return rc;
+}
+
+int msm_vdec_ctrl_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+
+	d_vpr_h("%s()\n", __func__);
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	return rc;
+}

+ 106 - 0
driver/vidc/src/msm_venc.c

@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <media/msm_vidc_utils.h>
+#include <media/msm_media_info.h>
+
+#include "msm_venc.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_driver.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_platform.h"
+#include "msm_vidc_debug.h"
+
+
+int msm_venc_inst_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+	struct v4l2_format *f;
+
+	d_vpr_h("%s()\n", __func__);
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	f = &inst->fmts[OUTPUT_PORT];
+	f->type = OUTPUT_PLANE;
+	f->fmt.pix.width = DEFAULT_WIDTH;
+	f->fmt.pix.height = DEFAULT_HEIGHT;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+	f->fmt.pix.bytesperline = 0;
+	f->fmt.pix.sizeimage = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_OUTPUT);
+	inst->buffers.output.min_count =
+			call_session_op(core, min_count, inst, MSM_VIDC_OUTPUT);
+	inst->buffers.output.extra_count =
+			call_session_op(core, extra_count, inst, MSM_VIDC_OUTPUT);
+	inst->buffers.output.actual_count =
+			inst->buffers.output.min_count +
+			inst->buffers.output.extra_count;
+	inst->buffers.output.size = f->fmt.pix.sizeimage;
+
+	f = &inst->fmts[OUTPUT_META_PORT];
+	f->type = OUTPUT_META_PLANE;
+	f->fmt.meta.dataformat = V4L2_PIX_FMT_VIDC_META;
+	f->fmt.meta.buffersize = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_OUTPUT_META);
+	inst->buffers.output_meta.min_count = inst->buffers.output.min_count;
+	inst->buffers.output_meta.extra_count = inst->buffers.output.extra_count;
+	inst->buffers.output_meta.actual_count = inst->buffers.output.actual_count;
+	inst->buffers.output_meta.size = f->fmt.meta.buffersize;
+
+	f = &inst->fmts[INPUT_PORT];
+	f->type = INPUT_PLANE;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12_UBWC;
+	f->fmt.pix.width = VENUS_Y_STRIDE(
+		msm_vidc_convert_color_fmt(f->fmt.pix.pixelformat), DEFAULT_WIDTH);
+	f->fmt.pix.height = VENUS_Y_SCANLINES(
+		msm_vidc_convert_color_fmt(f->fmt.pix.pixelformat), DEFAULT_HEIGHT);
+	f->fmt.pix.bytesperline = f->fmt.pix.width;
+	f->fmt.pix.sizeimage = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_INPUT);
+	inst->buffers.input.min_count =
+			call_session_op(core, min_count, inst, MSM_VIDC_INPUT);
+	inst->buffers.input.extra_count =
+			call_session_op(core, extra_count, inst, MSM_VIDC_INPUT);
+	inst->buffers.input.actual_count =
+			inst->buffers.input.min_count +
+			inst->buffers.input.extra_count;
+	inst->buffers.input.size = f->fmt.pix.sizeimage;
+
+	f = &inst->fmts[INPUT_META_PORT];
+	f->type = INPUT_META_PLANE;
+	f->fmt.meta.dataformat = V4L2_PIX_FMT_VIDC_META;
+	f->fmt.meta.buffersize = call_session_op(core, buffer_size,
+			inst, MSM_VIDC_INPUT_META);
+	inst->buffers.input_meta.min_count = inst->buffers.input.min_count;
+	inst->buffers.input_meta.extra_count = inst->buffers.input.extra_count;
+	inst->buffers.input_meta.actual_count = inst->buffers.input.actual_count;
+	inst->buffers.input_meta.size = f->fmt.meta.buffersize;
+
+	inst->prop.frame_rate = DEFAULT_FPS << 16;
+	inst->prop.operating_rate = DEFAULT_FPS << 16;
+
+	return rc;
+}
+
+int msm_venc_ctrl_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+
+	d_vpr_h("%s()\n", __func__);
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	return rc;
+}

+ 457 - 0
driver/vidc/src/msm_vidc.c

@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_vidc.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+#include "msm_vdec.h"
+#include "msm_venc.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_driver.h"
+#include "msm_vidc_vb2.h"
+#include "msm_vidc_v4l2.h"
+#include "msm_vidc_debug.h"
+
+#define MSM_VIDC_DRV_NAME "msm_vidc_driver"
+/* kernel/msm-4.19 */
+#define MSM_VIDC_VERSION     ((0 << 16) + (4 << 8) + 19)
+
+#define MAX_EVENTS 30
+
+bool valid_v4l2_buffer(struct v4l2_buffer *b,
+		struct msm_vidc_inst *inst)
+{
+	return true;
+}
+/*
+static int get_poll_flags(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct vb2_queue *outq = &inst->bufq[PORT_INPUT].vb2_bufq;
+	struct vb2_queue *capq = &inst->bufq[PORT_OUTPUT].vb2_bufq;
+	struct vb2_buffer *out_vb = NULL;
+	struct vb2_buffer *cap_vb = NULL;
+	unsigned long flags = 0;
+
+	if (v4l2_event_pending(&inst->event_handler))
+		rc |= POLLPRI;
+
+	spin_lock_irqsave(&capq->done_lock, flags);
+	if (!list_empty(&capq->done_list))
+		cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer,
+								done_entry);
+	if (cap_vb && (cap_vb->state == VB2_BUF_STATE_DONE
+				|| cap_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&capq->done_lock, flags);
+
+	spin_lock_irqsave(&outq->done_lock, flags);
+	if (!list_empty(&outq->done_list))
+		out_vb = list_first_entry(&outq->done_list, struct vb2_buffer,
+								done_entry);
+	if (out_vb && (out_vb->state == VB2_BUF_STATE_DONE
+				|| out_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&outq->done_lock, flags);
+
+	return rc;
+}
+*/
+
+int msm_vidc_poll(void *instance, struct file *filp,
+		struct poll_table_struct *wait)
+{
+/*
+	struct msm_vidc_inst *inst = instance;
+	struct vb2_queue *outq = NULL;
+	struct vb2_queue *capq = NULL;
+
+	if (!inst)
+		return -EINVAL;
+
+	outq = &inst->bufq[PORT_INPUT].vb2_bufq;
+	capq = &inst->bufq[PORT_OUTPUT].vb2_bufq;
+
+	poll_wait(filp, &inst->event_handler.wait, wait);
+	poll_wait(filp, &capq->done_wq, wait);
+	poll_wait(filp, &outq->done_wq, wait);
+	return get_poll_flags(inst);
+*/
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_poll);
+
+int msm_vidc_querycap(void *instance, struct v4l2_capability *cap)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !cap)
+		return -EINVAL;
+
+	strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver));
+	cap->bus_info[0] = 0;
+	cap->version = MSM_VIDC_VERSION;
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+		V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+		V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+
+	if (inst->domain == MSM_VIDC_DECODER)
+		strlcpy(cap->card, "msm_vidc_decoder", sizeof(cap->card));
+	else if (inst->domain == MSM_VIDC_ENCODER)
+		strlcpy(cap->card, "msm_vidc_encoder", sizeof(cap->card));
+	else
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_querycap);
+
+int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !f)
+		return -EINVAL;
+
+	if (inst->domain == MSM_VIDC_DECODER)
+		return 0;//msm_vdec_enum_fmt(instance, f);
+	else if (inst->domain == MSM_VIDC_ENCODER)
+		return 0;//msm_venc_enum_fmt(instance, f);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(msm_vidc_enum_fmt);
+
+int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *q_ctrl)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst = instance;
+	struct v4l2_ctrl *ctrl;
+
+	if (!inst || !q_ctrl) {
+		d_vpr_e("%s: invalid params %pK %pK\n",
+			__func__, inst, q_ctrl);
+		return -EINVAL;
+	}
+
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, q_ctrl->id);
+	if (!ctrl) {
+		s_vpr_e(inst->sid, "%s: get_ctrl failed for id %d\n",
+			__func__, q_ctrl->id);
+		return -EINVAL;
+	}
+	q_ctrl->minimum = ctrl->minimum;
+	q_ctrl->maximum = ctrl->maximum;
+	q_ctrl->default_value = ctrl->default_value;
+	/* remove tier info for HEVC level */
+	if (q_ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_LEVEL) {
+		q_ctrl->minimum &= ~(0xF << 28);
+		q_ctrl->maximum &= ~(0xF << 28);
+	}
+	if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+		q_ctrl->flags = ~(ctrl->menu_skip_mask);
+	} else {
+		q_ctrl->flags = 0;
+		q_ctrl->step = ctrl->step;
+	}
+	s_vpr_h(inst->sid,
+		"query ctrl: %s: min %d, max %d, default %d step %d flags %#x\n",
+		ctrl->name, q_ctrl->minimum, q_ctrl->maximum,
+		q_ctrl->default_value, q_ctrl->step, q_ctrl->flags);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_query_ctrl);
+
+int msm_vidc_query_menu(void *instance, struct v4l2_querymenu *qmenu)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst = instance;
+	struct v4l2_ctrl *ctrl;
+
+	if (!inst || !qmenu) {
+		d_vpr_e("%s: invalid params %pK %pK\n",
+			__func__, inst, qmenu);
+		return -EINVAL;
+	}
+
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, qmenu->id);
+	if (!ctrl) {
+		s_vpr_e(inst->sid, "%s: get_ctrl failed for id %d\n",
+			__func__, qmenu->id);
+		return -EINVAL;
+	}
+	if (ctrl->type != V4L2_CTRL_TYPE_MENU) {
+		s_vpr_e(inst->sid, "%s: ctrl: %s: type (%d) is not MENU type\n",
+			__func__, ctrl->name, ctrl->type);
+		return -EINVAL;
+	}
+	if (qmenu->index < ctrl->minimum || qmenu->index > ctrl->maximum)
+		return -EINVAL;
+
+	if (ctrl->menu_skip_mask & (1 << qmenu->index))
+		rc = -EINVAL;
+
+	s_vpr_h(inst->sid,
+		"%s: ctrl: %s: min %d, max %d, menu_skip_mask %#x, qmenu: id %d, index %d, %s\n",
+		__func__, ctrl->name, ctrl->minimum, ctrl->maximum,
+		ctrl->menu_skip_mask, qmenu->id, qmenu->index,
+		rc ? "not supported" : "supported");
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_query_menu);
+
+int msm_vidc_s_fmt(void *instance, struct v4l2_format *f)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !f)
+		return -EINVAL;
+
+	if (inst->domain == MSM_VIDC_DECODER)
+		rc = 0;//msm_vdec_s_fmt(instance, f);
+	if (inst->domain == MSM_VIDC_ENCODER)
+		rc = 0;//msm_venc_s_fmt(instance, f);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_s_fmt);
+
+int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !f)
+		return -EINVAL;
+
+	if (inst->domain == MSM_VIDC_DECODER)
+		rc = 0;//msm_vdec_g_fmt(instance, f);
+	if (inst->domain == MSM_VIDC_ENCODER)
+		rc = 0;//msm_venc_g_fmt(instance, f);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_g_fmt);
+
+int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !control)
+		return -EINVAL;
+
+	return 0;//msm_comm_s_ctrl(instance, control);
+}
+EXPORT_SYMBOL(msm_vidc_s_ctrl);
+
+int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct v4l2_ctrl *ctrl = NULL;
+	int rc = 0;
+
+	if (!inst || !control)
+		return -EINVAL;
+
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, control->id);
+	if (ctrl) {
+		rc = 0;//try_get_ctrl_for_instance(inst, ctrl);
+		if (!rc)
+			control->value = ctrl->val;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_g_ctrl);
+
+int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_reqbufs);
+
+int msm_vidc_qbuf(void *instance, struct media_device *mdev,
+		struct v4l2_buffer *b)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_qbuf);
+
+int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_dqbuf);
+
+int msm_vidc_streamon(void *instance, enum v4l2_buf_type i)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_streamon);
+
+int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_streamoff);
+
+int msm_vidc_cmd(void *instance, union msm_v4l2_cmd *cmd)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_cmd);
+
+int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_enum_framesizes);
+
+int msm_vidc_subscribe_event(void *inst,
+		const struct v4l2_event_subscription *sub)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	if (!inst || !sub)
+		return -EINVAL;
+
+	rc = v4l2_event_subscribe(&vidc_inst->event_handler,
+		sub, MAX_EVENTS, NULL);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_subscribe_event);
+
+int msm_vidc_unsubscribe_event(void *inst,
+		const struct v4l2_event_subscription *sub)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	if (!inst || !sub)
+		return -EINVAL;
+
+	rc = v4l2_event_unsubscribe(&vidc_inst->event_handler, sub);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_unsubscribe_event);
+
+int msm_vidc_dqevent(void *inst, struct v4l2_event *event)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	if (!inst || !event)
+		return -EINVAL;
+
+	rc = v4l2_event_dequeue(&vidc_inst->event_handler, event, false);
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_dqevent);
+
+void *msm_vidc_open(void *vidc_core, u32 session_type)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst;
+	struct msm_vidc_core *core;
+
+	d_vpr_h("%s()\n", __func__);
+	core = vidc_core;
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return NULL;
+	}
+
+	if (session_type != MSM_VIDC_DECODER &&
+	    session_type != MSM_VIDC_ENCODER) {
+		d_vpr_e("%s: invalid session_type %d\n",
+			__func__, session_type);
+		return NULL;
+	}
+
+	if (core->state == MSM_VIDC_CORE_ERROR) {
+		d_vpr_e("%s: core invalid state\n", __func__);
+		return NULL;
+	}
+
+	if (core->state == MSM_VIDC_CORE_DEINIT) {
+		rc = msm_vidc_core_init(core);
+		if (rc)
+			return NULL;
+	}
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst) {
+		d_vpr_e("%s: failed to allocate inst memory\n", __func__);
+		return NULL;
+	}
+	inst->core = core;
+
+	rc = msm_vidc_add_session(inst);
+	if (rc) {
+		d_vpr_e("%s: failed to get session id\n", __func__);
+		kfree(inst);
+		return NULL;
+	}
+
+	s_vpr_i(inst->sid, "Opening video instance: %d\n", session_type);
+
+	kref_init(&inst->kref);
+	INIT_LIST_HEAD(&inst->buffers.input.list);
+	INIT_LIST_HEAD(&inst->buffers.input_meta.list);
+	INIT_LIST_HEAD(&inst->buffers.output.list);
+	INIT_LIST_HEAD(&inst->buffers.output_meta.list);
+	INIT_LIST_HEAD(&inst->buffers.scratch.list);
+	INIT_LIST_HEAD(&inst->buffers.scratch_1.list);
+	INIT_LIST_HEAD(&inst->buffers.scratch_2.list);
+	INIT_LIST_HEAD(&inst->buffers.persist.list);
+	INIT_LIST_HEAD(&inst->buffers.persist_1.list);
+	inst->domain = session_type;
+	inst->state = MSM_VIDC_OPEN;
+	//inst->debugfs_root =
+	//	msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
+
+	if (is_decode_session(inst)) {
+		rc = msm_vdec_inst_init(inst);
+		if (rc)
+			goto error;
+		rc = msm_vdec_ctrl_init(inst);
+		if (rc)
+			goto error;
+	} else if (is_encode_session(inst)) {
+		rc = msm_venc_inst_init(inst);
+		if (rc)
+			goto error;
+		rc = msm_venc_ctrl_init(inst);
+		if (rc)
+			goto error;
+	}
+
+	rc = msm_vidc_queue_init(inst);
+	if (rc)
+		goto error;
+
+	rc = msm_vidc_setup_event_queue(inst);
+	if (rc)
+		goto error;
+
+	//msm_power_setup(inst);
+	// send cmd to firmware here
+
+	return inst;
+
+error:
+	msm_vidc_close(inst);
+	return NULL;
+}
+EXPORT_SYMBOL(msm_vidc_open);
+
+int msm_vidc_close(void *instance)
+{
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_close);
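
msm_vidc_open() performs the per-instance bring-up (lazy core init, session bookkeeping, default format and buffer-count setup for decoder or encoder sessions) and returns an opaque instance pointer; msm_vidc_close() is its still-stubbed counterpart. The expected consumer is the msm_v4l2_open()/msm_v4l2_close() pair declared in msm_vidc_v4l2.h. The glue below is a hedged sketch that assumes the core pointer was stored as video_device drvdata at probe time and that the node type selects decoder vs encoder; the actual wiring is not in this file.

static int example_open(struct file *filp)
{
	/* assumes video_set_drvdata(vdev, core) was done at probe time */
	struct msm_vidc_core *core = video_drvdata(filp);
	u32 session_type = MSM_VIDC_DECODER; /* or MSM_VIDC_ENCODER per node */
	void *inst;

	inst = msm_vidc_open(core, session_type);
	if (!inst)
		return -ENOMEM;

	filp->private_data = inst;
	return 0;
}

static int example_close(struct file *filp)
{
	int rc = msm_vidc_close(filp->private_data);

	filp->private_data = NULL;
	return rc;
}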

+ 16 - 0
driver/vidc/src/msm_vidc_debug.c

@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_vidc_debug.h"
+
+int msm_vidc_debug = VIDC_HIGH | VIDC_LOW | VIDC_PKT | VIDC_ERR | VIDC_PRINTK |
+	FW_ERROR | FW_FATAL | FW_FTRACE;
+EXPORT_SYMBOL(msm_vidc_debug);
+
+bool msm_vidc_lossless_encode = false;
+EXPORT_SYMBOL(msm_vidc_lossless_encode);
+
+bool msm_vidc_syscache_disable = false;
+EXPORT_SYMBOL(msm_vidc_syscache_disable);

+ 151 - 0
driver/vidc/src/msm_vidc_driver.c

@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iommu.h>
+#include <linux/workqueue.h>
+#include <media/msm_vidc_utils.h>
+#include <media/msm_media_info.h>
+
+#include "msm_vidc_driver.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_debug.h"
+#include "venus_hfi.h"
+
+u32 msm_vidc_convert_color_fmt(u32 v4l2_fmt)
+{
+	switch (v4l2_fmt) {
+	case V4L2_PIX_FMT_NV12:
+		return COLOR_FMT_NV12;
+	case V4L2_PIX_FMT_NV21:
+		return COLOR_FMT_NV21;
+	case V4L2_PIX_FMT_NV12_512:
+		return COLOR_FMT_NV12_512;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
+		return COLOR_FMT_P010;
+	case V4L2_PIX_FMT_NV12_UBWC:
+		return COLOR_FMT_NV12_UBWC;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+		return COLOR_FMT_NV12_BPP10_UBWC;
+	case V4L2_PIX_FMT_RGBA8888_UBWC:
+		return COLOR_FMT_RGBA8888_UBWC;
+	default:
+		d_vpr_e("invalid v4l2 color fmt: %#x, using default NV12\n",
+			v4l2_fmt);
+		return COLOR_FMT_NV12;
+	}
+}
+
+int msm_vidc_setup_event_queue(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	return rc;
+}
+
+int msm_vidc_queue_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	return rc;
+}
+
+int msm_vidc_add_session(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_inst *i;
+	struct msm_vidc_core *core;
+	u32 count = 0;
+
+	if (!inst || !inst->core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(i, &core->instances, list)
+		count++;
+
+	if (count < MAX_SUPPORTED_INSTANCES) {
+		list_add_tail(&inst->list, &core->instances);
+	} else {
+		d_vpr_e("%s: total sessions %d reached max limit %d\n",
+			__func__, count, MAX_SUPPORTED_INSTANCES);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&core->lock);
+
+	/* assign session_id */
+	inst->session_id = count + 1;
+	inst->sid = inst->session_id;
+
+	return rc;
+}
+
+int msm_vidc_core_init(struct msm_vidc_core *core)
+{
+	int rc;
+
+	d_vpr_h("%s()\n", __func__);
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	if (core->state == MSM_VIDC_CORE_ERROR) {
+		d_vpr_e("%s: core invalid state\n", __func__);
+		rc = -EINVAL;
+		goto unlock;
+	}
+	if (core->state == MSM_VIDC_CORE_INIT) {
+		rc = 0;
+		goto unlock;
+	}
+
+	rc = venus_hfi_core_init(core);
+	if (rc) {
+		d_vpr_e("%s: core init failed\n", __func__);
+		core->state = MSM_VIDC_CORE_DEINIT;
+		goto unlock;
+	}
+
+	core->state = MSM_VIDC_CORE_INIT;
+	core->smmu_fault_handled = false;
+	core->ssr.trigger = false;
+
+unlock:
+	mutex_unlock(&core->lock);
+	return rc;
+}
+
+int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *data)
+{
+	return -EINVAL;
+}
+
+int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
+		enum msm_vidc_ssr_trigger_type type)
+{
+	return 0;
+}
+
+void msm_vidc_ssr_handler(struct work_struct *work)
+{
+}
+
+void msm_vidc_pm_work_handler(struct work_struct *work)
+{
+}
+
+void msm_vidc_fw_unload_handler(struct work_struct *work)
+{
+}
+
+void msm_vidc_batch_handler(struct work_struct *work)
+{
+}
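
msm_vidc_convert_color_fmt() maps a V4L2 fourcc to the COLOR_FMT_* identifiers understood by the VENUS_Y_STRIDE()/VENUS_Y_SCANLINES() helpers from msm_media_info.h, and msm_vdec.c/msm_venc.c already pair them that way for their default UBWC formats. The helper below merely restates that pattern for an arbitrary resolution and is illustrative; only the conversion function and the two macros come from this patch and its UAPI header.

/* Illustrative helper: compute aligned plane geometry for a raw format. */
static void example_fill_raw_geometry(struct v4l2_pix_format *pix,
				      u32 v4l2_fmt, u32 width, u32 height)
{
	u32 media_fmt = msm_vidc_convert_color_fmt(v4l2_fmt);

	pix->pixelformat = v4l2_fmt;
	pix->width = VENUS_Y_STRIDE(media_fmt, width);	    /* aligned stride */
	pix->height = VENUS_Y_SCANLINES(media_fmt, height); /* aligned rows */
	pix->bytesperline = pix->width;
}

Called with V4L2_PIX_FMT_NV12_UBWC and 320x240 this reproduces what msm_vdec_inst_init() programs into fmts[OUTPUT_PORT].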

+ 970 - 0
driver/vidc/src/msm_vidc_dt.c

@@ -0,0 +1,970 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/sort.h>
+
+#include "msm_vidc_dt.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_driver.h"
+
+static size_t get_u32_array_num_elements(struct device_node *np,
+					char *name)
+{
+	int len;
+	size_t num_elements = 0;
+
+	if (!of_get_property(np, name, &len)) {
+		d_vpr_e("Failed to read %s from device tree\n", name);
+		goto fail_read;
+	}
+
+	num_elements = len / sizeof(u32);
+	if (!num_elements) {
+		d_vpr_e("%s not specified in device tree\n", name);
+		goto fail_read;
+	}
+	return num_elements;
+
+fail_read:
+	return 0;
+}
+
+/**
+ * msm_vidc_load_u32_table() - load dtsi table entries
+ * @pdev: A pointer to the platform device.
+ * @of_node:      A pointer to the device node.
+ * @table_name:   A pointer to the dtsi table entry name.
+ * @struct_size:  The size of the structure which is nothing but
+ *                a single entry in the dtsi table.
+ * @table:        A pointer to the table pointer which needs to be
+ *                filled by the dtsi table entries.
+ * @num_elements: Number of elements pointer which needs to be filled
+ *                with the number of elements in the table.
+ *
+ * This is a generic implementation to load single or multiple array
+ * table from dtsi. The array elements should be of size equal to u32.
+ *
+ * Return:        Return '0' for success else appropriate error value.
+ */
+static int msm_vidc_load_u32_table(struct platform_device *pdev,
+		struct device_node *of_node, char *table_name, int struct_size,
+		u32 **table, u32 *num_elements)
+{
+	int rc = 0, num_elemts = 0;
+	u32 *ptbl = NULL;
+
+	if (!of_find_property(of_node, table_name, NULL)) {
+		d_vpr_h("%s not found\n", table_name);
+		return 0;
+	}
+
+	num_elemts = get_u32_array_num_elements(of_node, table_name);
+	if (!num_elemts) {
+		d_vpr_e("no elements in %s\n", table_name);
+		return 0;
+	}
+	num_elemts /= struct_size / sizeof(u32);
+
+	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
+	if (!ptbl) {
+		d_vpr_e("Failed to alloc table %s\n", table_name);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(of_node, table_name, ptbl,
+			num_elemts * struct_size / sizeof(u32))) {
+		d_vpr_e("Failed to read %s\n", table_name);
+		return -EINVAL;
+	}
+
+	*table = ptbl;
+	if (num_elements)
+		*num_elements = num_elemts;
+
+	return rc;
+}
+
+/* A comparator to compare loads (needed later on) */
+static int cmp(const void *a, const void *b)
+{
+	/* want to sort in reverse so flip the comparison */
+	return ((struct allowed_clock_rates_table *)b)->clock_rate -
+		((struct allowed_clock_rates_table *)a)->clock_rate;
+}
+
+static void msm_vidc_free_allowed_clocks_table(struct msm_vidc_dt *dt)
+{
+	dt->allowed_clks_tbl = NULL;
+}
+
+static void msm_vidc_free_reg_table(struct msm_vidc_dt *dt)
+{
+	dt->reg_set.reg_tbl = NULL;
+}
+
+static void msm_vidc_free_qdss_addr_table(struct msm_vidc_dt *dt)
+{
+	dt->qdss_addr_set.addr_tbl = NULL;
+}
+
+static void msm_vidc_free_bus_table(struct msm_vidc_dt *dt)
+{
+	dt->bus_set.bus_tbl = NULL;
+	dt->bus_set.count = 0;
+}
+
+static void msm_vidc_free_buffer_usage_table(struct msm_vidc_dt *dt)
+{
+	dt->buffer_usage_set.buffer_usage_tbl = NULL;
+}
+
+static void msm_vidc_free_regulator_table(struct msm_vidc_dt *dt)
+{
+	int c = 0;
+
+	for (c = 0; c < dt->regulator_set.count; ++c) {
+		struct regulator_info *rinfo =
+			&dt->regulator_set.regulator_tbl[c];
+
+		rinfo->name = NULL;
+	}
+
+	dt->regulator_set.regulator_tbl = NULL;
+	dt->regulator_set.count = 0;
+}
+
+static void msm_vidc_free_clock_table(struct msm_vidc_dt *dt)
+{
+	dt->clock_set.clock_tbl = NULL;
+	dt->clock_set.count = 0;
+}
+
+static int msm_vidc_load_fw_name(struct msm_vidc_core *core)
+{
+	struct platform_device *pdev = core->pdev;
+
+	return of_property_read_string_index(pdev->dev.of_node,
+				"vidc,firmware-name", 0, &core->dt->fw_name);
+}
+
+static int msm_vidc_load_reg_table(struct msm_vidc_core *core)
+{
+	struct reg_set *reg_set;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
+		/*
+		 * qcom,reg-presets is an optional property.  It likely won't be
+		 * present if we don't have any register settings to program
+		 */
+		d_vpr_h("reg-presets not found\n");
+		return 0;
+	}
+
+	reg_set = &dt->reg_set;
+	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+			"qcom,reg-presets");
+	reg_set->count /=  sizeof(*reg_set->reg_tbl) / sizeof(u32);
+
+	if (!reg_set->count) {
+		d_vpr_h("no elements in reg set\n");
+		return rc;
+	}
+
+	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
+			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
+	if (!reg_set->reg_tbl) {
+		d_vpr_e("%s: Failed to alloc register table\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
+		(u32 *)reg_set->reg_tbl, reg_set->count * 3)) {
+		d_vpr_e("Failed to read register table\n");
+		msm_vidc_free_reg_table(core->dt);
+		return -EINVAL;
+	}
+	for (i = 0; i < reg_set->count; i++) {
+		d_vpr_h("reg = %#x, value = %#x, mask = %#x\n",
+			reg_set->reg_tbl[i].reg, reg_set->reg_tbl[i].value,
+			reg_set->reg_tbl[i].mask);
+	}
+	return rc;
+}
+
+static int msm_vidc_load_qdss_table(struct msm_vidc_core *core)
+{
+	struct addr_set *qdss_addr_set;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	int i;
+	int rc = 0;
+
+	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
+		/*
+		 * qcom,qdss-presets is an optional property. It likely won't be
+		 * present if we don't have any register settings to program
+		 */
+		d_vpr_h("qdss-presets not found\n");
+		return rc;
+	}
+
+	qdss_addr_set = &dt->qdss_addr_set;
+	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+					"qcom,qdss-presets");
+	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
+
+	if (!qdss_addr_set->count) {
+		d_vpr_h("no elements in qdss reg set\n");
+		return rc;
+	}
+
+	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
+			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
+			GFP_KERNEL);
+	if (!qdss_addr_set->addr_tbl) {
+		d_vpr_e("%s: Failed to alloc register table\n", __func__);
+		rc = -ENOMEM;
+		goto err_qdss_addr_tbl;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
+		(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
+	if (rc) {
+		d_vpr_e("Failed to read qdss address table\n");
+		msm_vidc_free_qdss_addr_table(core->dt);
+		rc = -EINVAL;
+		goto err_qdss_addr_tbl;
+	}
+
+	for (i = 0; i < qdss_addr_set->count; i++) {
+		d_vpr_h("qdss addr = %x, value = %x\n",
+				qdss_addr_set->addr_tbl[i].start,
+				qdss_addr_set->addr_tbl[i].size);
+	}
+err_qdss_addr_tbl:
+	return rc;
+}
+
+static int msm_vidc_load_subcache_info(struct msm_vidc_core *core)
+{
+	int rc = 0, num_subcaches = 0, c;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	struct subcache_set *subcaches = &dt->subcache_set;
+
+	num_subcaches = of_property_count_strings(pdev->dev.of_node,
+		"cache-slice-names");
+	if (num_subcaches <= 0) {
+		d_vpr_h("No subcaches found\n");
+		goto err_load_subcache_table_fail;
+	}
+
+	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
+		sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
+	if (!subcaches->subcache_tbl) {
+		d_vpr_e("Failed to allocate memory for subcache tbl\n");
+		rc = -ENOMEM;
+		goto err_load_subcache_table_fail;
+	}
+
+	subcaches->count = num_subcaches;
+	d_vpr_h("Found %d subcaches\n", num_subcaches);
+
+	for (c = 0; c < num_subcaches; ++c) {
+		struct subcache_info *vsc = &dt->subcache_set.subcache_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"cache-slice-names", c, &vsc->name);
+	}
+
+	dt->sys_cache_present = true;
+
+	return 0;
+
+err_load_subcache_table_fail:
+	dt->sys_cache_present = false;
+	subcaches->count = 0;
+	subcaches->subcache_tbl = NULL;
+
+	return rc;
+}
+
+static int msm_vidc_load_allowed_clocks_table(
+		struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+
+	if (!of_find_property(pdev->dev.of_node,
+			"qcom,allowed-clock-rates", NULL)) {
+		d_vpr_h("allowed-clock-rates not found\n");
+		return 0;
+	}
+
+	rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node,
+				"qcom,allowed-clock-rates",
+				sizeof(*dt->allowed_clks_tbl),
+				(u32 **)&dt->allowed_clks_tbl,
+				&dt->allowed_clks_tbl_size);
+	if (rc) {
+		d_vpr_e("%s: failed to read allowed clocks table\n", __func__);
+		return rc;
+	}
+
+	sort(dt->allowed_clks_tbl, dt->allowed_clks_tbl_size,
+		 sizeof(*dt->allowed_clks_tbl), cmp, NULL);
+
+	return 0;
+}
+
+static int msm_vidc_load_bus_table(struct msm_vidc_core *core)
+{
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	struct bus_set *buses = &dt->bus_set;
+	int c = 0, num_buses = 0, rc = 0;
+	u32 *bus_ranges = NULL;
+
+	num_buses = of_property_count_strings(pdev->dev.of_node,
+				"interconnect-names");
+	if (num_buses <= 0) {
+		d_vpr_e("No buses found\n");
+		return -EINVAL;
+	}
+
+	buses->count = num_buses;
+	d_vpr_h("Found %d bus interconnects\n", num_buses);
+
+	bus_ranges = kzalloc(2 * num_buses * sizeof(*bus_ranges), GFP_KERNEL);
+	if (!bus_ranges) {
+		d_vpr_e("No memory to read bus ranges\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,bus-range-kbps", bus_ranges,
+				num_buses * 2);
+	if (rc) {
+		d_vpr_e(
+			"Failed to read bus ranges: defaulting to <0 INT_MAX>\n");
+		for (c = 0; c < num_buses; c++) {
+			bus_ranges[c * 2] = 0;
+			bus_ranges[c * 2 + 1] = INT_MAX;
+		}
+	}
+
+	buses->bus_tbl = devm_kzalloc(&pdev->dev, num_buses *
+				sizeof(*buses->bus_tbl), GFP_KERNEL);
+	if (!buses->bus_tbl) {
+		d_vpr_e("No memory for bus table\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for (c = 0; c < num_buses; c++) {
+		struct bus_info *bus = &dt->bus_set.bus_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"interconnect-names", c, &bus->name);
+
+		bus->dev = &pdev->dev;
+		bus->range[0] = bus_ranges[c * 2];
+		bus->range[1] = bus_ranges[c * 2 + 1];
+
+		d_vpr_h("Found bus %s\n", bus->name);
+	}
+
+exit:
+	kfree(bus_ranges);
+	return rc;
+}
+
+/* TODO: move this to platform data */
+static int msm_vidc_load_buffer_usage_table(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	struct buffer_usage_set *buffer_usage_set = &dt->buffer_usage_set;
+
+	if (!of_find_property(pdev->dev.of_node,
+				"qcom,buffer-type-tz-usage-table", NULL)) {
+		/*
+		 * qcom,buffer-type-tz-usage-table is an optional property.  It
+		 * likely won't be present if the core doesn't support content
+		 * protection
+		 */
+		d_vpr_h("buffer-type-tz-usage-table not found\n");
+		return 0;
+	}
+
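+	/* convert the raw u32 element count into a table-entry count */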
+	buffer_usage_set->count = get_u32_array_num_elements(
+		pdev->dev.of_node, "qcom,buffer-type-tz-usage-table");
+	buffer_usage_set->count /=
+		sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32);
+	if (!buffer_usage_set->count) {
+		d_vpr_h("no elements in buffer usage set\n");
+		return 0;
+	}
+
+	buffer_usage_set->buffer_usage_tbl = devm_kzalloc(&pdev->dev,
+			buffer_usage_set->count *
+			sizeof(*buffer_usage_set->buffer_usage_tbl),
+			GFP_KERNEL);
+	if (!buffer_usage_set->buffer_usage_tbl) {
+		d_vpr_e("%s: Failed to alloc buffer usage table\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_load_buf_usage;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+		    "qcom,buffer-type-tz-usage-table",
+		(u32 *)buffer_usage_set->buffer_usage_tbl,
+		buffer_usage_set->count *
+		sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32));
+	if (rc) {
+		d_vpr_e("Failed to read buffer usage table\n");
+		goto err_load_buf_usage;
+	}
+
+	return 0;
+err_load_buf_usage:
+	msm_vidc_free_buffer_usage_table(core->dt);
+	return rc;
+}
+
+static int msm_vidc_load_regulator_table(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	struct regulator_set *regulators = &dt->regulator_set;
+	struct device_node *domains_parent_node = NULL;
+	struct property *domains_property = NULL;
+	int reg_count = 0;
+
+	regulators->count = 0;
+	regulators->regulator_tbl = NULL;
+
+	domains_parent_node = pdev->dev.of_node;
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (*(supply + strlen(search_string)) == '\0');
+		if (!matched)
+			continue;
+
+		reg_count++;
+	}
+
+	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
+			sizeof(*regulators->regulator_tbl) *
+			reg_count, GFP_KERNEL);
+
+	if (!regulators->regulator_tbl) {
+		rc = -ENOMEM;
+		d_vpr_e("Failed to alloc memory for regulator table\n");
+		goto err_reg_tbl_alloc;
+	}
+
+	for_each_property_of_node(domains_parent_node, domains_property) {
+		const char *search_string = "-supply";
+		char *supply;
+		bool matched = false;
+		struct device_node *regulator_node = NULL;
+		struct regulator_info *rinfo = NULL;
+
+		/* check if current property is possibly a regulator */
+		supply = strnstr(domains_property->name, search_string,
+				strlen(domains_property->name) + 1);
+		matched = supply && (supply[strlen(search_string)] == '\0');
+		if (!matched)
+			continue;
+
+		/* make sure prop isn't being misused */
+		regulator_node = of_parse_phandle(domains_parent_node,
+				domains_property->name, 0);
+		if (!regulator_node) {
+			d_vpr_e("%s is not a phandle\n",
+				domains_property->name);
+			continue;
+		}
+		regulators->count++;
+
+		/* populate regulator info */
+		rinfo = &regulators->regulator_tbl[regulators->count - 1];
+		rinfo->name = devm_kzalloc(&pdev->dev,
+			(supply - domains_property->name) + 1, GFP_KERNEL);
+		if (!rinfo->name) {
+			rc = -ENOMEM;
+			d_vpr_e("Failed to alloc memory for regulator name\n");
+			goto err_reg_name_alloc;
+		}
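+		/* copy the property name minus its "-supply" suffix, e.g. "xyz-supply" -> "xyz" */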
+		strlcpy(rinfo->name, domains_property->name,
+			(supply - domains_property->name) + 1);
+
+		rinfo->has_hw_power_collapse = of_property_read_bool(
+			regulator_node, "qcom,support-hw-trigger");
+
+		d_vpr_h("Found regulator %s: h/w collapse = %s\n",
+				rinfo->name,
+				rinfo->has_hw_power_collapse ? "yes" : "no");
+	}
+
+	if (!regulators->count)
+		d_vpr_h("No regulators found");
+
+	return 0;
+
+err_reg_name_alloc:
+err_reg_tbl_alloc:
+	msm_vidc_free_regulator_table(core->dt);
+	return rc;
+}
+
+static int msm_vidc_load_clock_table(struct msm_vidc_core *core)
+{
+	int rc = 0, num_clocks = 0, c = 0;
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	int *clock_props = NULL;
+	struct clock_set *clocks = &dt->clock_set;
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"clock-names");
+	if (num_clocks <= 0) {
+		d_vpr_h("No clocks found\n");
+		clocks->count = 0;
+		rc = 0;
+		goto err_load_clk_table_fail;
+	}
+
+	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
+			sizeof(*clock_props), GFP_KERNEL);
+	if (!clock_props) {
+		d_vpr_e("No memory to read clock properties\n");
+		rc = -ENOMEM;
+		goto err_load_clk_table_fail;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,clock-configs", clock_props,
+				num_clocks);
+	if (rc) {
+		d_vpr_e("Failed to read clock properties: %d\n", rc);
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
+			* num_clocks, GFP_KERNEL);
+	if (!clocks->clock_tbl) {
+		d_vpr_e("Failed to allocate memory for clock tbl\n");
+		rc = -ENOMEM;
+		goto err_load_clk_prop_fail;
+	}
+
+	clocks->count = num_clocks;
+	d_vpr_h("Found %d clocks\n", num_clocks);
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct clock_info *vc = &dt->clock_set.clock_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"clock-names", c, &vc->name);
+
+		vc->has_scaling =
+			!!(clock_props[c] & CLOCK_PROP_HAS_SCALING);
+		vc->has_mem_retention =
+			!!(clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION);
+
+		d_vpr_h("Found clock %s: scale-able = %s\n", vc->name,
+			vc->has_scaling ? "yes" : "no");
+	}
+
+	return 0;
+
+err_load_clk_prop_fail:
+err_load_clk_table_fail:
+	return rc;
+}
+
+static int msm_vidc_load_reset_table(struct msm_vidc_core *core)
+{
+	struct platform_device *pdev = core->pdev;
+	struct msm_vidc_dt *dt = core->dt;
+	struct reset_set *rst = &dt->reset_set;
+	int num_clocks = 0, c = 0;
+
+	num_clocks = of_property_count_strings(pdev->dev.of_node,
+				"reset-names");
+	if (num_clocks <= 0) {
+		d_vpr_h("No reset clocks found\n");
+		rst->count = 0;
+		return 0;
+	}
+
+	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
+			sizeof(*rst->reset_tbl), GFP_KERNEL);
+	if (!rst->reset_tbl)
+		return -ENOMEM;
+
+	rst->count = num_clocks;
+	d_vpr_h("Found %d reset clocks\n", num_clocks);
+
+	for (c = 0; c < num_clocks; ++c) {
+		struct reset_info *rc = &dt->reset_set.reset_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+				"reset-names", c, &rc->name);
+	}
+
+	return 0;
+}
+
+static int msm_decide_dt_node(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct platform_device *pdev = core->pdev;
+	u32 sku_index = 0;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "sku-index",
+			&sku_index);
+	if (rc) {
+		d_vpr_h("'sku-index' not found in node\n");
+		return 0;
+	}
+
+	return 0;
+}
+
+static int msm_vidc_read_resources_from_dt(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+	struct msm_vidc_dt *dt;
+	struct resource *kres;
+
+	if (!pdev) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core || !core->dt) {
+		d_vpr_e("%s: core not found in device %s",
+				__func__, dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+	dt = core->dt;
+
+	rc = msm_decide_dt_node(core);
+	if (rc)
+		return rc;
+
+	INIT_LIST_HEAD(&dt->context_banks);
+
+	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dt->register_base = kres ? kres->start : -1;
+	dt->register_size = kres ? (kres->end + 1 - kres->start) : -1;
+
+	kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	dt->irq = kres ? kres->start : -1;
+
+	rc = msm_vidc_load_fw_name(core);
+	if (rc)
+		d_vpr_e("%s: failed to load fw name, rc %d, using default fw\n",
+			__func__, rc);
+
+	rc = msm_vidc_load_subcache_info(core);
+	if (rc)
+		d_vpr_e("Failed to load subcache info: %d\n", rc);
+
+	rc = msm_vidc_load_qdss_table(core);
+	if (rc)
+		d_vpr_e("Failed to load qdss reg table: %d\n", rc);
+
+	rc = msm_vidc_load_reg_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load reg table: %d\n", rc);
+		goto err_load_reg_table;
+	}
+
+	// TODO: move this table to platform
+	rc = msm_vidc_load_buffer_usage_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load buffer usage table: %d\n", rc);
+		goto err_load_buffer_usage_table;
+	}
+
+	rc = msm_vidc_load_regulator_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load list of regulators %d\n", rc);
+		goto err_load_regulator_table;
+	}
+
+	rc = msm_vidc_load_bus_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load bus table: %d\n", rc);
+		goto err_load_bus_table;
+	}
+
+	rc = msm_vidc_load_clock_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load clock table: %d\n", rc);
+		goto err_load_clock_table;
+	}
+
+	// TODO: move this table to platform
+	rc = msm_vidc_load_allowed_clocks_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load allowed clocks table: %d\n", rc);
+		goto err_load_allowed_clocks_table;
+	}
+
+	rc = msm_vidc_load_reset_table(core);
+	if (rc) {
+		d_vpr_e("Failed to load reset table: %d\n", rc);
+		goto err_load_reset_table;
+	}
+
+	return rc;
+
+err_load_reset_table:
+	msm_vidc_free_allowed_clocks_table(core->dt);
+err_load_allowed_clocks_table:
+	msm_vidc_free_clock_table(core->dt);
+err_load_clock_table:
+	msm_vidc_free_bus_table(core->dt);
+err_load_bus_table:
+	msm_vidc_free_regulator_table(core->dt);
+err_load_regulator_table:
+	msm_vidc_free_buffer_usage_table(core->dt);
+err_load_buffer_usage_table:
+	msm_vidc_free_reg_table(core->dt);
+err_load_reg_table:
+	return rc;
+}
+
+static int msm_vidc_setup_context_bank(struct msm_vidc_core *core,
+		struct context_bank_info *cb, struct device *dev)
+{
+	int rc = 0;
+	struct bus_type *bus;
+
+	if (!core || !dev || !cb) {
+		d_vpr_e("%s: Invalid Input params\n", __func__);
+		return -EINVAL;
+	}
+	cb->dev = dev;
+
+	bus = cb->dev->bus;
+	if (IS_ERR_OR_NULL(bus)) {
+		d_vpr_e("%s: failed to get bus type\n", __func__);
+		rc = PTR_ERR(bus) ? PTR_ERR(bus) : -ENODEV;
+		goto remove_cb;
+	}
+
+	cb->domain = iommu_get_domain_for_dev(cb->dev);
+	if (!cb->domain) {
+		d_vpr_e("%s: failed to get iommu domain\n", __func__);
+		rc = -ENODEV;
+		goto remove_cb;
+	}
+
+	/*
+	 * configure device segment size and segment boundary to ensure
+	 * iommu mapping returns one mapping (which is required for partial
+	 * cache operations)
+	 */
+	if (!dev->dma_parms)
+		dev->dma_parms =
+			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+	dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));
+	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
+
+	d_vpr_h("Attached %s and created mapping\n", dev_name(dev));
+	d_vpr_h(
+		"Context bank: %s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, domain: %pK",
+		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
+		cb->addr_range.size, cb->dev, cb->domain);
+
+remove_cb:
+	return rc;
+}
+
+static int msm_vidc_populate_context_bank(struct device *dev,
+		struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct context_bank_info *cb = NULL;
+	struct device_node *np = NULL;
+
+	if (!dev || !core || !core->dt) {
+		d_vpr_e("%s: invalid inputs\n", __func__);
+		return -EINVAL;
+	}
+
+	np = dev->of_node;
+	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		d_vpr_e("%s: Failed to allocate cb\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&cb->list);
+	list_add_tail(&cb->list, &core->dt->context_banks);
+
+	rc = of_property_read_string(np, "label", &cb->name);
+	if (rc) {
+		d_vpr_h("Failed to read cb label from device tree\n");
+		rc = 0;
+	}
+
+	d_vpr_h("%s: context bank has name %s\n", __func__, cb->name);
+	rc = of_property_read_u32_array(np, "virtual-addr-pool",
+			(u32 *)&cb->addr_range, 2);
+	if (rc) {
+		d_vpr_e("Could not read addr pool: context bank: %s %d\n",
+			cb->name, rc);
+		goto err_setup_cb;
+	}
+
+	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
+	d_vpr_h("context bank %s: secure = %d\n",
+			cb->name, cb->is_secure);
+
+	/* setup buffer type for each sub device*/
+	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
+	if (rc) {
+		d_vpr_e("failed to load buffer_type info %d\n", rc);
+		rc = -ENOENT;
+		goto err_setup_cb;
+	}
+	d_vpr_h("context bank %s address start %x size %x buffer_type %x\n",
+		cb->name, cb->addr_range.start,
+		cb->addr_range.size, cb->buffer_type);
+
+	rc = msm_vidc_setup_context_bank(core, cb, dev);
+	if (rc) {
+		d_vpr_e("Cannot setup context bank %d\n", rc);
+		goto err_setup_cb;
+	}
+
+	iommu_set_fault_handler(cb->domain,
+		msm_vidc_smmu_fault_handler, (void *)core);
+
+	return 0;
+
+err_setup_cb:
+	list_del(&cb->list);
+	return rc;
+}
+
+int msm_vidc_read_context_bank_resources_from_dt(struct platform_device *pdev)
+{
+	struct msm_vidc_core *core;
+	int rc = 0;
+
+	if (!pdev) {
+		d_vpr_e("Invalid platform device\n");
+		return -EINVAL;
+	} else if (!pdev->dev.parent) {
+		d_vpr_e("Failed to find a parent for %s\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		d_vpr_e("Failed to find cookie in parent device %s",
+				dev_name(pdev->dev.parent));
+		return -EINVAL;
+	}
+
+	rc = msm_vidc_populate_context_bank(&pdev->dev, core);
+	if (rc)
+		d_vpr_e("Failed to probe context bank\n");
+	else
+		d_vpr_h("Successfully probed context bank\n");
+
+	return rc;
+}
+
+void msm_vidc_deinit_dt(struct platform_device *pdev)
+{
+	struct msm_vidc_core *core;
+
+	if (!pdev) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		d_vpr_e("%s: core not found in device %s",
+				__func__, dev_name(&pdev->dev));
+		return;
+	} else if (!core->dt) {
+		d_vpr_e("%s: invalid dt in device %s",
+				__func__, dev_name(&pdev->dev));
+		return;
+	}
+
+	msm_vidc_free_clock_table(core->dt);
+	msm_vidc_free_regulator_table(core->dt);
+	msm_vidc_free_allowed_clocks_table(core->dt);
+	msm_vidc_free_reg_table(core->dt);
+	msm_vidc_free_qdss_addr_table(core->dt);
+	msm_vidc_free_bus_table(core->dt);
+	msm_vidc_free_buffer_usage_table(core->dt);
+}
+
+int msm_vidc_init_dt(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_dt *dt;
+	struct msm_vidc_core *core;
+
+	if (!pdev) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		d_vpr_e("%s: core not found in device %s",
+				__func__, dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+
+	dt = kzalloc(sizeof(struct msm_vidc_dt), GFP_KERNEL);
+	if (!dt)
+		return -ENOMEM;
+
+	core->dt = dt;
+	dt->core = core;
+
+	rc = msm_vidc_read_resources_from_dt(pdev);
+	if (rc)
+		return rc;
+
+	return 0;
+}

+ 407 - 0
driver/vidc/src/msm_vidc_memory.c

@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/msm_ion.h>
+#include <linux/ion.h>
+
+#include "msm_vidc_memory.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_dt.h"
+#include "msm_vidc_core.h"
+
+
+static int get_ion_secure_flag(enum msm_vidc_buffer_region region)
+{
+	u32 ion_flag = 0;
+
+	switch (region) {
+	case MSM_VIDC_SECURE_PIXEL:
+		ion_flag = ION_FLAG_CP_PIXEL;
+		break;
+	case MSM_VIDC_SECURE_NONPIXEL:
+		ion_flag = ION_FLAG_CP_NON_PIXEL;
+		break;
+	case MSM_VIDC_SECURE_BITSTREAM:
+		ion_flag = ION_FLAG_CP_BITSTREAM;
+		break;
+	default:
+		d_vpr_e("invalid secure region : %#x\n", region);
+	}
+
+	return ion_flag;
+}
+
+struct context_bank_info *get_context_bank(struct msm_vidc_core *core,
+		enum msm_vidc_buffer_region region)
+{
+	char *name;
+	struct context_bank_info *cb = NULL, *match = NULL;
+
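+	/* names below must match the "label" strings of the DT context-bank nodes */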
+	switch (region) {
+	case MSM_VIDC_NON_SECURE:
+		name = "venus_ns";
+		break;
+	case MSM_VIDC_SECURE_PIXEL:
+		name = "venus_sec_pixel";
+		break;
+	case MSM_VIDC_SECURE_NONPIXEL:
+		name = "venus_sec_non_pixel";
+		break;
+	case MSM_VIDC_SECURE_BITSTREAM:
+		name = "venus_sec_bitstream";
+		break;
+	default:
+		d_vpr_e("invalid region : %#x\n", region);
+		return NULL;
+	}
+
+	list_for_each_entry(cb, &core->dt->context_banks, list) {
+		if (!strcmp(cb->name, name)) {
+			match = cb;
+			break;
+		}
+	}
+	if (!match)
+		d_vpr_e("cb not found for region %#x\n", region);
+
+	return match;
+}
+
+struct dma_buf *msm_vidc_memory_get_dmabuf(int fd)
+{
+	struct dma_buf *dmabuf;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dmabuf)) {
+		d_vpr_e("Failed to get dmabuf for %d, error %ld\n",
+				fd, PTR_ERR(dmabuf));
+		dmabuf = NULL;
+	}
+
+	return dmabuf;
+}
+
+void msm_vidc_memory_put_dmabuf(void *dmabuf)
+{
+	if (!dmabuf) {
+		d_vpr_e("%s: NULL dmabuf\n", __func__);
+		return;
+	}
+
+	dma_buf_put((struct dma_buf *)dmabuf);
+}
+
+int msm_vidc_memory_map(struct msm_vidc_core *core, struct msm_vidc_map *map)
+{
+	int rc = 0;
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct context_bank_info *cb = NULL;
+
+	if (!core || !map) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
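+	/* buffer already mapped: just take an extra reference */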
+	if (map->refcount) {
+		map->refcount++;
+		return 0;
+	}
+
+	cb = get_context_bank(core, map->region);
+	if (!cb) {
+		d_vpr_e("%s: Failed to get context bank device\n",
+			 __func__);
+		rc = -EIO;
+		goto error_cb;
+	}
+
+	/* Prepare a dma buf for dma on the given device */
+	attach = dma_buf_attach(map->dmabuf, cb->dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		rc = PTR_ERR(attach) ? PTR_ERR(attach) : -ENOMEM;
+		d_vpr_e("Failed to attach dmabuf\n");
+		goto error_attach;
+	}
+
+	/*
+	 * Get the scatterlist for the given attachment
+	 * Mapping of sg is taken care by map attachment
+	 */
+	attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
+	/*
+	 * We do not need dma_map function to perform cache operations
+	 * on the whole buffer size and hence pass skip sync flag.
+	 * We do the required cache operations separately for the
+	 * required buffer size
+	 */
+	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	if (core->dt->sys_cache_present)
+		attach->dma_map_attrs |=
+			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(table)) {
+		rc = PTR_ERR(table) ? PTR_ERR(table) : -ENOMEM;
+		d_vpr_e("Failed to map table\n");
+		goto error_table;
+	}
+	if (!table->sgl) {
+		d_vpr_e("sgl is NULL\n");
+		rc = -ENOMEM;
+		goto error_sg;
+	}
+
+	map->device_addr = table->sgl->dma_address;
+	map->table = table;
+	map->attach = attach;
+	map->refcount++;
+	return 0;
+
+error_sg:
+	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
+error_table:
+	dma_buf_detach(map->dmabuf, attach);
+error_attach:
+error_cb:
+	return rc;
+}
+
+int msm_vidc_memory_unmap(struct msm_vidc_core *core, struct msm_vidc_map *map)
+{
+	int rc = 0;
+
+	if (!core || !map) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (map->refcount) {
+		map->refcount--;
+	} else {
+		d_vpr_e("unmap called while refcount is zero already\n");
+		return -EINVAL;
+	}
+
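+	/* other users still hold this mapping: defer the actual unmap */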
+	if (map->refcount)
+		goto exit;
+
+	dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL);
+	dma_buf_detach(map->dmabuf, map->attach);
+
+	map->device_addr = 0x0;
+	map->dmabuf = NULL;
+	map->attach = NULL;
+	map->table = NULL;
+
+exit:
+	return rc;
+}
+
+int msm_vidc_memory_alloc(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
+{
+	int rc = 0;
+	int ion_flags = 0;
+	int ion_secure_flag = 0;
+	unsigned long heap_mask = 0;
+	int size = 0;
+
+	if (!mem) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	size = ALIGN(mem->size, SZ_4K);
+
+	if (mem->cached)
+		ion_flags |= ION_FLAG_CACHED;
+
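+	/* secure buffers come from the secure ION heap with a region-specific CP flag */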
+	if (mem->secure) {
+		ion_secure_flag = get_ion_secure_flag(mem->region);
+		ion_flags |= ION_FLAG_SECURE | ion_secure_flag;
+		heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);
+	} else {
+		heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID);
+	}
+
+	mem->dmabuf = ion_alloc(size, heap_mask, ion_flags);
+	if (IS_ERR_OR_NULL(mem->dmabuf)) {
+		d_vpr_e("%s: ion alloc failed\n", __func__);
+		mem->dmabuf = NULL;
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	if (mem->map_kernel) {
+		dma_buf_begin_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
+		mem->kvaddr = dma_buf_vmap(mem->dmabuf);
+		if (!mem->kvaddr) {
+			d_vpr_e("%s: kernel map failed\n", __func__);
+			rc = -EIO;
+			goto error;
+		}
+	}
+
+	d_vpr_h(
+		"%s: dmabuf = %pK, size = %d, kvaddr = %pK, buffer_type = %#x\n",
+		__func__, mem->dmabuf, mem->size,
+		mem->kvaddr, mem->buffer_type);
+	return 0;
+
+error:
+	msm_vidc_memory_free(core, mem);
+	return rc;
+}
+
+int msm_vidc_memory_free(struct msm_vidc_core *core, struct msm_vidc_alloc *mem)
+{
+	int rc = 0;
+
+	if (!mem || !mem->dmabuf) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h(
+		"%s: dmabuf = %pK, size = %d, kvaddr = %pK, buffer_type = %#x\n",
+		__func__, mem->dmabuf, mem->size,
+		mem->kvaddr, mem->buffer_type);
+
+	if (mem->kvaddr) {
+		dma_buf_vunmap(mem->dmabuf, mem->kvaddr);
+		mem->kvaddr = NULL;
+		dma_buf_end_cpu_access(mem->dmabuf, DMA_BIDIRECTIONAL);
+	}
+
+	if (mem->dmabuf) {
+		dma_buf_put(mem->dmabuf);
+		mem->dmabuf = NULL;
+	}
+
+	return rc;
+}
+
+/*
+int msm_memory_cache_operations(struct dma_buf *dbuf,
+	enum smem_cache_ops cache_op, unsigned long offset,
+	unsigned long size, u32 sid)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+
+	if (!dbuf) {
+		s_vpr_e(sid, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = dma_buf_get_flags(dbuf, &flags);
+	if (rc) {
+		s_vpr_e(sid, "%s: dma_buf_get_flags failed, err %d\n",
+			__func__, rc);
+		return rc;
+	} else if (!(flags & ION_FLAG_CACHED)) {
+		return rc;
+	}
+
+	switch (cache_op) {
+	case SMEM_CACHE_CLEAN:
+	case SMEM_CACHE_CLEAN_INVALIDATE:
+		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
+				offset, size);
+		if (rc)
+			break;
+		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE,
+				offset, size);
+		break;
+	case SMEM_CACHE_INVALIDATE:
+		rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
+				offset, size);
+		if (rc)
+			break;
+		rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
+				offset, size);
+		break;
+	default:
+		s_vpr_e(sid, "%s: cache (%d) operation not supported\n",
+			__func__, cache_op);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int msm_smem_memory_prefetch(struct msm_vidc_inst *inst)
+{
+	int i, rc = 0;
+	struct memory_regions *vidc_regions = NULL;
+	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];
+
+	if (!inst) {
+		d_vpr_e("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	vidc_regions = &inst->regions;
+	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
+		s_vpr_e(inst->sid, "%s: invalid num_regions %d, max %d\n",
+			__func__, vidc_regions->num_regions,
+			MEMORY_REGIONS_MAX);
+		return -EINVAL;
+	}
+
+	memset(ion_region, 0, sizeof(ion_region));
+	for (i = 0; i < vidc_regions->num_regions; i++) {
+		ion_region[i].size = vidc_regions->region[i].size;
+		ion_region[i].vmid = vidc_regions->region[i].vmid;
+	}
+
+	rc = msm_ion_heap_prefetch(ION_SECURE_HEAP_ID, ion_region,
+		vidc_regions->num_regions);
+	if (rc)
+		s_vpr_e(inst->sid, "%s: prefetch failed, ret: %d\n",
+			__func__, rc);
+	else
+		s_vpr_l(inst->sid, "%s: prefetch succeeded\n", __func__);
+
+	return rc;
+}
+
+int msm_smem_memory_drain(struct msm_vidc_inst *inst)
+{
+	int i, rc = 0;
+	struct memory_regions *vidc_regions = NULL;
+	struct ion_prefetch_region ion_region[MEMORY_REGIONS_MAX];
+
+	if (!inst) {
+		d_vpr_e("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	vidc_regions = &inst->regions;
+	if (vidc_regions->num_regions > MEMORY_REGIONS_MAX) {
+		s_vpr_e(inst->sid, "%s: invalid num_regions %d, max %d\n",
+			__func__, vidc_regions->num_regions,
+			MEMORY_REGIONS_MAX);
+		return -EINVAL;
+	}
+
+	memset(ion_region, 0, sizeof(ion_region));
+	for (i = 0; i < vidc_regions->num_regions; i++) {
+		ion_region[i].size = vidc_regions->region[i].size;
+		ion_region[i].vmid = vidc_regions->region[i].vmid;
+	}
+
+	rc = msm_ion_heap_drain(ION_SECURE_HEAP_ID, ion_region,
+		vidc_regions->num_regions);
+	if (rc)
+		s_vpr_e(inst->sid, "%s: drain failed, ret: %d\n", __func__, rc);
+	else
+		s_vpr_l(inst->sid, "%s: drain succeeded\n", __func__);
+
+	return rc;
+}
+*/

+ 133 - 0
driver/vidc/src/msm_vidc_platform.c

@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of_platform.h>
+
+#include "msm_vidc_waipio.h"
+
+#include "msm_vidc_platform.h"
+#include "msm_vidc_iris2.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_v4l2.h"
+#include "msm_vidc_vb2.h"
+
+
+static struct v4l2_file_operations msm_v4l2_file_operations = {
+	.owner                          = THIS_MODULE,
+	.open                           = msm_v4l2_open,
+	.release                        = msm_v4l2_close,
+	.unlocked_ioctl                 = video_ioctl2,
+	.poll                           = msm_v4l2_poll,
+};
+
+static struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
+	.vidioc_querycap                = msm_v4l2_querycap,
+	.vidioc_enum_fmt_vid_cap        = msm_v4l2_enum_fmt,
+	.vidioc_enum_fmt_vid_out        = msm_v4l2_enum_fmt,
+	.vidioc_enum_framesizes         = msm_v4l2_enum_framesizes,
+	.vidioc_s_fmt_vid_cap_mplane    = msm_v4l2_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane    = msm_v4l2_s_fmt,
+	.vidioc_g_fmt_vid_cap_mplane    = msm_v4l2_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane    = msm_v4l2_g_fmt,
+	.vidioc_reqbufs                 = msm_v4l2_reqbufs,
+	.vidioc_qbuf                    = msm_v4l2_qbuf,
+	.vidioc_dqbuf                   = msm_v4l2_dqbuf,
+	.vidioc_streamon                = msm_v4l2_streamon,
+	.vidioc_streamoff               = msm_v4l2_streamoff,
+	.vidioc_s_ctrl                  = msm_v4l2_s_ctrl,
+	.vidioc_g_ctrl                  = msm_v4l2_g_ctrl,
+	.vidioc_queryctrl               = msm_v4l2_queryctrl,
+	.vidioc_querymenu               = msm_v4l2_querymenu,
+	.vidioc_subscribe_event         = msm_v4l2_subscribe_event,
+	.vidioc_unsubscribe_event       = msm_v4l2_unsubscribe_event,
+	.vidioc_decoder_cmd             = msm_v4l2_decoder_cmd,
+	.vidioc_encoder_cmd             = msm_v4l2_encoder_cmd,
+};
+
+static struct v4l2_ctrl_ops msm_v4l2_ctrl_ops = {
+	//.s_ctrl                         = msm_vidc_s_ctrl,
+};
+
+static struct vb2_ops msm_vb2_ops = {
+	.queue_setup                    = msm_vidc_queue_setup,
+	.start_streaming                = msm_vidc_start_streaming,
+	.buf_queue                      = msm_vidc_buf_queue,
+	.buf_cleanup                    = msm_vidc_buf_cleanup,
+	.stop_streaming                 = msm_vidc_stop_streaming,
+};
+
+static struct vb2_mem_ops msm_vb2_mem_ops = {
+	.get_userptr                    = msm_vb2_get_userptr,
+	.put_userptr                    = msm_vb2_put_userptr,
+};
+
+static int msm_vidc_init_ops(struct msm_vidc_core *core)
+{
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("%s: initialize ops\n", __func__);
+	core->v4l2_file_ops = &msm_v4l2_file_operations;
+	core->v4l2_ioctl_ops = &msm_v4l2_ioctl_ops;
+	core->v4l2_ctrl_ops = &msm_v4l2_ctrl_ops;
+	core->vb2_ops = &msm_vb2_ops;
+	core->vb2_mem_ops = &msm_vb2_mem_ops;
+
+	return 0;
+}
+
+int msm_vidc_init_platform(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_platform *platform;
+	struct msm_vidc_core *core;
+
+	if (!pdev) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("%s()\n", __func__);
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		d_vpr_e("%s: core not found in device %s",
+				__func__, dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+
+	platform = kzalloc(sizeof(struct msm_vidc_platform), GFP_KERNEL);
+	if (!platform)
+		return -ENOMEM;
+
+	core->platform = platform;
+	platform->core = core;
+
+	/* selected ops can be re-assigned in platform specific file */
+	rc = msm_vidc_init_ops(core);
+	if (rc)
+		return rc;
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-vidc")) { // "qcom,msm-vidc-waipio"
+		rc = msm_vidc_init_platform_waipio(core);
+		if (rc)
+			return rc;
+	}
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-vidc")) { // "qcom,msm-vidc-iris2"
+		rc = msm_vidc_init_iris2(core);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+int msm_vidc_deinit_platform(struct platform_device *pdev)
+{
+	return 0;
+}

+ 367 - 0
driver/vidc/src/msm_vidc_probe.c

@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc_driver.h"
+#include "msm_vidc_dt.h"
+#include "msm_vidc_platform.h"
+#include "msm_vidc_core.h"
+#include "venus_hfi.h"
+
+#define BASE_DEVICE_NUMBER 32
+
+static irqreturn_t msm_vidc_isr(int irq, void *data)
+{
+	struct msm_vidc_core *core = data;
+
+	d_vpr_e("%s()\n", __func__);
+
+	disable_irq_nosync(irq);
+	queue_work(core->device_workq, &core->device_work);
+
+	return IRQ_HANDLED;
+}
+
+static int msm_vidc_init_irq(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct msm_vidc_dt *dt;
+
+	d_vpr_h("%s()\n", __func__);
+
+	if (!core || !core->pdev || !core->dt) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	dt = core->dt;
+
+	core->register_base_addr = devm_ioremap_nocache(&core->pdev->dev,
+			dt->register_base, dt->register_size);
+	if (!core->register_base_addr) {
+		d_vpr_e("could not map reg addr %pa of size %d\n",
+			&dt->register_base, dt->register_size);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = request_irq(dt->irq, msm_vidc_isr, IRQF_TRIGGER_HIGH,
+				     "msm_vidc", core);
+	if (unlikely(rc)) {
+		d_vpr_e("%s: request_irq failed\n", __func__);
+		goto exit;
+	}
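+	/* keep the IRQ disabled for now; it is presumably enabled once the HFI queues are ready */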
+	disable_irq_nosync(dt->irq);
+
+	d_vpr_h("%s: reg_base = %pa, reg_size = %d\n",
+		__func__, &dt->register_base, dt->register_size);
+
+	return 0;
+
+exit:
+	if (core->device_workq)
+		destroy_workqueue(core->device_workq);
+
+	return rc;
+}
+
+static struct attribute *msm_vidc_core_attrs[] = {
+	NULL
+};
+
+static struct attribute_group msm_vidc_core_attr_group = {
+	.attrs = msm_vidc_core_attrs,
+};
+
+static const struct of_device_id msm_vidc_dt_match[] = {
+	{.compatible = "qcom,msm-vidc"},
+	{.compatible = "qcom,msm-vidc,context-bank"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
+
+
+void msm_vidc_release_video_device(struct video_device *vdev)
+{
+	d_vpr_e("%s:\n", __func__);
+}
+
+static int msm_vidc_register_video_device(struct msm_vidc_core *core,
+		enum msm_vidc_domain_type type, int nr)
+{
+	int rc = 0;
+	int index;
+
+	d_vpr_h("%s()\n", __func__);
+
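+	/* core->vdev[0] holds the decoder node, core->vdev[1] the encoder node */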
+	if (type == MSM_VIDC_DECODER)
+		index = 0;
+	else if (type == MSM_VIDC_ENCODER)
+		index = 1;
+	else
+		return -EINVAL;
+
+	core->vdev[index].vdev.release =
+		msm_vidc_release_video_device;
+	core->vdev[index].vdev.fops = core->v4l2_file_ops;
+	core->vdev[index].vdev.ioctl_ops = core->v4l2_ioctl_ops;
+	core->vdev[index].vdev.vfl_dir = VFL_DIR_M2M;
+	core->vdev[index].type = type;
+	core->vdev[index].vdev.v4l2_dev = &core->v4l2_dev;
+	core->vdev[index].vdev.device_caps =
+		V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+		V4L2_CAP_STREAMING;
+	rc = video_register_device(&core->vdev[index].vdev,
+					VFL_TYPE_GRABBER, nr);
+	if (rc) {
+		d_vpr_e("Failed to register the video device\n");
+		return rc;
+	}
+	video_set_drvdata(&core->vdev[index].vdev, core);
+	//rc = device_create_file(&core->vdev[index].vdev.dev, &dev_attr_link_name);
+	if (rc) {
+		d_vpr_e("Failed to create video device file\n");
+		video_unregister_device(&core->vdev[index].vdev);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int msm_vidc_initialize_core(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	d_vpr_h("%s()\n", __func__);
+
+	core->state = MSM_VIDC_CORE_DEINIT;
+
+	core->device_workq = create_singlethread_workqueue("device_workq");
+	if (!core->device_workq) {
+		d_vpr_e("%s: create device workq failed\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	core->pm_workq = create_singlethread_workqueue("pm_workq");
+	if (!core->pm_workq) {
+		d_vpr_e("%s: create pm workq failed\n", __func__);
+		destroy_workqueue(core->device_workq);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	mutex_init(&core->lock);
+	INIT_LIST_HEAD(&core->instances);
+	INIT_LIST_HEAD(&core->dangling_instances);
+
+	INIT_WORK(&core->device_work, venus_hfi_work_handler);
+	INIT_DELAYED_WORK(&core->pm_work, venus_hfi_pm_work_handler);
+	INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler);
+	INIT_DELAYED_WORK(&core->batch_work, msm_vidc_batch_handler);
+	INIT_WORK(&core->ssr_work, msm_vidc_ssr_handler);
+
+exit:
+	return rc;
+}
+
+static int msm_vidc_probe_video_device(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+	int nr = BASE_DEVICE_NUMBER;
+
+	d_vpr_h("%s()\n", __func__);
+
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core)
+		return -ENOMEM;
+
+	core->pdev = pdev;
+	dev_set_drvdata(&pdev->dev, core);
+
+	rc = msm_vidc_initialize_core(core);
+	if (rc) {
+		d_vpr_e("%s: init core failed with %d\n", __func__, rc);
+		goto exit;
+	}
+
+	rc = msm_vidc_init_dt(pdev);
+	if (rc) {
+		d_vpr_e("%s: init dt failed with %d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	rc = msm_vidc_init_platform(pdev);
+	if (rc) {
+		d_vpr_e("%s: init platform failed with %d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	rc = msm_vidc_init_irq(core);
+	if (rc)
+		return rc;
+
+	rc = sysfs_create_group(&pdev->dev.kobj, &msm_vidc_core_attr_group);
+	if (rc) {
+		d_vpr_e("Failed to create attributes\n");
+		goto exit;
+	}
+
+	rc = v4l2_device_register(&pdev->dev, &core->v4l2_dev);
+	if (rc) {
+		d_vpr_e("Failed to register v4l2 device\n");
+		goto exit;
+	}
+
+	/* setup the decoder device */
+	rc = msm_vidc_register_video_device(core, MSM_VIDC_DECODER, nr);
+	if (rc) {
+		d_vpr_e("Failed to register video decoder\n");
+		goto exit;
+	}
+
+	/* setup the encoder device */
+	rc = msm_vidc_register_video_device(core, MSM_VIDC_ENCODER, nr + 1);
+	if (rc) {
+		d_vpr_e("Failed to register video encoder\n");
+		goto exit;
+	}
+
+	//rc = msm_vidc_debugfs_init_core(core);
+
+	d_vpr_h("populating sub devices\n");
+	/*
+	 * Trigger probe for each sub-device i.e. qcom,msm-vidc,context-bank.
+	 * When msm_vidc_probe is called for each sub-device, parse the
+	 * context-bank details and store it in core->resources.context_banks
+	 * list.
+	 */
+	rc = of_platform_populate(pdev->dev.of_node, msm_vidc_dt_match, NULL,
+			&pdev->dev);
+	if (rc) {
+		d_vpr_e("Failed to trigger probe for sub-devices\n");
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+static int msm_vidc_probe_context_bank(struct platform_device *pdev)
+{
+	d_vpr_h("%s()\n", __func__);
+
+	return msm_vidc_read_context_bank_resources_from_dt(pdev);
+}
+
+static int msm_vidc_probe(struct platform_device *pdev)
+{
+	d_vpr_h("%s()\n", __func__);
+
+	/*
+	 * Sub devices probe will be triggered by of_platform_populate() towards
+	 * the end of the probe function after msm-vidc device probe is
+	 * completed. Return immediately after completing sub-device probe.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-vidc")) {
+		return msm_vidc_probe_video_device(pdev);
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+				"qcom,msm-vidc,context-bank")) {
+		return msm_vidc_probe_context_bank(pdev);
+	}
+
+	/* How did we end up here? */
+	MSM_VIDC_ERROR(1);
+	return -EINVAL;
+}
+
+static int msm_vidc_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	d_vpr_h("%s()\n", __func__);
+
+/*
+	struct msm_vidc_core *core;
+
+	if (!pdev) {
+		d_vpr_e("%s: invalid input %pK", __func__, pdev);
+		return -EINVAL;
+	}
+
+	core = dev_get_drvdata(&pdev->dev);
+	if (!core) {
+		d_vpr_e("%s: invalid core", __func__);
+		return -EINVAL;
+	}
+
+	if (core->vidc_core_workq)
+		destroy_workqueue(core->vidc_core_workq);
+	vidc_hfi_deinitialize(core->hfi_type, core->device);
+	device_remove_file(&core->vdev[MSM_VIDC_ENCODER].vdev.dev,
+				&dev_attr_link_name);
+	video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
+	device_remove_file(&core->vdev[MSM_VIDC_DECODER].vdev.dev,
+				&dev_attr_link_name);
+	video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
+	v4l2_device_unregister(&core->v4l2_dev);
+
+	//msm_vidc_free_platform_resources(&core->resources);
+	sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group);
+	dev_set_drvdata(&pdev->dev, NULL);
+	mutex_destroy(&core->lock);
+	kfree(core);
+*/
+	return rc;
+}
+
+static struct platform_driver msm_vidc_driver = {
+	.probe = msm_vidc_probe,
+	.remove = msm_vidc_remove,
+	.driver = {
+		.name = "msm_vidc_v4l2",
+		.of_match_table = msm_vidc_dt_match,
+	},
+};
+
+static int __init msm_vidc_init(void)
+{
+	int rc = 0;
+
+	d_vpr_h("%s()\n", __func__);
+
+	rc = platform_driver_register(&msm_vidc_driver);
+	if (rc) {
+		d_vpr_e("Failed to register platform driver\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit msm_vidc_exit(void)
+{
+	d_vpr_h("%s()\n", __func__);
+
+	platform_driver_unregister(&msm_vidc_driver);
+}
+
+module_init(msm_vidc_init);
+module_exit(msm_vidc_exit);
+
+MODULE_SOFTDEP("pre: subsys-pil-tz");
+MODULE_LICENSE("GPL v2");

+ 200 - 0
driver/vidc/src/msm_vidc_v4l2.c

@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_vidc_v4l2.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_debug.h"
+#include "msm_vidc.h"
+
+static struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
+{
+	if (!filp->private_data)
+		return NULL;
+	return container_of(filp->private_data,
+					struct msm_vidc_inst, event_handler);
+}
+
+unsigned int msm_v4l2_poll(struct file *filp, struct poll_table_struct *pt)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(filp, NULL);
+
+	return msm_vidc_poll((void *)vidc_inst, filp, pt);
+}
+
+int msm_v4l2_open(struct file *filp)
+{
+	struct video_device *vdev = video_devdata(filp);
+	struct msm_video_device *vid_dev =
+		container_of(vdev, struct msm_video_device, vdev);
+	struct msm_vidc_core *core = video_drvdata(filp);
+	struct msm_vidc_inst *vidc_inst;
+
+	vidc_inst = msm_vidc_open(core, vid_dev->type);
+	if (!vidc_inst) {
+		d_vpr_e("Failed to create instance, type = %d\n",
+			vid_dev->type);
+		return -ENOMEM;
+	}
+	clear_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags);
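+	/* stash the event handler; get_vidc_inst() recovers the instance via container_of() */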
+	filp->private_data = &(vidc_inst->event_handler);
+	return 0;
+}
+
+int msm_v4l2_close(struct file *filp)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst;
+
+	vidc_inst = get_vidc_inst(filp, NULL);
+
+	rc = msm_vidc_close(vidc_inst);
+	filp->private_data = NULL;
+	return rc;
+}
+
+int msm_v4l2_querycap(struct file *filp, void *fh,
+			struct v4l2_capability *cap)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(filp, fh);
+
+	return msm_vidc_querycap((void *)vidc_inst, cap);
+}
+
+int msm_v4l2_enum_fmt(struct file *file, void *fh,
+					struct v4l2_fmtdesc *f)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_enum_fmt((void *)vidc_inst, f);
+}
+
+int msm_v4l2_s_fmt(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_s_fmt((void *)vidc_inst, f);
+}
+
+int msm_v4l2_g_fmt(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_g_fmt((void *)vidc_inst, f);
+}
+
+int msm_v4l2_s_ctrl(struct file *file, void *fh,
+					struct v4l2_control *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_s_ctrl((void *)vidc_inst, a);
+}
+
+int msm_v4l2_g_ctrl(struct file *file, void *fh,
+					struct v4l2_control *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_g_ctrl((void *)vidc_inst, a);
+}
+
+int msm_v4l2_reqbufs(struct file *file, void *fh,
+				struct v4l2_requestbuffers *b)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_reqbufs((void *)vidc_inst, b);
+}
+
+int msm_v4l2_qbuf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	return msm_vidc_qbuf(get_vidc_inst(file, fh), vdev->v4l2_dev->mdev, b);
+}
+
+int msm_v4l2_dqbuf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	return msm_vidc_dqbuf(get_vidc_inst(file, fh), b);
+}
+
+int msm_v4l2_streamon(struct file *file, void *fh,
+				enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_streamon((void *)vidc_inst, i);
+}
+
+int msm_v4l2_streamoff(struct file *file, void *fh,
+				enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_streamoff((void *)vidc_inst, i);
+}
+
+int msm_v4l2_subscribe_event(struct v4l2_fh *fh,
+				const struct v4l2_event_subscription *sub)
+{
+	struct msm_vidc_inst *vidc_inst = container_of(fh,
+			struct msm_vidc_inst, event_handler);
+
+	return msm_vidc_subscribe_event((void *)vidc_inst, sub);
+}
+
+int msm_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+				const struct v4l2_event_subscription *sub)
+{
+	struct msm_vidc_inst *vidc_inst = container_of(fh,
+			struct msm_vidc_inst, event_handler);
+
+	return msm_vidc_unsubscribe_event((void *)vidc_inst, sub);
+}
+
+int msm_v4l2_decoder_cmd(struct file *file, void *fh,
+				struct v4l2_decoder_cmd *dec)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_cmd((void *)vidc_inst, (union msm_v4l2_cmd *)dec);
+}
+
+int msm_v4l2_encoder_cmd(struct file *file, void *fh,
+				struct v4l2_encoder_cmd *enc)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_cmd((void *)vidc_inst, (union msm_v4l2_cmd *)enc);
+}
+
+int msm_v4l2_enum_framesizes(struct file *file, void *fh,
+				struct v4l2_frmsizeenum *fsize)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_enum_framesizes((void *)vidc_inst, fsize);
+}
+
+int msm_v4l2_queryctrl(struct file *file, void *fh,
+	struct v4l2_queryctrl *ctrl)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_query_ctrl((void *)vidc_inst, ctrl);
+}
+
+int msm_v4l2_querymenu(struct file *file, void *fh,
+	struct v4l2_querymenu *qmenu)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_query_menu((void *)vidc_inst, qmenu);
+}

+ 44 - 0
driver/vidc/src/msm_vidc_vb2.c

@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_vidc_vb2.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_inst.h"
+#include "msm_vidc_internal.h"
+#include "msm_vidc_debug.h"
+
+void *msm_vb2_get_userptr(struct device *dev, unsigned long vaddr,
+			unsigned long size, enum dma_data_direction dma_dir)
+{
+	return (void *)0xdeadbeef;
+}
+
+void msm_vb2_put_userptr(void *buf_priv)
+{
+}
+
+int msm_vidc_queue_setup(struct vb2_queue *q,
+		unsigned int *num_buffers, unsigned int *num_planes,
+		unsigned int sizes[], struct device *alloc_devs[])
+{
+	return 0;
+}
+
+int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	return 0;
+}
+
+void msm_vidc_stop_streaming(struct vb2_queue *q)
+{
+}
+
+void msm_vidc_buf_queue(struct vb2_buffer *vb2)
+{
+}
+
+void msm_vidc_buf_cleanup(struct vb2_buffer *vb)
+{
+}

+ 2351 - 0
driver/vidc/src/venus_hfi.c

@@ -0,0 +1,2351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk-provider.h>
+#include <linux/iommu.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/irqreturn.h>
+#include <linux/reset.h>
+#include <linux/interconnect.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "venus_hfi.h"
+#include "msm_vidc_core.h"
+#include "msm_vidc_bus.h"
+#include "msm_vidc_dt.h"
+#include "msm_vidc_platform.h"
+#include "msm_vidc_memory.h"
+#include "msm_vidc_debug.h"
+#include "hfi_packet.h"
+
+static int __resume(struct msm_vidc_core *core);
+static int __suspend(struct msm_vidc_core *core);
+
+struct tzbsp_memprot {
+	u32 cp_start;
+	u32 cp_size;
+	u32 cp_nonpixel_start;
+	u32 cp_nonpixel_size;
+};
+
+enum tzbsp_video_state {
+	TZBSP_VIDEO_STATE_SUSPEND = 0,
+	TZBSP_VIDEO_STATE_RESUME = 1,
+	TZBSP_VIDEO_STATE_RESTORE_THRESHOLD = 2,
+};
+
+enum reset_state {
+	INIT = 1,
+	ASSERT,
+	DEASSERT,
+};
+
+/* Less than 50 Mbps is treated as trivial BW change */
+#define TRIVIAL_BW_THRESHOLD 50000
+#define TRIVIAL_BW_CHANGE(a, b) \
+	((a) > (b) ? (a) - (b) < TRIVIAL_BW_THRESHOLD : \
+		(b) - (a) < TRIVIAL_BW_THRESHOLD)
+
+/**
+ * Find the lookup-table entry whose frame size and frame rate cover the
+ * requested width/height/fps; fall back to the largest entry otherwise.
+ */
+struct lut const *__lut(int width, int height, int fps)
+{
+	int frame_size = height * width, c = 0;
+
+	do {
+		if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps)
+			return &LUT[c];
+	} while (++c < ARRAY_SIZE(LUT));
+
+	return &LUT[ARRAY_SIZE(LUT) - 1];
+}
+
+fp_t __compression_ratio(struct lut const *entry, int bpp)
+{
+	int c = 0;
+
+	for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) {
+		if (entry->compression_ratio[c].bpp == bpp)
+			return entry->compression_ratio[c].ratio;
+	}
+
+	WARN(true, "Shouldn't be here, LUT possibly corrupted?\n");
+	return FP_ZERO; /* impossible */
+}
+
+
+void __dump(struct dump dump[], int len)
+{
+	int c = 0;
+
+	for (c = 0; c < len; ++c) {
+		char format_line[128] = "", formatted_line[128] = "";
+
+		if (dump[c].val == DUMP_HEADER_MAGIC) {
+			snprintf(formatted_line, sizeof(formatted_line), "%s\n",
+					 dump[c].key);
+		} else {
+			bool fp_format = !strcmp(dump[c].format, DUMP_FP_FMT);
+
+			if (!fp_format) {
+				snprintf(format_line, sizeof(format_line),
+						 "    %-35s: %s\n", dump[c].key,
+						 dump[c].format);
+				snprintf(formatted_line, sizeof(formatted_line),
+						 format_line, dump[c].val);
+			} else {
+				size_t integer_part, fractional_part;
+
+				integer_part = fp_int(dump[c].val);
+				fractional_part = fp_frac(dump[c].val);
+				snprintf(formatted_line, sizeof(formatted_line),
+						 "    %-35s: %zd + %zd/%zd\n",
+						 dump[c].key, integer_part,
+						 fractional_part,
+						 fp_frac_base());
+
+
+			}
+		}
+		d_vpr_b("%s", formatted_line);
+	}
+}
+
+static void __dump_packet(u8 *packet)
+{
+	u32 c = 0, packet_size = *(u32 *)packet;
+	const int row_size = 32;
+	/*
+	 * row must contain enough for 0xdeadbaad * 8 to be converted into
+	 * "de ad ba ad " * 8 + '\0'
+	 */
+	char row[3 * 32];
+
+	for (c = 0; c * row_size < packet_size; ++c) {
+		int bytes_to_read = ((c + 1) * row_size > packet_size) ?
+			packet_size % row_size : row_size;
+		hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
+				row_size, 4, row, sizeof(row), false);
+		d_vpr_t("%s\n", row);
+	}
+}
+
+static void __fatal_error(struct msm_vidc_core *core, bool fatal)
+{
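+	/* early return: the checks below are currently not exercised */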
+	return;
+	fatal &= core->platform->data.core_data[DEBUG_TIMEOUT].value;
+	MSM_VIDC_ERROR(fatal);
+}
+
+static void __strict_check(struct msm_vidc_core *core)
+{
+	__fatal_error(core, !mutex_is_locked(&core->lock));
+}
+
+static bool __core_in_valid_state(struct msm_vidc_core *core)
+{
+	return core->state != MSM_VIDC_CORE_ERROR;
+}
+
+static bool is_sys_cache_present(struct msm_vidc_core *core)
+{
+	return core->dt->sys_cache_present;
+}
+
+void __write_register(struct msm_vidc_core *core,
+		u32 reg, u32 value)
+{
+	u32 hwiosymaddr = reg;
+	u8 *base_addr;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+
+	__strict_check(core);
+
+	if (!core->power_enabled) {
+		d_vpr_e("HFI Write register failed : Power is OFF\n");
+		__fatal_error(core, true);
+		return;
+	}
+
+	base_addr = core->register_base_addr;
+	d_vpr_l("Base addr: %pK, writing to: %#x, Value: %#x...\n",
+		base_addr, hwiosymaddr, value);
+	base_addr += hwiosymaddr;
+	writel_relaxed(value, base_addr);
+
+	/*
+	 * Memory barrier to make sure value is written into the register.
+	 */
+	wmb();
+}
+
+/*
+ * Argument mask is used to specify which bits to update. In case mask is 0x11,
+ * only bits 0 & 4 will be updated with corresponding bits from value. To update
+ * entire register with value, set mask = 0xFFFFFFFF.
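+ * For example, __write_register_masked(core, reg, 0x10, 0x11) sets bit 4,
+ * clears bit 0 and leaves every other bit of the register untouched.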
+ */
+void __write_register_masked(struct msm_vidc_core *core,
+		u32 reg, u32 value, u32 mask)
+{
+	u32 prev_val, new_val;
+	u8 *base_addr;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+
+	__strict_check(core);
+
+	if (!core->power_enabled) {
+		d_vpr_e("%s: register write failed, power is off\n",
+			__func__);
+		__fatal_error(core, true);
+		return;
+	}
+
+	base_addr = core->register_base_addr;
+	base_addr += reg;
+
+	prev_val = readl_relaxed(base_addr);
+	/*
+	 * Memory barrier to ensure register read is correct
+	 */
+	rmb();
+
+	new_val = (prev_val & ~mask) | (value & mask);
+	d_vpr_l(
+		"Base addr: %pK, writing to: %#x, previous-value: %#x, value: %#x, mask: %#x, new-value: %#x...\n",
+		base_addr, reg, prev_val, value, mask, new_val);
+	writel_relaxed(new_val, base_addr);
+	/*
+	 * Memory barrier to make sure value is written into the register.
+	 */
+	wmb();
+}
+
+int __read_register(struct msm_vidc_core *core, u32 reg)
+{
+	int rc = 0;
+	u8 *base_addr;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	__strict_check(core);
+
+	if (!core->power_enabled) {
+		d_vpr_e("HFI Read register failed : Power is OFF\n");
+		__fatal_error(core, true);
+		return -EINVAL;
+	}
+
+	base_addr = core->register_base_addr;
+
+	rc = readl_relaxed(base_addr + reg);
+	/*
+	 * Memory barrier to make sure value is read correctly from the
+	 * register.
+	 */
+	rmb();
+	d_vpr_l("Base addr: %pK, read from: %#x, value: %#x...\n",
+		base_addr, reg, rc);
+
+	return rc;
+}
+
+static void __schedule_power_collapse_work(struct msm_vidc_core *core)
+{
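+	/* early return: power-collapse scheduling is currently a no-op */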
+	return;
+	if (!core->platform->data.core_data[SW_PC].value)
+		return;
+
+	cancel_delayed_work(&core->pm_work);
+	if (!queue_delayed_work(core->pm_workq,
+			&core->pm_work, msecs_to_jiffies(
+			core->platform->data.core_data[SW_PC_DELAY].value))) {
+		d_vpr_l("PM work already scheduled\n");
+	}
+}
+
+static void __cancel_power_collapse_work(struct msm_vidc_core *core)
+{
+	return;
+	if (!core->platform->data.core_data[SW_PC].value)
+		return;
+
+	cancel_delayed_work(&core->pm_work);
+}
+
+static int __acquire_regulator(struct regulator_info *rinfo,
+				struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+		if (rc) {
+			/*
+			 * This is somewhat fatal, but nothing we can do
+			 * about it. We can't disable the regulator w/o
+			 * getting it back under s/w control
+			 */
+			d_vpr_e(
+				"Failed to acquire regulator control: %s\n",
+				rinfo->name);
+		} else {
+
+			d_vpr_h("Acquire regulator control from HW: %s\n",
+					rinfo->name);
+
+		}
+	}
+
+	if (!regulator_is_enabled(rinfo->regulator)) {
+		d_vpr_e("Regulator is not enabled %s\n",
+			rinfo->name);
+		__fatal_error(core, true);
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulator(struct regulator_info *rinfo)
+{
+	int rc = 0;
+
+	if (rinfo->has_hw_power_collapse) {
+		rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_FAST);
+		if (rc) {
+			d_vpr_e(
+				"Failed to hand off regulator control: %s\n",
+				rinfo->name);
+		} else {
+			d_vpr_h("Hand off regulator control to HW: %s\n",
+					rinfo->name);
+		}
+	}
+
+	return rc;
+}
+
+static int __hand_off_regulators(struct msm_vidc_core *core)
+{
+	struct regulator_info *rinfo;
+	int rc = 0, c = 0;
+
+	venus_hfi_for_each_regulator(core, rinfo) {
+		rc = __hand_off_regulator(rinfo);
+		/*
+		 * If one regulator hand off failed, driver should take
+		 * the control for other regulators back.
+		 */
+		if (rc)
+			goto err_reg_handoff_failed;
+		c++;
+	}
+
+	return rc;
+err_reg_handoff_failed:
+	venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c)
+		__acquire_regulator(rinfo, core);
+
+	return rc;
+}
+
+static void __set_registers(struct msm_vidc_core *core)
+{
+	struct reg_set *reg_set;
+	int i;
+
+	if (!core || !core->dt) {
+		d_vpr_e("core resources null, cannot set registers\n");
+		return;
+	}
+
+	reg_set = &core->dt->reg_set;
+	for (i = 0; i < reg_set->count; i++) {
+		__write_register_masked(core, reg_set->reg_tbl[i].reg,
+				reg_set->reg_tbl[i].value,
+				reg_set->reg_tbl[i].mask);
+	}
+}
+
+static int __vote_bandwidth(struct bus_info *bus,
+	unsigned long bw_kbps)
+{
+	int rc = 0;
+
+	d_vpr_p("Voting bus %s to ab %lu kbps\n", bus->name, bw_kbps);
+	rc = icc_set_bw(bus->path, kbps_to_icc(bw_kbps), 0);
+	if (rc)
+		d_vpr_e("Failed voting bus %s to ab %lu, rc=%d\n",
+				bus->name, bw_kbps, rc);
+
+	return rc;
+}
+
+int __unvote_buses(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+
+	core->power.bw_ddr = 0;
+	core->power.bw_llcc = 0;
+
+	venus_hfi_for_each_bus(core, bus) {
+		rc = __vote_bandwidth(bus, 0);
+		if (rc)
+			goto err_unknown_device;
+	}
+
+err_unknown_device:
+	return rc;
+}
+
+static int __vote_buses(struct msm_vidc_core *core,
+		unsigned long bw_ddr, unsigned long bw_llcc)
+{
+	int rc = 0;
+	struct bus_info *bus = NULL;
+	unsigned long bw_kbps = 0, bw_prev = 0;
+	enum vidc_bus_type type;
+
+	venus_hfi_for_each_bus(core, bus) {
+		if (bus && bus->path) {
+			type = get_type_frm_name(bus->name);
+
+			if (type == DDR) {
+				bw_kbps = bw_ddr;
+				bw_prev = core->power.bw_ddr;
+			} else if (type == LLCC) {
+				bw_kbps = bw_llcc;
+				bw_prev = core->power.bw_llcc;
+			} else {
+				bw_kbps = bus->range[1];
+				bw_prev = core->power.bw_ddr ?
+						bw_kbps : 0;
+			}
+
+			/* ensure freq is within limits */
+			bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
+				bus->range[0], bus->range[1]);
+
+			if (TRIVIAL_BW_CHANGE(bw_kbps, bw_prev) && bw_prev) {
+				d_vpr_l("Skip voting bus %s to %lu bps\n",
+					bus->name, bw_kbps * 1000);
+				continue;
+			}
+
+			rc = __vote_bandwidth(bus, bw_kbps);
+
+			if (type == DDR)
+				core->power.bw_ddr = bw_kbps;
+			else if (type == LLCC)
+				core->power.bw_llcc = bw_kbps;
+		} else {
+			d_vpr_e("No BUS to Vote\n");
+		}
+	}
+
+	return rc;
+}
+
+static int __tzbsp_set_video_state(enum tzbsp_video_state state)
+{
+	int tzbsp_rsp = qcom_scm_set_remote_state(state, 0);
+
+	d_vpr_l("Set state %d, resp %d\n", state, tzbsp_rsp);
+	if (tzbsp_rsp) {
+		d_vpr_e("Failed to set video core state to suspend: %d\n",
+			tzbsp_rsp);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __set_clk_rate(struct msm_vidc_core *core,
+		struct clock_info *cl, u64 rate)
+{
+	int rc = 0;
+	struct clk *clk = cl->clk;
+
+	rc = clk_set_rate(clk, rate);
+	if (rc) {
+		d_vpr_e(
+			"%s: Failed to set clock rate %llu %s: %d\n",
+			__func__, rate, cl->name, rc);
+		return rc;
+	}
+
+	core->power.clk_freq = rate;
+
+	return rc;
+}
+
+static int __set_clocks(struct msm_vidc_core *core, u32 freq)
+{
+	struct clock_info *cl;
+	int rc = 0;
+
+	/* bail early if requested clk_freq is not changed */
+	if (freq == core->power.clk_freq)
+		return 0;
+
+	venus_hfi_for_each_clock(core, cl) {
+		if (cl->has_scaling) {/* has_scaling */
+			rc = __set_clk_rate(core, cl, freq);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int __scale_clocks(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 rate = 0;
+
+	allowed_clks_tbl = core->dt->allowed_clks_tbl;
+	rate = core->power.clk_freq ? core->power.clk_freq :
+		allowed_clks_tbl[0].clock_rate;
+
+	rc = __set_clocks(core, rate);
+	return rc;
+}
+
+static int __write_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
+		bool *rx_req_is_set)
+{
+	struct hfi_queue_header *queue;
+	u32 packet_size_in_words, new_write_idx;
+	u32 empty_space, read_idx, write_idx;
+	u32 *write_ptr;
+
+	if (!qinfo || !packet) {
+		d_vpr_e("%s: invalid params %pK %pK\n",
+			__func__, qinfo, packet);
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		d_vpr_e("Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	queue = (struct hfi_queue_header *) qinfo->q_hdr;
+	if (!queue) {
+		d_vpr_e("queue not present\n");
+		return -ENOENT;
+	}
+
+	if (msm_vidc_debug & VIDC_PKT) {
+		d_vpr_t("%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet);
+	}
+
+	packet_size_in_words = (*(u32 *)packet) >> 2;
+	if (!packet_size_in_words || packet_size_in_words >
+		qinfo->q_array.mem_size>>2) {
+		d_vpr_e("Invalid packet size\n");
+		return -ENODATA;
+	}
+
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	empty_space = (write_idx >=  read_idx) ?
+		((qinfo->q_array.mem_size>>2) - (write_idx -  read_idx)) :
+		(read_idx - write_idx);
+	if (empty_space <= packet_size_in_words) {
+		queue->qhdr_tx_req =  1;
+		d_vpr_e("Insufficient size (%d) to write (%d)\n",
+					  empty_space, packet_size_in_words);
+		return -ENOTEMPTY;
+	}
+
+	queue->qhdr_tx_req =  0;
+
+	new_write_idx = write_idx + packet_size_in_words;
+	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+			(write_idx << 2));
+	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+	    write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+	    qinfo->q_array.mem_size)) {
+		d_vpr_e("Invalid write index");
+		return -ENODATA;
+	}
+
+	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
+		memcpy(write_ptr, packet, packet_size_in_words << 2);
+	} else {
+		new_write_idx -= qinfo->q_array.mem_size >> 2;
+		memcpy(write_ptr, packet, (packet_size_in_words -
+			new_write_idx) << 2);
+		memcpy((void *)qinfo->q_array.align_virtual_addr,
+			packet + ((packet_size_in_words - new_write_idx) << 2),
+			new_write_idx  << 2);
+	}
+
+	/*
+	 * Memory barrier to make sure packet is written before updating the
+	 * write index
+	 */
+	mb();
+	queue->qhdr_write_idx = new_write_idx;
+	if (rx_req_is_set)
+		*rx_req_is_set = queue->qhdr_rx_req == 1;
+	/*
+	 * Memory barrier to make sure write index is updated before an
+	 * interrupt is raised on venus.
+	 */
+	mb();
+	return 0;
+}
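
For reference, the free-space check and wrap-around copy in __write_queue() follow the usual single-producer ring-buffer scheme: indices count 32-bit words, and the "empty_space <= packet_size" check keeps at least one word free so that read_idx == write_idx unambiguously means "empty". A minimal user-space sketch (toy queue size, not the driver's real dimensions):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define Q_WORDS 16	/* illustration only; real queues are much larger */

    struct q {
    	uint32_t read_idx, write_idx;	/* both counted in 32-bit words */
    	uint32_t mem[Q_WORDS];
    };

    static int q_write(struct q *q, const uint32_t *pkt, uint32_t words)
    {
    	uint32_t r = q->read_idx, w = q->write_idx;
    	uint32_t empty = (w >= r) ? (Q_WORDS - (w - r)) : (r - w);
    	uint32_t new_w = w + words;

    	if (empty <= words)
    		return -1;	/* no room: set tx_req and let the FW drain */

    	if (new_w < Q_WORDS) {
    		memcpy(&q->mem[w], pkt, words * 4);
    	} else {		/* wrap: split the copy across the end */
    		new_w -= Q_WORDS;
    		memcpy(&q->mem[w], pkt, (words - new_w) * 4);
    		memcpy(&q->mem[0], pkt + (words - new_w), new_w * 4);
    	}
    	q->write_idx = new_w;	/* the driver places a barrier before this */
    	return 0;
    }

    int main(void)
    {
    	struct q q = { .read_idx = 14, .write_idx = 14 };
    	uint32_t pkt[5] = { 5 * 4 /* size in bytes */, 1, 2, 3, 4 };
    	int rc = q_write(&q, pkt, 5);

    	printf("rc=%d, new write_idx=%u\n", rc, (unsigned int)q.write_idx);
    	return 0;
    }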
+#if 0
+static int __read_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
+		u32 *pb_tx_req_is_set)
+{
+	struct hfi_queue_header *queue;
+	u32 packet_size_in_words, new_read_idx;
+	u32 *read_ptr;
+	u32 receive_request = 0;
+	u32 read_idx, write_idx;
+	int rc = 0;
+	u32 sid;
+
+	if (!qinfo || !packet || !pb_tx_req_is_set) {
+		d_vpr_e("%s: invalid params %pK %pK %pK\n",
+			__func__, qinfo, packet, pb_tx_req_is_set);
+		return -EINVAL;
+	} else if (!qinfo->q_array.align_virtual_addr) {
+		d_vpr_e("Queues have already been freed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Memory barrier to make sure data is valid before
+	 * reading it
+	 */
+	mb();
+	queue = (struct hfi_queue_header *) qinfo->q_hdr;
+
+	if (!queue) {
+		d_vpr_e("Queue memory is not allocated\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Do not set the receive request for the debug queue: if set,
+	 * Venus generates an interrupt for debug messages even when
+	 * there is no response message available. In general the debug
+	 * queue will not become full, as it is emptied out on every
+	 * interrupt from Venus, and Venus will generate an interrupt
+	 * anyway if it does become full.
+	 */
+	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
+		receive_request = 1;
+
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	if (read_idx == write_idx) {
+		queue->qhdr_rx_req = receive_request;
+		/*
+		 * mb() to ensure qhdr is updated in main memory
+		 * so that venus reads the updated header values
+		 */
+		mb();
+		*pb_tx_req_is_set = 0;
+		d_vpr_l(
+			"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
+			receive_request ? "message" : "debug",
+			queue->qhdr_rx_req, queue->qhdr_tx_req,
+			queue->qhdr_read_idx);
+		return -ENODATA;
+	}
+
+	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+				(read_idx << 2));
+	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+	    read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+	    qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+		d_vpr_e("Invalid read index\n");
+		return -ENODATA;
+	}
+
+	packet_size_in_words = (*read_ptr) >> 2;
+	if (!packet_size_in_words) {
+		d_vpr_e("Zero packet size\n");
+		return -ENODATA;
+	}
+
+	new_read_idx = read_idx + packet_size_in_words;
+	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
+		read_idx <= (qinfo->q_array.mem_size >> 2)) {
+		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
+			memcpy(packet, read_ptr,
+					packet_size_in_words << 2);
+		} else {
+			new_read_idx -= (qinfo->q_array.mem_size >> 2);
+			memcpy(packet, read_ptr,
+			(packet_size_in_words - new_read_idx) << 2);
+			memcpy(packet + ((packet_size_in_words -
+					new_read_idx) << 2),
+					(u8 *)qinfo->q_array.align_virtual_addr,
+					new_read_idx << 2);
+		}
+	} else {
+		d_vpr_e("BAD packet received, read_idx: %#x, pkt_size: %d\n",
+			read_idx, packet_size_in_words << 2);
+		d_vpr_e("Dropping this packet\n");
+		new_read_idx = write_idx;
+		rc = -ENODATA;
+	}
+
+	if (new_read_idx != write_idx)
+		queue->qhdr_rx_req = 0;
+	else
+		queue->qhdr_rx_req = receive_request;
+
+	queue->qhdr_read_idx = new_read_idx;
+	/*
+	 * mb() to ensure qhdr is updated in main memory
+	 * so that venus reads the updated header values
+	 */
+	mb();
+
+	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
+
+	if ((msm_vidc_debug & VIDC_PKT) &&
+		!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
+		sid = *((u32 *)packet + 2);
+		d_vpr_t("%s: %pK\n", __func__, qinfo);
+		__dump_packet(packet);
+	}
+
+	return rc;
+}
+#endif
+/* Writes into cmdq without raising an interrupt */
+static int __iface_cmdq_write_relaxed(struct msm_vidc_core *core,
+		void *pkt, bool *requires_interrupt)
+{
+	struct msm_vidc_iface_q_info *q_info;
+	//struct vidc_hal_cmd_pkt_hdr *cmd_packet;
+	int result = -E2BIG;
+
+	if (!core || !pkt) {
+		d_vpr_e("%s: invalid params %pK %pK\n",
+			__func__, core, pkt);
+		return -EINVAL;
+	}
+
+	__strict_check(core);
+
+	if (!__core_in_valid_state(core)) {
+		d_vpr_e("%s: fw not in init state\n", __func__);
+		result = -EINVAL;
+		goto err_q_null;
+	}
+
+	//cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt;
+	//core->last_packet_type = cmd_packet->packet_type;
+
+	q_info = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
+	if (!q_info) {
+		d_vpr_e("cannot write to shared Q's\n");
+		goto err_q_null;
+	}
+
+	if (!q_info->q_array.align_virtual_addr) {
+		d_vpr_e("cannot write to shared CMD Q's\n");
+		result = -ENODATA;
+		goto err_q_null;
+	}
+
+	if (__resume(core)) {
+		d_vpr_e("%s: Power on failed\n", __func__);
+		goto err_q_write;
+	}
+
+	if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
+		__schedule_power_collapse_work(core);
+		result = 0;
+	} else {
+		d_vpr_e("__iface_cmdq_write: queue full\n");
+	}
+
+err_q_write:
+err_q_null:
+	return result;
+}
+
+static int __iface_cmdq_write(struct msm_vidc_core *core,
+	void *pkt)
+{
+	bool needs_interrupt = false;
+	int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);
+
+	if (!rc && needs_interrupt)
+		call_venus_op(core, raise_interrupt, core);
+
+	return rc;
+}
+/*
+static int __iface_msgq_read(struct msm_vidc_core *core, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct msm_vidc_iface_q_info *q_info;
+
+	if (!pkt) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	__strict_check(core);
+
+	if (!__core_in_valid_state(core)) {
+		d_vpr_e("%s: fw not in init state\n", __func__);
+		rc = -EINVAL;
+		goto read_error_null;
+	}
+
+	q_info = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
+	if (!q_info->q_array.align_virtual_addr) {
+		d_vpr_e("cannot read from shared MSG Q's\n");
+		rc = -ENODATA;
+		goto read_error_null;
+	}
+
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			call_venus_op(core, raise_interrupt, core);
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+read_error_null:
+	return rc;
+}
+
+static int __iface_dbgq_read(struct msm_vidc_core *core, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct msm_vidc_iface_q_info *q_info;
+
+	if (!pkt) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	__strict_check(core);
+
+	q_info = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
+	if (!q_info->q_array.align_virtual_addr) {
+		d_vpr_e("cannot read from shared DBG Q's\n");
+		rc = -ENODATA;
+		goto dbg_error_null;
+	}
+
+	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			call_venus_op(core, raise_interrupt, core);
+		rc = 0;
+	} else
+		rc = -ENODATA;
+
+dbg_error_null:
+	return rc;
+}
+*/
+static int __sys_set_debug(struct msm_vidc_core *core, u32 debug)
+{
+	int rc = 0;
+
+	//rc = call_hfi_pkt_op(core, sys_debug_config, pkt, debug);
+	rc = hfi_packet_sys_debug_config(core, core->packet,
+			core->packet_size, debug);
+	if (rc) {
+		d_vpr_e("Debug mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	if (__iface_cmdq_write(core, core->packet))
+		return -ENOTEMPTY;
+	return 0;
+}
+/*
+static int __sys_set_coverage(struct msm_vidc_core *core,
+		u32 mode)
+{
+	int rc = 0;
+
+	//rc = call_hfi_pkt_op(core, sys_coverage_config,	pkt, mode);
+	if (rc) {
+		d_vpr_e("Coverage mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
+
+	//if (__iface_cmdq_write(core, pkt, sid)) {
+	//	d_vpr_e("Failed to send coverage pkt to f/w\n");
+	//	return -ENOTEMPTY;
+	//}
+
+	return 0;
+}
+
+static int __sys_set_power_control(struct msm_vidc_core *core, bool enable)
+{
+	struct regulator_info *rinfo;
+	bool supported = false;
+
+	venus_hfi_for_each_regulator(core, rinfo) {
+		if (rinfo->has_hw_power_collapse) {
+			supported = true;
+			break;
+		}
+	}
+
+	if (!supported)
+		return 0;
+
+	//call_hfi_pkt_op(core, sys_power_control, pkt, enable);
+	//if (__iface_cmdq_write(core, pkt, sid))
+	//	return -ENOTEMPTY;
+	return 0;
+}
+*/
+
+int __prepare_pc(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	rc = hfi_packet_sys_pc_prep(core, core->packet, core->packet_size);
+	if (rc) {
+		d_vpr_e("Failed to create sys pc prep pkt\n");
+		goto err_pc_prep;
+	}
+
+	if (__iface_cmdq_write(core, core->packet))
+		rc = -ENOTEMPTY;
+	if (rc)
+		d_vpr_e("Failed to prepare venus for power off");
+err_pc_prep:
+	return rc;
+}
+
+static int __power_collapse(struct msm_vidc_core *core, bool force)
+{
+	int rc = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (!core->power_enabled) {
+		d_vpr_h("%s: Power already disabled\n", __func__);
+		goto exit;
+	}
+
+	if (!__core_in_valid_state(core)) {
+		d_vpr_e("%s: Core not in init state\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = call_venus_op(core, prepare_pc, core);
+	if (rc)
+		goto skip_power_off;
+
+	//__flush_debug_queue(core, core->raw_packet);
+
+	rc = __suspend(core);
+	if (rc)
+		d_vpr_e("Failed __suspend\n");
+
+exit:
+	return rc;
+
+skip_power_off:
+	return -EAGAIN;
+}
+
+static int __protect_cp_mem(struct msm_vidc_core *core)
+{
+	struct tzbsp_memprot memprot;
+	int rc = 0;
+	struct context_bank_info *cb;
+
+	if (!core)
+		return -EINVAL;
+
+	memprot.cp_start = 0x0;
+	memprot.cp_size = 0x0;
+	memprot.cp_nonpixel_start = 0x0;
+	memprot.cp_nonpixel_size = 0x0;
+
+	list_for_each_entry(cb, &core->dt->context_banks, list) {
+		if (!strcmp(cb->name, "venus_ns")) {
+			memprot.cp_size = cb->addr_range.start;
+
+			d_vpr_h("%s: memprot.cp_size: %#x\n",
+				__func__, memprot.cp_size);
+		}
+
+		if (!strcmp(cb->name, "venus_sec_non_pixel")) {
+			memprot.cp_nonpixel_start = cb->addr_range.start;
+			memprot.cp_nonpixel_size = cb->addr_range.size;
+
+			d_vpr_h("%s: cp_nonpixel_start: %#x size: %#x\n",
+				__func__, memprot.cp_nonpixel_start,
+				memprot.cp_nonpixel_size);
+		}
+	}
+
+	rc = qcom_scm_mem_protect_video(memprot.cp_start, memprot.cp_size,
+			memprot.cp_nonpixel_start, memprot.cp_nonpixel_size);
+
+	if (rc)
+		d_vpr_e("Failed to protect memory(%d)\n", rc);
+
+	return rc;
+}
+
+static int __core_set_resource(struct msm_vidc_core *core,
+		struct vidc_resource_hdr *resource_hdr, void *resource_value)
+{
+	int rc = 0;
+
+	if (!core || !resource_hdr || !resource_value) {
+		d_vpr_e("%s: invalid params %pK %pK %pK\n", __func__,
+			core, resource_hdr, resource_value);
+		return -EINVAL;
+	}
+
+	//rc = hfi_packet_sys_set_resource(core, core->packet, core->packet_size,
+	//		resource_hdr, resource_value);
+	if (rc) {
+		d_vpr_e("set_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	//rc = __iface_cmdq_write(core, core->packet);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static int __core_release_resource(struct msm_vidc_core *core,
+		struct vidc_resource_hdr *resource_hdr)
+{
+	int rc = 0;
+
+	if (!core || !resource_hdr) {
+		d_vpr_e("%s: invalid params %pK %pK\n",
+			__func__, core, resource_hdr);
+		return -EINVAL;
+	}
+
+	//rc = hfi_packet_sys_release_resource(core, core->packet, core->packet_size, resource_hdr);
+	if (rc) {
+		d_vpr_e("release_res: failed to create packet\n");
+		goto err_create_pkt;
+	}
+
+	//rc = __iface_cmdq_write(core, core->packet);
+	if (rc)
+		rc = -ENOTEMPTY;
+
+err_create_pkt:
+	return rc;
+}
+
+static void __deinit_clocks(struct msm_vidc_core *core)
+{
+	struct clock_info *cl;
+
+	core->power.clk_freq = 0;
+	venus_hfi_for_each_clock_reverse(core, cl) {
+		if (cl->clk) {
+			clk_put(cl->clk);
+			cl->clk = NULL;
+		}
+	}
+}
+
+static int __init_clocks(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct clock_info *cl = NULL;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	venus_hfi_for_each_clock(core, cl) {
+		d_vpr_h("%s: scalable? %d, count %d\n",
+				cl->name, cl->has_scaling, cl->count);
+	}
+
+	venus_hfi_for_each_clock(core, cl) {
+		if (!cl->clk) {
+			cl->clk = clk_get(&core->pdev->dev, cl->name);
+			if (IS_ERR_OR_NULL(cl->clk)) {
+				d_vpr_e("Failed to get clock: %s\n", cl->name);
+				rc = PTR_ERR(cl->clk) ?
+					PTR_ERR(cl->clk) : -EINVAL;
+				cl->clk = NULL;
+				goto err_clk_get;
+			}
+		}
+	}
+	core->power.clk_freq = 0;
+	return 0;
+
+err_clk_get:
+	__deinit_clocks(core);
+	return rc;
+}
+
+static int __handle_reset_clk(struct msm_vidc_core *core,
+			int reset_index, enum reset_state state)
+{
+	int rc = 0;
+	struct msm_vidc_dt *dt = core->dt;
+	struct reset_control *rst;
+	struct reset_set *rst_set = &dt->reset_set;
+
+	if (!rst_set->reset_tbl)
+		return 0;
+
+	rst = rst_set->reset_tbl[reset_index].rst;
+	d_vpr_h("reset_clk: name %s reset_state %d rst %pK\n",
+		rst_set->reset_tbl[reset_index].name, state, rst);
+
+	switch (state) {
+	case INIT:
+		if (rst)
+			goto skip_reset_init;
+
+		rst = devm_reset_control_get(&core->pdev->dev,
+				rst_set->reset_tbl[reset_index].name);
+		if (IS_ERR(rst))
+			rc = PTR_ERR(rst);
+
+		rst_set->reset_tbl[reset_index].rst = rst;
+		break;
+	case ASSERT:
+		if (!rst) {
+			rc = -EINVAL;
+			goto failed_to_reset;
+		}
+
+		rc = reset_control_assert(rst);
+		break;
+	case DEASSERT:
+		if (!rst) {
+			rc = -EINVAL;
+			goto failed_to_reset;
+		}
+		rc = reset_control_deassert(rst);
+		break;
+	default:
+		d_vpr_e("%s: invalid reset request\n", __func__);
+		rc = -EINVAL;
+		goto failed_to_reset;
+	}
+
+	return 0;
+
+skip_reset_init:
+failed_to_reset:
+	return rc;
+}
+
+void __disable_unprepare_clks(struct msm_vidc_core *core)
+{
+	struct clock_info *cl;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+
+	venus_hfi_for_each_clock_reverse(core, cl) {
+		d_vpr_h("Clock: %s disable and unprepare\n",
+				cl->name);
+
+		if (!__clk_is_enabled(cl->clk))
+			d_vpr_e("%s: clock %s already disabled\n",
+				__func__, cl->name);
+
+		clk_disable_unprepare(cl->clk);
+
+		if (__clk_is_enabled(cl->clk))
+			d_vpr_e("%s: clock %s not disabled\n",
+				__func__, cl->name);
+	}
+}
+
+int __reset_ahb2axi_bridge(struct msm_vidc_core *core)
+{
+	int rc, i;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < core->dt->reset_set.count; i++) {
+		rc = __handle_reset_clk(core, i, ASSERT);
+		if (rc) {
+			d_vpr_e("failed to assert reset clocks\n");
+			goto failed_to_reset;
+		}
+
+		/* keep the reset asserted briefly before deasserting */
+		usleep_range(400, 500);
+
+		rc = __handle_reset_clk(core, i, DEASSERT);
+		if (rc) {
+			d_vpr_e("failed to deassert reset clocks\n");
+			goto failed_to_reset;
+		}
+	}
+
+	return 0;
+
+failed_to_reset:
+	return rc;
+}
+
+static int __prepare_enable_clks(struct msm_vidc_core *core)
+{
+	struct clock_info *cl = NULL, *cl_fail = NULL;
+	int rc = 0, c = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	venus_hfi_for_each_clock(core, cl) {
+		/*
+		 * For the clocks we control, set the rate prior to preparing
+		 * them.  Since we don't really have a load at this point, scale
+		 * it to the lowest frequency possible
+		 */
+		if (cl->has_scaling)
+			__set_clk_rate(core, cl,
+					clk_round_rate(cl->clk, 0));
+
+		if (__clk_is_enabled(cl->clk))
+			d_vpr_e("%s: clock %s already enabled\n",
+				__func__, cl->name);
+
+		rc = clk_prepare_enable(cl->clk);
+		if (rc) {
+			d_vpr_e("Failed to enable clocks\n");
+			cl_fail = cl;
+			goto fail_clk_enable;
+		}
+
+		if (!__clk_is_enabled(cl->clk))
+			d_vpr_e("%s: clock %s not enabled\n",
+				__func__, cl->name);
+
+		c++;
+		d_vpr_h("Clock: %s prepared and enabled\n", cl->name);
+	}
+
+	call_venus_op(core, clock_config_on_enable, core);
+	return rc;
+
+fail_clk_enable:
+	venus_hfi_for_each_clock_reverse_continue(core, cl, c) {
+		d_vpr_e("Clock: %s disable and unprepare\n",
+			cl->name);
+		clk_disable_unprepare(cl->clk);
+	}
+
+	return rc;
+}
+
+static void __deinit_bus(struct msm_vidc_core *core)
+{
+	struct bus_info *bus = NULL;
+
+	if (!core)
+		return;
+
+	core->power.bw_ddr = 0;
+	core->power.bw_llcc = 0;
+
+	venus_hfi_for_each_bus_reverse(core, bus) {
+		icc_put(bus->path);
+		bus->path = NULL;
+	}
+}
+
+static int __init_bus(struct msm_vidc_core *core)
+{
+	struct bus_info *bus = NULL;
+	int rc = 0;
+
+	if (!core)
+		return -EINVAL;
+
+	venus_hfi_for_each_bus(core, bus) {
+		if (!strcmp(bus->name, "venus-llcc")) {
+			if (msm_vidc_syscache_disable) {
+				d_vpr_h("Skipping LLC bus init: %s\n",
+					bus->name);
+				continue;
+			}
+		}
+		bus->path = of_icc_get(bus->dev, bus->name);
+		if (IS_ERR_OR_NULL(bus->path)) {
+			rc = PTR_ERR(bus->path) ?
+				PTR_ERR(bus->path) : -EBADHANDLE;
+
+			d_vpr_e("Failed to register bus %s: %d\n",
+					bus->name, rc);
+			bus->path = NULL;
+			goto err_add_dev;
+		}
+	}
+
+	return 0;
+
+err_add_dev:
+	__deinit_bus(core);
+	return rc;
+}
+
+static void __deinit_regulators(struct msm_vidc_core *core)
+{
+	struct regulator_info *rinfo = NULL;
+
+	venus_hfi_for_each_regulator_reverse(core, rinfo) {
+		if (rinfo->regulator) {
+			regulator_put(rinfo->regulator);
+			rinfo->regulator = NULL;
+		}
+	}
+}
+
+static int __init_regulators(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct regulator_info *rinfo = NULL;
+
+	venus_hfi_for_each_regulator(core, rinfo) {
+		rinfo->regulator = regulator_get(&core->pdev->dev,
+				rinfo->name);
+		if (IS_ERR_OR_NULL(rinfo->regulator)) {
+			rc = PTR_ERR(rinfo->regulator) ?
+				PTR_ERR(rinfo->regulator) : -EBADHANDLE;
+			d_vpr_e("Failed to get regulator: %s\n", rinfo->name);
+			rinfo->regulator = NULL;
+			goto err_reg_get;
+		}
+	}
+
+	return 0;
+
+err_reg_get:
+	__deinit_regulators(core);
+	return rc;
+}
+
+static void __deinit_subcaches(struct msm_vidc_core *core)
+{
+	struct subcache_info *sinfo = NULL;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		goto exit;
+	}
+
+	if (!is_sys_cache_present(core))
+		goto exit;
+
+	venus_hfi_for_each_subcache_reverse(core, sinfo) {
+		if (sinfo->subcache) {
+			d_vpr_h("deinit_subcaches: %s\n", sinfo->name);
+			llcc_slice_putd(sinfo->subcache);
+			sinfo->subcache = NULL;
+		}
+	}
+
+exit:
+	return;
+}
+
+static int __init_subcaches(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct subcache_info *sinfo = NULL;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!is_sys_cache_present(core))
+		return 0;
+
+	venus_hfi_for_each_subcache(core, sinfo) {
+		if (!strcmp("vidsc0", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_VIDSC0);
+		} else if (!strcmp("vidsc1", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_VIDSC1);
+		} else if (!strcmp("vidscfw", sinfo->name)) {
+			sinfo->subcache = llcc_slice_getd(LLCC_VIDFW);
+		} else {
+			d_vpr_e("Invalid subcache name %s\n",
+					sinfo->name);
+		}
+		if (IS_ERR_OR_NULL(sinfo->subcache)) {
+			rc = PTR_ERR(sinfo->subcache) ?
+				PTR_ERR(sinfo->subcache) : -EBADHANDLE;
+			d_vpr_e("init_subcaches: invalid subcache: %s rc %d\n",
+				sinfo->name, rc);
+			sinfo->subcache = NULL;
+			goto err_subcache_get;
+		}
+		d_vpr_h("init_subcaches: %s\n", sinfo->name);
+	}
+
+	return 0;
+
+err_subcache_get:
+	__deinit_subcaches(core);
+	return rc;
+}
+
+static int __init_resources(struct msm_vidc_core *core)
+{
+	int i, rc = 0;
+
+	rc = __init_regulators(core);
+	if (rc) {
+		d_vpr_e("Failed to get all regulators\n");
+		return -ENODEV;
+	}
+
+	rc = __init_clocks(core);
+	if (rc) {
+		d_vpr_e("Failed to init clocks\n");
+		rc = -ENODEV;
+		goto err_init_clocks;
+	}
+
+	for (i = 0; i < core->dt->reset_set.count; i++) {
+		rc = __handle_reset_clk(core, i, INIT);
+		if (rc) {
+			d_vpr_e("Failed to init reset clocks\n");
+			rc = -ENODEV;
+			goto err_init_reset_clk;
+		}
+	}
+
+	rc = __init_bus(core);
+	if (rc) {
+		d_vpr_e("Failed to init bus: %d\n", rc);
+		goto err_init_bus;
+	}
+
+	rc = __init_subcaches(core);
+	if (rc)
+		d_vpr_e("Failed to init subcaches: %d\n", rc);
+
+	return rc;
+
+err_init_reset_clk:
+err_init_bus:
+	__deinit_clocks(core);
+err_init_clocks:
+	__deinit_regulators(core);
+	return rc;
+}
+
+static void __deinit_resources(struct msm_vidc_core *core)
+{
+	__deinit_subcaches(core);
+	__deinit_bus(core);
+	__deinit_clocks(core);
+	__deinit_regulators(core);
+}
+
+static int __disable_regulator(struct regulator_info *rinfo,
+				struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	d_vpr_h("Disabling regulator %s\n", rinfo->name);
+
+	/*
+	 * This call is needed: the driver must acquire control back
+	 * from the HW in order to disable the regulator. Otherwise
+	 * the behavior is undefined.
+	 */
+
+	rc = __acquire_regulator(rinfo, core);
+	if (rc) {
+		/*
+		 * This is somewhat fatal, but nothing we can do
+		 * about it. We can't disable the regulator w/o
+		 * getting it back under s/w control
+		 */
+		d_vpr_e("Failed to acquire control on %s\n",
+			rinfo->name);
+
+		goto disable_regulator_failed;
+	}
+
+	if (!regulator_is_enabled(rinfo->regulator))
+		d_vpr_e("%s: regulator %s already disabled\n",
+			__func__, rinfo->name);
+
+	rc = regulator_disable(rinfo->regulator);
+	if (rc) {
+		d_vpr_e("Failed to disable %s: %d\n",
+			rinfo->name, rc);
+		goto disable_regulator_failed;
+	}
+
+	if (regulator_is_enabled(rinfo->regulator))
+		d_vpr_e("%s: regulator %s not disabled\n",
+			__func__, rinfo->name);
+
+	return 0;
+disable_regulator_failed:
+
+	/* Bring attention to this issue */
+	__fatal_error(core, true);
+	return rc;
+}
+
+static int __enable_hw_power_collapse(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	rc = __hand_off_regulators(core);
+	if (rc)
+		d_vpr_e("%s: Failed to enable HW power collapse %d\n",
+				__func__, rc);
+	return rc;
+}
+
+static int __enable_regulators(struct msm_vidc_core *core)
+{
+	int rc = 0, c = 0;
+	struct regulator_info *rinfo;
+
+	d_vpr_h("Enabling regulators\n");
+
+	venus_hfi_for_each_regulator(core, rinfo) {
+		if (regulator_is_enabled(rinfo->regulator))
+			d_vpr_e("%s: regulator %s already enabled\n",
+				__func__, rinfo->name);
+
+		rc = regulator_enable(rinfo->regulator);
+		if (rc) {
+			d_vpr_e("Failed to enable %s: %d\n",
+					rinfo->name, rc);
+			goto err_reg_enable_failed;
+		}
+
+		if (!regulator_is_enabled(rinfo->regulator))
+			d_vpr_e("%s: regulator %s not enabled\n",
+				__func__, rinfo->name);
+
+		d_vpr_h("Enabled regulator %s\n",
+				rinfo->name);
+		c++;
+	}
+
+	return 0;
+
+err_reg_enable_failed:
+	venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c)
+		__disable_regulator(rinfo, core);
+
+	return rc;
+}
+
+int __disable_regulators(struct msm_vidc_core *core)
+{
+	struct regulator_info *rinfo;
+
+	d_vpr_h("Disabling regulators\n");
+	venus_hfi_for_each_regulator_reverse(core, rinfo)
+		__disable_regulator(rinfo, core);
+
+	return 0;
+}
+
+static int __release_subcaches(struct msm_vidc_core *core)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+	u32 c = 0;
+	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+	struct hfi_resource_syscache_info_type *sc_res_info;
+	struct hfi_resource_subcache_type *sc_res;
+	struct vidc_resource_hdr rhdr;
+
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
+		return 0;
+
+	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	/* Release resource command to Venus */
+	venus_hfi_for_each_subcache_reverse(core, sinfo) {
+		if (sinfo->isset) {
+			/* Update the entry */
+			sc_res[c].size = sinfo->subcache->slice_size;
+			sc_res[c].sc_id = sinfo->subcache->slice_id;
+			c++;
+			sinfo->isset = false;
+		}
+	}
+
+	if (c > 0) {
+		d_vpr_h("Releasing %d subcaches\n", c);
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
+
+		rc = __core_release_resource(core, &rhdr);
+		if (rc)
+			d_vpr_e("Failed to release %d subcaches\n", c);
+	}
+
+	core->dt->sys_cache_res_set = false;
+
+	return 0;
+}
+
+static int __disable_subcaches(struct msm_vidc_core *core)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
+		return 0;
+
+	/* De-activate subcaches */
+	venus_hfi_for_each_subcache_reverse(core, sinfo) {
+		if (sinfo->isactive) {
+			d_vpr_h("De-activate subcache %s\n",
+				sinfo->name);
+			rc = llcc_slice_deactivate(sinfo->subcache);
+			if (rc) {
+				d_vpr_e("Failed to de-activate %s: %d\n",
+					sinfo->name, rc);
+			}
+			sinfo->isactive = false;
+		}
+	}
+
+	return 0;
+}
+
+static int __enable_subcaches(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
+		return 0;
+
+	/* Activate subcaches */
+	venus_hfi_for_each_subcache(core, sinfo) {
+		rc = llcc_slice_activate(sinfo->subcache);
+		if (rc) {
+			d_vpr_e("Failed to activate %s: %d\n",
+				sinfo->name, rc);
+			__fatal_error(core, true);
+			goto err_activate_fail;
+		}
+		sinfo->isactive = true;
+		d_vpr_h("Activated subcache %s\n", sinfo->name);
+		c++;
+	}
+
+	d_vpr_h("Activated %d Subcaches to Venus\n", c);
+
+	return 0;
+
+err_activate_fail:
+	__release_subcaches(core);
+	__disable_subcaches(core);
+	return 0;
+}
+
+static int __set_subcaches(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+	struct hfi_resource_syscache_info_type *sc_res_info;
+	struct hfi_resource_subcache_type *sc_res;
+	struct vidc_resource_hdr rhdr;
+
+	if (core->dt->sys_cache_res_set) {
+		d_vpr_h("Subcaches already set to Venus\n");
+		return 0;
+	}
+
+	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	venus_hfi_for_each_subcache(core, sinfo) {
+		if (sinfo->isactive) {
+			sc_res[c].size = sinfo->subcache->slice_size;
+			sc_res[c].sc_id = sinfo->subcache->slice_id;
+			c++;
+		}
+	}
+
+	/* Set resource to Venus for activated subcaches */
+	if (c) {
+		d_vpr_h("Setting %d Subcaches\n", c);
+
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
+
+		sc_res_info->num_entries = c;
+
+		rc = __core_set_resource(core, &rhdr, (void *)sc_res_info);
+		if (rc) {
+			d_vpr_e("Failed to set subcaches %d\n", rc);
+			goto err_fail_set_subcaches;
+		}
+
+		venus_hfi_for_each_subcache(core, sinfo) {
+			if (sinfo->isactive)
+				sinfo->isset = true;
+		}
+
+		d_vpr_h("Set Subcaches done to Venus\n");
+		core->dt->sys_cache_res_set = true;
+	}
+
+	return 0;
+
+err_fail_set_subcaches:
+	__disable_subcaches(core);
+
+	return 0;
+}
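
__set_subcaches() and __release_subcaches() both walk the subcache list and pack only the relevant slices into the resource table handed to the firmware. A compact sketch of that packing step, using made-up slice IDs and sizes purely for illustration:

    #include <stdio.h>

    struct subcache { const char *name; int active; unsigned int id, size; };
    struct entry { unsigned int sc_id, size; };

    int main(void)
    {
    	/* made-up slice IDs and sizes, for illustration only */
    	struct subcache caches[] = {
    		{ "vidsc0",  1, 2, 512 * 1024 },
    		{ "vidsc1",  0, 3, 512 * 1024 },
    		{ "vidscfw", 1, 4, 128 * 1024 },
    	};
    	struct entry tbl[3];
    	unsigned int c = 0, i;

    	for (i = 0; i < 3; i++) {
    		if (!caches[i].active)
    			continue;	/* only activated slices are reported */
    		tbl[c].sc_id = caches[i].id;
    		tbl[c].size = caches[i].size;
    		c++;
    	}
    	printf("num_entries = %u (first sc_id %u)\n", c, tbl[0].sc_id);
    	return 0;
    }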
+/*
+static int __set_ubwc_config(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core->platform->data.ubwc_config) {
+		d_vpr_h("%s: invalid ubwc config\n", __func__);
+		return -EINVAL;
+	}
+
+	//rc = hfi_packet_sys_ubwc_config(core, core->packet, core->packet_size);
+	if (rc)
+		return rc;
+
+	//rc = __iface_cmdq_write(core, core->packet);
+	if (rc)
+		return rc;
+
+	d_vpr_h("Configured UBWC Config\n");
+	return rc;
+}
+*/
+static int __venus_power_on(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (core->power_enabled)
+		return 0;
+
+	core->power_enabled = true;
+	/* Vote for all hardware resources */
+	rc = __vote_buses(core, INT_MAX, INT_MAX);
+	if (rc) {
+		d_vpr_e("Failed to vote buses, err: %d\n", rc);
+		goto fail_vote_buses;
+	}
+
+	rc = __enable_regulators(core);
+	if (rc) {
+		d_vpr_e("Failed to enable GDSC, err = %d\n", rc);
+		goto fail_enable_gdsc;
+	}
+
+	rc = call_venus_op(core, reset_ahb2axi_bridge, core);
+	if (rc) {
+		d_vpr_e("Failed to reset ahb2axi: %d\n", rc);
+		goto fail_enable_clks;
+	}
+
+	rc = __prepare_enable_clks(core);
+	if (rc) {
+		d_vpr_e("Failed to enable clocks: %d\n", rc);
+		goto fail_enable_clks;
+	}
+
+	rc = __scale_clocks(core);
+	if (rc) {
+		d_vpr_e("Failed to scale clocks, performance might be affected\n");
+		rc = 0;
+	}
+
+	/*
+	 * Re-program all of the registers that get reset as a result of
+	 * regulator_disable() and _enable()
+	 */
+	__set_registers(core);
+
+	call_venus_op(core, interrupt_init, core);
+	core->intr_status = 0;
+	enable_irq(core->dt->irq);
+
+	return rc;
+
+fail_enable_clks:
+	__disable_regulators(core);
+fail_enable_gdsc:
+	__unvote_buses(core);
+fail_vote_buses:
+	core->power_enabled = false;
+	return rc;
+}
+
+static int __suspend(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	} else if (!core->power_enabled) {
+		d_vpr_h("Power already disabled\n");
+		return 0;
+	}
+
+	d_vpr_h("Entering suspend\n");
+
+	rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
+	if (rc) {
+		d_vpr_e("Failed to suspend video core %d\n", rc);
+		goto err_tzbsp_suspend;
+	}
+
+	__disable_subcaches(core);
+
+	call_venus_op(core, power_off, core);
+	d_vpr_h("Venus power off\n");
+	return rc;
+
+err_tzbsp_suspend:
+	return rc;
+}
+
+static int __resume(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	} else if (core->power_enabled) {
+		goto exit;
+	} else if (!__core_in_valid_state(core)) {
+		d_vpr_e("%s: core in deinit state\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("Resuming from power collapse\n");
+	rc = __venus_power_on(core);
+	if (rc) {
+		d_vpr_e("Failed to power on venus\n");
+		goto err_venus_power_on;
+	}
+
+	/* Reboot the firmware */
+	rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
+	if (rc) {
+		d_vpr_e("Failed to resume video core %d\n", rc);
+		goto err_set_video_state;
+	}
+
+	/*
+	 * Hand off control of regulators to h/w _after_ loading fw.
+	 * Note that the GDSC will turn off when switching from normal
+	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
+	 * present.
+	 */
+	if (__enable_hw_power_collapse(core))
+		d_vpr_e("Failed to enable inter-frame PC\n");
+
+	call_venus_op(core, setup_ucregion_memmap, core);
+
+	/* Wait for boot completion */
+	rc = call_venus_op(core, boot_firmware, core);
+	if (rc) {
+		d_vpr_e("Failed to reset venus core\n");
+		goto err_reset_core;
+	}
+
+	__sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);
+	__enable_subcaches(core);
+	__set_subcaches(core);
+
+	d_vpr_h("Resumed from power collapse\n");
+exit:
+	/* Don't reset skip_pc_count for SYS_PC_PREP cmd */
+	//if (core->last_packet_type != HFI_CMD_SYS_PC_PREP)
+	//	core->skip_pc_count = 0;
+	return rc;
+err_reset_core:
+	__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
+err_set_video_state:
+	call_venus_op(core, power_off, core);
+err_venus_power_on:
+	d_vpr_e("Failed to resume from power collapse\n");
+	return rc;
+}
+
+static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
+{
+	q_hdr->qhdr_status = 0x1;
+	q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
+	q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4;
+	q_hdr->qhdr_pkt_size = 0;
+	q_hdr->qhdr_rx_wm = 0x1;
+	q_hdr->qhdr_tx_wm = 0x1;
+	q_hdr->qhdr_rx_req = 0x1;
+	q_hdr->qhdr_tx_req = 0x0;
+	q_hdr->qhdr_rx_irq_status = 0x0;
+	q_hdr->qhdr_tx_irq_status = 0x0;
+	q_hdr->qhdr_read_idx = 0x0;
+	q_hdr->qhdr_write_idx = 0x0;
+}
+
+static void __interface_queues_release(struct msm_vidc_core *core)
+{
+	int i;
+
+	d_vpr_h("%s()\n", __func__);
+
+	msm_vidc_memory_unmap(core, &core->iface_q_table.map);
+	msm_vidc_memory_free(core, &core->iface_q_table.alloc);
+	msm_vidc_memory_unmap(core, &core->sfr.map);
+	msm_vidc_memory_free(core, &core->sfr.alloc);
+
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		core->iface_queues[i].q_hdr = NULL;
+		core->iface_queues[i].q_array.align_virtual_addr = NULL;
+		core->iface_queues[i].q_array.align_device_addr = 0;
+	}
+
+	core->iface_q_table.align_virtual_addr = NULL;
+	core->iface_q_table.align_device_addr = 0;
+
+	core->sfr.align_virtual_addr = NULL;
+	core->sfr.align_device_addr = 0;
+}
+
+static int __interface_queues_init(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct hfi_queue_table_header *q_tbl_hdr;
+	struct hfi_queue_header *q_hdr;
+	struct msm_vidc_iface_q_info *iface_q;
+	struct msm_vidc_alloc alloc;
+	struct msm_vidc_map map;
+	int offset = 0;
+	u32 q_size;
+	u32 i;
+
+	d_vpr_h("%s()\n", __func__);
+	q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE;
+
+	memset(&alloc, 0, sizeof(alloc));
+	alloc.buffer_type = MSM_VIDC_QUEUE;
+	alloc.region     = MSM_VIDC_NON_SECURE;
+	alloc.size       = q_size;
+	alloc.cached     = false;
+	alloc.secure     = false;
+	alloc.map_kernel = true;
+	rc = msm_vidc_memory_alloc(core, &alloc);
+	if (rc) {
+		d_vpr_e("%s: alloc failed\n", __func__);
+		goto fail_alloc_queue;
+	}
+
+	memset(&map, 0, sizeof(map));
+	map.buffer_type  = alloc.buffer_type;
+	map.region       = alloc.region;
+	map.dmabuf       = alloc.dmabuf;
+	rc = msm_vidc_memory_map(core, &map);
+	if (rc) {
+		d_vpr_e("%s: map failed\n", __func__);
+		goto fail_alloc_queue;
+	}
+
+	core->iface_q_table.align_virtual_addr = alloc.kvaddr;
+	core->iface_q_table.align_device_addr = map.device_addr;
+	core->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
+	core->iface_q_table.alloc = alloc;
+	core->iface_q_table.map = map;
+	offset += core->iface_q_table.mem_size;
+
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		iface_q = &core->iface_queues[i];
+		iface_q->q_array.align_device_addr = map.device_addr + offset;
+		iface_q->q_array.align_virtual_addr = alloc.kvaddr + offset;
+		iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
+		offset += iface_q->q_array.mem_size;
+		iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
+				core->iface_q_table.align_virtual_addr, i);
+		__set_queue_hdr_defaults(iface_q->q_hdr);
+	}
+
+	q_tbl_hdr = (struct hfi_queue_table_header *)
+			core->iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->device_addr = (void *)core;
+	strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name));
+	q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
+
+	iface_q = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+
+	iface_q = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+
+	iface_q = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	/*
+	 * Set receive request to zero on debug queue as there is no
+	 * need of interrupt from video hardware for debug messages
+	 */
+	q_hdr->qhdr_rx_req = 0;
+
+	/* sfr buffer */
+	memset(&alloc, 0, sizeof(alloc));
+	alloc.buffer_type = MSM_VIDC_QUEUE;
+	alloc.region     = MSM_VIDC_NON_SECURE;
+	alloc.size       = ALIGNED_SFR_SIZE;
+	alloc.cached     = false;
+	alloc.secure     = false;
+	alloc.map_kernel = true;
+	rc = msm_vidc_memory_alloc(core, &alloc);
+	if (rc) {
+		d_vpr_e("%s: sfr alloc failed\n", __func__);
+		goto fail_alloc_queue;
+	}
+	memset(&map, 0, sizeof(map));
+	map.buffer_type  = alloc.buffer_type;
+	map.region       = alloc.region;
+	map.dmabuf       = alloc.dmabuf;
+	rc = msm_vidc_memory_map(core, &map);
+	if (rc) {
+		d_vpr_e("%s: sfr map failed\n", __func__);
+		goto fail_alloc_queue;
+	}
+	core->sfr.align_device_addr = map.device_addr;
+	core->sfr.align_virtual_addr = alloc.kvaddr;
+	core->sfr.mem_size = ALIGNED_SFR_SIZE;
+	core->sfr.alloc = alloc;
+	core->sfr.map = map;
+	/* write sfr buffer size in first word */
+	*((u32 *)core->sfr.align_virtual_addr) = ALIGNED_SFR_SIZE;
+
+	rc = call_venus_op(core, setup_ucregion_memmap, core);
+	if (rc)
+		return rc;
+
+	return 0;
+fail_alloc_queue:
+	return -ENOMEM;
+}
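
The shared-memory layout built by __interface_queues_init() is simply the queue table header followed by the command, message and debug queues at fixed offsets, with the SFR buffer allocated and mapped separately. A small sketch of the offset computation, using placeholder sizes rather than the real VIDC_IFACEQ_* constants:

    #include <stdio.h>

    #define NUM_Q		3		/* cmd, msg, dbg */
    #define Q_TBL_SIZE	1024		/* placeholder for VIDC_IFACEQ_TABLE_SIZE */
    #define Q_SIZE		(64 * 1024)	/* placeholder for VIDC_IFACEQ_QUEUE_SIZE */

    int main(void)
    {
    	const char *names[NUM_Q] = { "cmd", "msg", "dbg" };
    	unsigned int offset = Q_TBL_SIZE;	/* queues start after the table */
    	int i;

    	printf("queue table header @ offset 0\n");
    	for (i = 0; i < NUM_Q; i++) {
    		printf("%s queue @ offset %u\n", names[i], offset);
    		offset += Q_SIZE;
    	}
    	printf("end of queue region: %u bytes\n", offset);
    	return 0;
    }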
+
+static int __load_fw(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	rc = __init_resources(core);
+	if (rc) {
+		d_vpr_e("Failed to init resources: %d\n", rc);
+		goto fail_init_res;
+	}
+
+	rc = __venus_power_on(core);
+	if (rc) {
+		d_vpr_e("%s: power on failed\n", __func__);
+		goto fail_venus_power_on;
+	}
+
+	if (!core->dt->fw_cookie) {
+		core->dt->fw_cookie = subsystem_get_with_fwname("venus",
+								core->dt->fw_name);
+		if (IS_ERR_OR_NULL(core->dt->fw_cookie)) {
+			d_vpr_e("%s: firmware download failed\n", __func__);
+			core->dt->fw_cookie = NULL;
+			rc = -ENOMEM;
+			goto fail_load_fw;
+		}
+	}
+
+	rc = __protect_cp_mem(core);
+	if (rc) {
+		d_vpr_e("%s: protect memory failed\n", __func__);
+		goto fail_protect_mem;
+	}
+	/*
+	 * Hand off control of regulators to h/w _after_ loading fw.
+	 * Note that the GDSC will turn off when switching from normal
+	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
+	 * present.
+	 */
+	if (__enable_hw_power_collapse(core))
+		d_vpr_e("%s: hardware power collapse unsuccessful\n", __func__);
+
+	return rc;
+fail_protect_mem:
+	if (core->dt->fw_cookie)
+		subsystem_put(core->dt->fw_cookie);
+	core->dt->fw_cookie = NULL;
+fail_load_fw:
+	call_venus_op(core, power_off, core);
+fail_venus_power_on:
+	__deinit_resources(core);
+fail_init_res:
+	return rc;
+}
+
+static void __unload_fw(struct msm_vidc_core *core)
+{
+	if (!core->dt->fw_cookie)
+		return;
+
+	cancel_delayed_work(&core->pm_work);
+	if (core->state != MSM_VIDC_CORE_DEINIT)
+		flush_workqueue(core->pm_workq);
+
+	subsystem_put(core->dt->fw_cookie);
+	__interface_queues_release(core);
+	call_venus_op(core, power_off, core);
+	core->dt->fw_cookie = NULL;
+	__deinit_resources(core);
+
+	d_vpr_h("Firmware unloaded successfully\n");
+}
+
+void venus_hfi_work_handler(struct work_struct *work)
+{
+	struct msm_vidc_core *core;
+
+	core = container_of(work, struct msm_vidc_core, device_work);
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+	d_vpr_e("%s(): core %pK\n", __func__, core);
+}
+
+void venus_hfi_pm_work_handler(struct work_struct *work)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+
+	core = container_of(work, struct msm_vidc_core, pm_work.work);
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return;
+	}
+	d_vpr_e("%s(): core %pK\n", __func__, core);
+
+	/*
+	 * It is ok to check this variable outside the lock since
+	 * it is being updated in this context only
+	 */
+	if (core->skip_pc_count >= VIDC_MAX_PC_SKIP_COUNT) {
+		d_vpr_e("Failed to PC for %d times\n",
+				core->skip_pc_count);
+		core->skip_pc_count = 0;
+		//__process_fatal_error(core);
+		return;
+	}
+
+	mutex_lock(&core->lock);
+	rc = __power_collapse(core, false);
+	switch (rc) {
+	case 0:
+		core->skip_pc_count = 0;
+		/* Cancel pending delayed works if any */
+		__cancel_power_collapse_work(core);
+		d_vpr_h("%s: power collapse successful!\n", __func__);
+		break;
+	case -EBUSY:
+		core->skip_pc_count = 0;
+		d_vpr_h("%s: retry PC as dsp is busy\n", __func__);
+		__schedule_power_collapse_work(core);
+		break;
+	case -EAGAIN:
+		core->skip_pc_count++;
+		d_vpr_e("%s: retry power collapse (count %d)\n",
+			__func__, core->skip_pc_count);
+		__schedule_power_collapse_work(core);
+		break;
+	default:
+		d_vpr_e("%s: power collapse failed\n", __func__);
+		break;
+	}
+	mutex_unlock(&core->lock);
+}
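
The retry policy above distinguishes three outcomes of __power_collapse(): success and -EBUSY reset the skip counter (with -EBUSY rescheduling the work), while -EAGAIN reschedules and counts toward the bail-out limit checked at the top of the handler. A minimal sketch of that bookkeeping, with a placeholder value standing in for VIDC_MAX_PC_SKIP_COUNT:

    #include <errno.h>
    #include <stdio.h>

    #define VIDC_MAX_PC_SKIP_COUNT	10	/* placeholder value */

    static int skip_pc_count;

    /* Returns 1 if the delayed work should be rescheduled. */
    static int handle_pc_result(int rc)
    {
    	switch (rc) {
    	case 0:			/* collapsed: reset counter, cancel work */
    		skip_pc_count = 0;
    		return 0;
    	case -EBUSY:		/* DSP busy: reset counter, retry later */
    		skip_pc_count = 0;
    		return 1;
    	case -EAGAIN:		/* FW not ready: retry and count the skip */
    		skip_pc_count++;
    		return 1;
    	default:		/* hard failure: give up for now */
    		return 0;
    	}
    }

    int main(void)
    {
    	handle_pc_result(-EAGAIN);
    	handle_pc_result(-EAGAIN);
    	printf("skip_pc_count = %d, bail out once it reaches %d\n",
    	       skip_pc_count, VIDC_MAX_PC_SKIP_COUNT);
    	return 0;
    }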
+
+int venus_hfi_core_init(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	d_vpr_h("%s(): core %p\n", __func__, core);
+
+	if (!core) {
+		d_vpr_e("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	core->packet_size = 4096;
+	core->packet = kzalloc(core->packet_size, GFP_KERNEL);
+	if (!core->packet) {
+		d_vpr_e("%s(): core packet allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	rc = __load_fw(core);
+	if (rc)
+		goto error;
+
+	rc = __interface_queues_init(core);
+	if (rc)
+		goto error;
+
+	rc = call_venus_op(core, boot_firmware, core);
+	if (rc)
+		goto error;
+
+	rc =  hfi_packet_sys_init(core, core->packet, core->packet_size);
+	if (rc)
+		goto error;
+
+	rc = __iface_cmdq_write(core, core->packet);
+	if (rc)
+		goto error;
+
+	rc = hfi_packet_image_version(core, core->packet, core->packet_size);
+	if (rc)
+		goto error;
+
+	rc = __iface_cmdq_write(core, core->packet);
+	if (rc)
+		goto error;
+
+	__sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);
+	__enable_subcaches(core);
+	__set_subcaches(core);
+
+	d_vpr_h("%s(): successful\n", __func__);
+	return 0;
+
+error:
+	d_vpr_h("%s(): failed\n", __func__);
+	__unload_fw(core);
+	kfree(core->packet);
+	return rc;
+}
+
+int venus_hfi_core_release(struct msm_vidc_core *core)
+{
+	d_vpr_h("%s(): core %p\n", __func__, core);
+
+	return 0;
+}
+
+int venus_hfi_suspend(struct msm_vidc_core *core)
+{
+	int rc = 0;
+
+	if (!core) {
+		d_vpr_e("%s: invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	d_vpr_h("Suspending Venus\n");
+	rc = __power_collapse(core, true);
+	if (!rc) {
+		/* Cancel pending delayed works if any */
+		__cancel_power_collapse_work(core);
+	} else {
+		d_vpr_e("%s: Venus is busy\n", __func__);
+		rc = -EBUSY;
+	}
+
+	return rc;
+}
+
+int venus_hfi_session_open(struct msm_vidc_core *core, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	d_vpr_h("%s(): inst %p, core %p\n",
+		__func__, inst, core);
+
+	return rc;
+}
+

+ 1355 - 0
include/uapi/vidc/media/msm_media_info.h

@@ -0,0 +1,1355 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_MEDIA_INFO_H__
+#define __MSM_MEDIA_INFO_H__
+
+/* Width and Height should be multiples of 16 */
+#define INTERLACE_WIDTH_MAX 1920
+#define INTERLACE_HEIGHT_MAX 1920
+#define INTERLACE_MB_PER_FRAME_MAX ((1920*1088)/256)
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+	(((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
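
As a quick sanity check of these helpers against the NV12 formulas documented below (strides aligned to 512, Y scanlines to 512, UV scanlines to 256, total size to 4096), the following standalone snippet, which is not part of the patch, computes the buffer size for an assumed 1920x1080 frame:

    #include <stdio.h>

    #define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
    	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
    	(((__sz) + (__align) - 1) & (~((__align) - 1))))

    int main(void)
    {
    	unsigned int w = 1920, h = 1080;	/* assumed frame size */
    	unsigned int y_stride = MSM_MEDIA_ALIGN(w, 512);
    	unsigned int uv_stride = MSM_MEDIA_ALIGN(w, 512);
    	unsigned int y_sclines = MSM_MEDIA_ALIGN(h, 512);
    	unsigned int uv_sclines = MSM_MEDIA_ALIGN(h / 2, 256);
    	unsigned int size = MSM_MEDIA_ALIGN(y_stride * y_sclines +
    					    uv_stride * uv_sclines, 4096);

    	/* expect stride 2048, Y scanlines 1536, UV scanlines 768 */
    	printf("NV12 %ux%u: stride %u, total buffer size %u bytes\n",
    	       w, h, y_stride, size);
    	return 0;
    }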
+
+enum color_fmts {
+	/* Venus NV12:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 512
+	 * UV_Stride : Width aligned to 512
+	 * Y_Scanlines: Height aligned to 512
+	 * UV_Scanlines: Height/2 aligned to 256
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines, 4096)
+	 */
+	COLOR_FMT_NV12,
+	/* Venus NV21:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * V U V U V U V U V U V U . . . .  ^
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Padding & Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 512
+	 * UV_Stride : Width aligned to 512
+	 * Y_Scanlines: Height aligned to 512
+	 * UV_Scanlines: Height/2 aligned to 256
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines, 4096)
+	 */
+	COLOR_FMT_NV21,
+	/*
+	 * The buffer can be of 2 types:
+	 * (1) Venus NV12 UBWC Progressive
+	 * (2) Venus NV12 UBWC Interlaced
+	 *
+	 * (1) Venus NV12 UBWC Progressive Buffer Format:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Y_Stride = align(Width, 128)
+	 * UV_Stride = align(Width, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 32)
+	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
+	 *
+	 *
+	 * (2) Venus NV12 UBWC Interlaced Buffer Format:
+	 * Compressed Macro-tile format for NV12 interlaced.
+	 * Contains 8 planes in the following order -
+	 * (A) Y_Meta_Top_Field_Plane
+	 * (B) Y_UBWC_Top_Field_Plane
+	 * (C) UV_Meta_Top_Field_Plane
+	 * (D) UV_UBWC_Top_Field_Plane
+	 * (E) Y_Meta_Bottom_Field_Plane
+	 * (F) Y_UBWC_Bottom_Field_Plane
+	 * (G) UV_Meta_Bottom_Field_Plane
+	 * (H) UV_UBWC_Bottom_Field_Plane
+	 * Y_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Top_Field_Plane.
+	 * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+	 * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit Y samples for top field of an interlaced frame.
+	 *
+	 * UV_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Top_Field_Plane.
+	 * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+	 * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit subsampled color difference samples for top field of an
+	 * interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+	 * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+	 * format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+	 * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+	 *
+	 * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+	 * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+	 * macro-tile format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+	 * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit subsampled color difference samples for bottom
+	 * field of an interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * <-----Y_TF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_TF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_TF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_TF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_TF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_TF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_TF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_TF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * <-----Y_BF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_BF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_BF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_BF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_BF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_BF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_BF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_BF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Half_height = (Height+1)>>1
+	 * Y_TF_Stride = align(Width, 128)
+	 * UV_TF_Stride = align(Width, 128)
+	 * Y_TF_Scanlines = align(Half_height, 32)
+	 * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+	 * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+	 * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_TF_Meta_Plane_size =
+	 *     align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+	 * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_TF_Meta_Plane_size =
+	 *     align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+	 * Y_BF_Stride = align(Width, 128)
+	 * UV_BF_Stride = align(Width, 128)
+	 * Y_BF_Scanlines = align(Half_height, 32)
+	 * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+	 * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+	 * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_BF_Meta_Plane_size =
+	 *     align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+	 * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_BF_Meta_Plane_size =
+	 *     align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align(Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+	 *           Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+	 *           Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+	 *           Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size, 4096)
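+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 interlaced frame):
+	 * Half_height = 540
+	 * Y_TF_Stride = align(1920, 128) = 1920
+	 * Y_TF_Scanlines = align(540, 32) = 544
+	 * Y_UBWC_TF_Plane_size = align(1920 * 544, 4096) = 1044480
+	 * UV_TF_Scanlines = align(270, 32) = 288
+	 * UV_UBWC_TF_Plane_size = align(1920 * 288, 4096) = 552960
+	 * The bottom-field planes are sized identically, and the meta planes
+	 * follow from the tile dimensions in the same way.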
+	 */
+	COLOR_FMT_NV12_UBWC,
+	/* Venus NV12 10-bit UBWC:
+	 * Compressed Macro-tile format for 10-bit NV12 (TP10).
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 4/3, 256)
+	 * UV_Stride = align(Width * 4/3, 256)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
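+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame, pixel planes only):
+	 * Y_Stride = align(1920 * 4/3, 256) = 2560
+	 * Y_Scanlines = 1088
+	 * Y_UBWC_Plane_Size = align(2560 * 1088, 4096) = 2785280
+	 * UV_Scanlines = align(540, 16) = 544
+	 * UV_UBWC_Plane_Size = align(2560 * 544, 4096) = 1392640
+	 * The two meta planes add comparatively small 4096-aligned amounts on
+	 * top of this, per the formulas above.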
+	 */
+	COLOR_FMT_NV12_BPP10_UBWC,
+	/* Venus RGBA8888 format:
+	 * Contains 1 plane in the following order -
+	 * (A) RGBA plane
+	 *
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 *
+	 * Total size = align(RGB_Plane_size , 4096)
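+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame):
+	 * RGB_Stride = align(1920 * 4, 128) = 7680
+	 * RGB_Scanlines = align(1080, 32) = 1088
+	 * Total size = align(7680 * 1088, 4096) = 8355840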
+	 */
+	COLOR_FMT_RGBA8888,
+	/* Venus RGBA8888 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096)
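+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame and the 16x4 RGB tile used by the meta helper
+	 * functions below):
+	 * RGB_Stride = align(1920 * 4, 256) = 7680
+	 * RGB_Scanlines = align(1080, 16) = 1088
+	 * RGB_Plane_size = align(7680 * 1088, 4096) = 8355840
+	 * RGB_Meta_Stride = align(roundup(1920, 16), 64) = 128
+	 * RGB_Meta_Scanlines = align(roundup(1080, 4), 16) = 272
+	 * RGB_Meta_Plane_size = align(128 * 272, 4096) = 36864
+	 * Total size = align(8355840 + 36864, 4096) = 8392704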
+	 */
+	COLOR_FMT_RGBA8888_UBWC,
+	/* Venus RGBA1010102 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096)
+	 */
+	COLOR_FMT_RGBA1010102_UBWC,
+	/* Venus RGB565 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGB plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 2, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096)
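+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame): RGB_Stride = align(1920 * 2, 256) = 3840,
+	 * RGB_Scanlines = 1088, RGB_Plane_size = 4177920; the meta plane is
+	 * the same 36864 bytes as in the RGBA8888 UBWC example, since it
+	 * depends only on the tile geometry, giving a total of 4214784.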
+	 */
+	COLOR_FMT_RGB565_UBWC,
+	/* P010 UBWC:
+	 * Compressed Macro-tile format for P010.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 2, 256)
+	 * UV_Stride = align(Width * 2, 256)
+	 * Y_Scanlines = align(Height, 16)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
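+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame, pixel planes only):
+	 * Y_Stride = align(1920 * 2, 256) = 3840
+	 * Y_Scanlines = align(1080, 16) = 1088
+	 * Y_UBWC_Plane_Size = align(3840 * 1088, 4096) = 4177920
+	 * UV_Scanlines = align(540, 16) = 544
+	 * UV_UBWC_Plane_Size = align(3840 * 544, 4096) = 2088960
+	 * The two meta planes add comparatively small 4096-aligned amounts on
+	 * top of this, per the formulas above.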
+	 */
+	COLOR_FMT_P010_UBWC,
+	/* Venus P010:
+	 * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+	 * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width * 2 aligned to 256
+	 * UV_Stride : Width * 2 aligned to 256
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines, 4096)
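+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame):
+	 * Y_Stride = UV_Stride = align(1920 * 2, 256) = 3840
+	 * Y_Scanlines = align(1080, 32) = 1088
+	 * UV_Scanlines = align(540, 16) = 544
+	 * Total size = align(3840 * 1088 + 3840 * 544, 4096) = 6266880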
+	 */
+	COLOR_FMT_P010,
+	/* Venus NV12_512:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 512
+	 * UV_Stride : Width aligned to 512
+	 * Y_Scanlines: Height aligned to 512
+	 * UV_Scanlines: Height/2 aligned to 256
+	 * Total size = align((Y_Stride * Y_Scanlines
+	 *          + UV_Stride  * UV_Scanlines), 4096)
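+	 *
+	 * Worked example (informative only, editor's illustration assuming a
+	 * 1920x1080 frame):
+	 * Y_Stride = UV_Stride = align(1920, 512) = 2048
+	 * Y_Scanlines = align(1080, 512) = 1536
+	 * UV_Scanlines = align(540, 256) = 768
+	 * Total size = align(2048 * 1536 + 2048 * 768, 4096) = 4718592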
+	 */
+	COLOR_FMT_NV12_512,
+};
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(unsigned int color_fmt,
+	unsigned int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12_512:
+		alignment = 512;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
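+		/*
+		 * TP10 packs three 10-bit samples into each 32-bit word,
+		 * hence the 4/3 byte expansion below; aligning the pixel
+		 * width to 192 first keeps the expanded stride a whole
+		 * multiple of 256 bytes.
+		 */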
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(unsigned int color_fmt,
+	unsigned int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_512:
+		alignment = 512;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(unsigned int color_fmt,
+	unsigned int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12_512:
+		alignment = 512;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		alignment = 16;
+		break;
+	default:
+		return 0;
+	}
+	sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(unsigned int color_fmt,
+	unsigned int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_512:
+		alignment = 256;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 16;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 32;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(unsigned int color_fmt,
+	unsigned int width)
+{
+	int y_tile_width = 0, y_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_width = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_tile_width = 48;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+	y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+	return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(unsigned int color_fmt,
+	unsigned int height)
+{
+	int y_tile_height = 0, y_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		y_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+	y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+	return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(unsigned int color_fmt,
+	unsigned int width)
+{
+	int uv_tile_width = 0, uv_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_width = 16;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		uv_tile_width = 24;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+	uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+	return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(unsigned int color_fmt,
+	unsigned int height)
+{
+	int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		uv_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+	uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+	return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(unsigned int color_fmt,
+	unsigned int width)
+{
+	unsigned int alignment = 0, stride = 0, bpp = 4;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 128;
+		break;
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 256;
+		bpp = 2;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+		alignment = 256;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+	return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(unsigned int color_fmt,
+	unsigned int height)
+{
+	unsigned int alignment = 0, scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 32;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+	return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(unsigned int color_fmt,
+	unsigned int width)
+{
+	int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_width = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+	rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+	return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(unsigned int color_fmt,
+	unsigned int height)
+{
+	int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+	rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+	return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(unsigned int color_fmt,
+	unsigned int width, unsigned int height)
+{
+	unsigned int size = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+	unsigned int rgb_stride = 0, rgb_scanlines = 0;
+	unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+	unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+	rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_P010:
+	case COLOR_FMT_NV12_512:
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines;
+		size = y_plane + uv_plane;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
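+		/*
+		 * Resolutions small enough to be interlaced are sized for two
+		 * fields: each plane below is computed from the half-height
+		 * field and the sum is doubled to cover top and bottom fields.
+		 */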
+		if (width <= INTERLACE_WIDTH_MAX &&
+			height <= INTERLACE_HEIGHT_MAX &&
+			(height * width) / 256 <= INTERLACE_MB_PER_FRAME_MAX) {
+			y_sclines =
+				VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+			y_ubwc_plane =
+				MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+			uv_sclines =
+				VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+			uv_ubwc_plane =
+				MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+			y_meta_scanlines =
+			VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+			y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+			uv_meta_scanlines =
+			VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+			uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+				uv_meta_scanlines, 4096);
+			size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+				uv_meta_plane)*2;
+		} else {
+			y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+			y_ubwc_plane =
+				MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+			uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+			uv_ubwc_plane =
+				MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+			y_meta_scanlines =
+				VENUS_Y_META_SCANLINES(color_fmt, height);
+			y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+			uv_meta_scanlines =
+				VENUS_UV_META_SCANLINES(color_fmt, height);
+			uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+				uv_meta_scanlines, 4096);
+			size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+				uv_meta_plane);
+		}
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane;
+		break;
+	case COLOR_FMT_P010_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane;
+		break;
+	case COLOR_FMT_RGBA8888:
+		rgb_plane = MSM_MEDIA_ALIGN(rgb_stride  * rgb_scanlines, 4096);
+		size = rgb_plane;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+							4096);
+		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+					height);
+		rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+					rgb_meta_scanlines, 4096);
+		size = rgb_ubwc_plane + rgb_meta_plane;
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return MSM_MEDIA_ALIGN(size, 4096);
+}
+
+static inline unsigned int VENUS_BUFFER_SIZE_USED(unsigned int color_fmt,
+	unsigned int width, unsigned int height, unsigned int interlace)
+{
+	unsigned int size = 0;
+	unsigned int y_stride, uv_stride, y_sclines, uv_sclines;
+	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+
+	if (!width || !height)
+		goto invalid_input;
+
+	if (!interlace && color_fmt == COLOR_FMT_NV12_UBWC) {
+		y_stride = VENUS_Y_STRIDE(color_fmt, width);
+		uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+		y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines =
+			VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+			y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines =
+			VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+			uv_meta_scanlines, 4096);
+		size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+	} else {
+		size = VENUS_BUFFER_SIZE(color_fmt, width, height);
+	}
+invalid_input:
+	return size;
+}
+
+#endif
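
Editor's note: the inline helpers above are self-contained, so a minimal,
hypothetical kernel-side sketch of sizing an NV12 UBWC capture buffer could
look like the following. The include path, the helper name and the chosen
resolution are assumptions for illustration, not part of this patch.

#include <linux/printk.h>
#include <media/msm_media_info.h>	/* assumed include path for this header */

static void example_nv12_ubwc_sizing(void)
{
	unsigned int width = 1920, height = 1080;	/* arbitrary example */
	unsigned int y_stride = VENUS_Y_STRIDE(COLOR_FMT_NV12_UBWC, width);
	unsigned int uv_stride = VENUS_UV_STRIDE(COLOR_FMT_NV12_UBWC, width);
	unsigned int y_sclines = VENUS_Y_SCANLINES(COLOR_FMT_NV12_UBWC, height);
	unsigned int size = VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);

	/* size already covers the UBWC meta planes and the 4K alignment */
	pr_info("NV12_UBWC %ux%u: y_stride=%u uv_stride=%u y_sclines=%u size=%u\n",
		width, height, y_stride, uv_stride, y_sclines, size);
}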

+ 32 - 0
include/uapi/vidc/media/msm_vidc_utils.h

@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_VIDC_UTILS_H__
+#define __MSM_VIDC_UTILS_H__
+
+#include <linux/types.h>
+#include <linux/v4l2-controls.h>
+
+/* vendor color format start */
+/* UBWC 8-bit Y/CbCr 4:2:0  */
+#define V4L2_PIX_FMT_NV12_UBWC                  v4l2_fourcc('Q', '1', '2', '8')
+/* NV12_512 8-bit Y/CbCr 4:2:0  */
+#define V4L2_PIX_FMT_NV12_512                   v4l2_fourcc('Q', '5', '1', '2')
+/* UBWC P010 10-bit Y/CbCr 4:2:0 */
+#define V4L2_PIX_FMT_NV12_P010_UBWC             v4l2_fourcc('Q', '1', '2', 'B')
+/* UBWC TP10 10-bit Y/CbCr 4:2:0 */
+#define V4L2_PIX_FMT_NV12_TP10_UBWC             v4l2_fourcc('Q', '1', '2', 'A')
+#define V4L2_PIX_FMT_RGBA8888_UBWC              v4l2_fourcc('Q', 'R', 'G', 'B')
+/* Y/CbCr 4:2:0 P10 Venus */
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS v4l2_fourcc('Q', 'P', '1', '0')
+#define V4L2_PIX_FMT_VIDC_META                  v4l2_fourcc('Q', 'M', 'E', 'T')
+/* vendor color format end */
+
+/* vendor controls start */
+#define V4L2_CID_MPEG_MSM_VIDC_BASE             (V4L2_CTRL_CLASS_MPEG | 0x2000)
+
+/* vendor controls end */
+
+#endif // __MSM_VIDC_UTILS_H__
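
Editor's note: a hypothetical userspace sketch of selecting the vendor NV12
UBWC fourcc on a decoder capture queue via the standard V4L2 ioctls might look
as follows. The single-plane layout, the function name and the install path of
this header are assumptions for illustration, not something this patch defines.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msm_vidc_utils.h>	/* assumed install path for this header */

static int set_nv12_ubwc_capture_format(int fd, unsigned int width,
					unsigned int height)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	fmt.fmt.pix_mp.width = width;
	fmt.fmt.pix_mp.height = height;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12_UBWC;
	fmt.fmt.pix_mp.num_planes = 1;	/* assumed single-plane UBWC layout */

	/* the driver is expected to fill in bytesperline/sizeimage on return */
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}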