
Camera: Bring over camera driver changes

Bring over camera driver changes as of msm-4.19
commit 5a5551a7 (Merge "msm: camera: reqmgr: Fix CRM
shift one req issue").

Change-Id: Ic0c2b2d74d1b3470c1c51d98228e312fb13c501a
Signed-off-by: Jigarkumar Zala <[email protected]>
Jigarkumar Zala, 6 years ago
parent
commit
05349feaa2
100 changed files with 34812 additions and 10 deletions
  1. Makefile (+21, -2)
  2. camera_stub.c (+0, -8)
  3. config/konacamera.conf (+1, -0)
  4. config/konacameraconf.h (+8, -0)
  5. drivers/Makefile (+13, -0)
  6. drivers/cam_cdm/Makefile (+12, -0)
  7. drivers/cam_cdm/cam_cdm.h (+253, -0)
  8. drivers/cam_cdm/cam_cdm_core_common.c (+587, -0)
  9. drivers/cam_cdm/cam_cdm_core_common.h (+45, -0)
  10. drivers/cam_cdm/cam_cdm_hw_core.c (+1135, -0)
  11. drivers/cam_cdm/cam_cdm_intf.c (+573, -0)
  12. drivers/cam_cdm/cam_cdm_intf_api.h (+202, -0)
  13. drivers/cam_cdm/cam_cdm_soc.c (+199, -0)
  14. drivers/cam_cdm/cam_cdm_soc.h (+21, -0)
  15. drivers/cam_cdm/cam_cdm_util.c (+717, -0)
  16. drivers/cam_cdm/cam_cdm_util.h (+161, -0)
  17. drivers/cam_cdm/cam_cdm_virtual.h (+18, -0)
  18. drivers/cam_cdm/cam_cdm_virtual_core.c (+382, -0)
  19. drivers/cam_cdm/cam_hw_cdm170_reg.h (+135, -0)
  20. drivers/cam_core/Makefile (+10, -0)
  21. drivers/cam_core/cam_context.c (+586, -0)
  22. drivers/cam_core/cam_context.h (+460, -0)
  23. drivers/cam_core/cam_context_utils.c (+1018, -0)
  24. drivers/cam_core/cam_context_utils.h (+33, -0)
  25. drivers/cam_core/cam_hw.h (+46, -0)
  26. drivers/cam_core/cam_hw_intf.h (+80, -0)
  27. drivers/cam_core/cam_hw_mgr_intf.h (+338, -0)
  28. drivers/cam_core/cam_node.c (+836, -0)
  29. drivers/cam_core/cam_node.h (+104, -0)
  30. drivers/cam_core/cam_subdev.c (+154, -0)
  31. drivers/cam_cpas/Makefile (+14, -0)
  32. drivers/cam_cpas/cam_cpas_hw.c (+1857, -0)
  33. drivers/cam_cpas/cam_cpas_hw.h (+212, -0)
  34. drivers/cam_cpas/cam_cpas_hw_intf.h (+128, -0)
  35. drivers/cam_cpas/cam_cpas_intf.c (+725, -0)
  36. drivers/cam_cpas/cam_cpas_soc.c (+614, -0)
  37. drivers/cam_cpas/cam_cpas_soc.h (+123, -0)
  38. drivers/cam_cpas/camss_top/Makefile (+9, -0)
  39. drivers/cam_cpas/camss_top/cam_camsstop_hw.c (+82, -0)
  40. drivers/cam_cpas/cpas_top/Makefile (+9, -0)
  41. drivers/cam_cpas/cpas_top/cam_cpastop_hw.c (+635, -0)
  42. drivers/cam_cpas/cpas_top/cam_cpastop_hw.h (+268, -0)
  43. drivers/cam_cpas/cpas_top/cpastop100.h (+531, -0)
  44. drivers/cam_cpas/cpas_top/cpastop_v150_100.h (+530, -0)
  45. drivers/cam_cpas/cpas_top/cpastop_v170_110.h (+538, -0)
  46. drivers/cam_cpas/cpas_top/cpastop_v175_100.h (+558, -0)
  47. drivers/cam_cpas/cpas_top/cpastop_v175_101.h (+558, -0)
  48. drivers/cam_cpas/cpas_top/cpastop_v175_120.h (+760, -0)
  49. drivers/cam_cpas/cpas_top/cpastop_v175_130.h (+760, -0)
  50. drivers/cam_cpas/cpas_top/cpastop_v480_100.h (+743, -0)
  51. drivers/cam_cpas/include/cam_cpas_api.h (+543, -0)
  52. drivers/cam_fd/Makefile (+17, -0)
  53. drivers/cam_fd/cam_fd_context.c (+249, -0)
  54. drivers/cam_fd/cam_fd_context.h (+30, -0)
  55. drivers/cam_fd/cam_fd_dev.c (+207, -0)
  56. drivers/cam_fd/fd_hw_mgr/Makefile (+17, -0)
  57. drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c (+1949, -0)
  58. drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h (+179, -0)
  59. drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h (+18, -0)
  60. drivers/cam_fd/fd_hw_mgr/fd_hw/Makefile (+16, -0)
  61. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c (+1164, -0)
  62. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h (+237, -0)
  63. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c (+235, -0)
  64. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h (+282, -0)
  65. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c (+295, -0)
  66. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h (+46, -0)
  67. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h (+63, -0)
  68. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h (+63, -0)
  69. drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v600.h (+61, -0)
  70. drivers/cam_icp/Makefile (+17, -0)
  71. drivers/cam_icp/cam_icp_context.c (+271, -0)
  72. drivers/cam_icp/cam_icp_context.h (+42, -0)
  73. drivers/cam_icp/cam_icp_subdev.c (+275, -0)
  74. drivers/cam_icp/fw_inc/hfi_intf.h (+168, -0)
  75. drivers/cam_icp/fw_inc/hfi_reg.h (+336, -0)
  76. drivers/cam_icp/fw_inc/hfi_session_defs.h (+564, -0)
  77. drivers/cam_icp/fw_inc/hfi_sys_defs.h (+540, -0)
  78. drivers/cam_icp/hfi.c (+942, -0)
  79. drivers/cam_icp/icp_hw/Makefile (+12, -0)
  80. drivers/cam_icp/icp_hw/a5_hw/Makefile (+14, -0)
  81. drivers/cam_icp/icp_hw/a5_hw/a5_core.c (+518, -0)
  82. drivers/cam_icp/icp_hw/a5_hw/a5_core.h (+81, -0)
  83. drivers/cam_icp/icp_hw/a5_hw/a5_dev.c (+228, -0)
  84. drivers/cam_icp/icp_hw/a5_hw/a5_soc.c (+191, -0)
  85. drivers/cam_icp/icp_hw/a5_hw/a5_soc.h (+36, -0)
  86. drivers/cam_icp/icp_hw/bps_hw/Makefile (+14, -0)
  87. drivers/cam_icp/icp_hw/bps_hw/bps_core.c (+416, -0)
  88. drivers/cam_icp/icp_hw/bps_hw/bps_core.h (+39, -0)
  89. drivers/cam_icp/icp_hw/bps_hw/bps_dev.c (+207, -0)
  90. drivers/cam_icp/icp_hw/bps_hw/bps_soc.c (+162, -0)
  91. drivers/cam_icp/icp_hw/bps_hw/bps_soc.h (+26, -0)
  92. drivers/cam_icp/icp_hw/icp_hw_mgr/Makefile (+19, -0)
  93. drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c (+5582, -0)
  94. drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h (+397, -0)
  95. drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h (+74, -0)
  96. drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h (+37, -0)
  97. drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h (+33, -0)
  98. drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h (+37, -0)
  99. drivers/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h (+47, -0)
  100. drivers/cam_icp/icp_hw/ipe_hw/Makefile (+14, -0)

Makefile (+21, -2)

@@ -1,4 +1,23 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-ccflags-y := -Wno-unused-function
-obj-y := camera_stub.o
+# auto-detect subdirs
+ifeq ($(CONFIG_ARCH_KONA), y)
+include $(srctree)/techpack/camera/config/konacamera.conf
+endif
+
+# Use USERINCLUDE when you must reference the UAPI directories only.
+USERINCLUDE     += \
+               -I$(srctree)/techpack/camera/include/uapi
+
+# Use LINUXINCLUDE when you must reference the include/ directory.
+# Needed to be compatible with the O= option
+LINUXINCLUDE    += \
+                -I$(srctree)/techpack/camera/include/uapi \
+                -I$(srctree)/techpack/camera/include
+
+ifeq ($(CONFIG_ARCH_KONA), y)
+LINUXINCLUDE    += \
+		-include $(srctree)/techpack/camera/config/konacameraconf.h
+endif
+
+obj-y += drivers/

camera_stub.c (+0, -8)

@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- */
-
-static void _camera_techpack_stub(void)
-{
-}

config/konacamera.conf (+1, -0)

@@ -0,0 +1 @@
+export CONFIG_SPECTRA_CAMERA=y

config/konacameraconf.h (+8, -0)

@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+
+#define CONFIG_SPECTRA_CAMERA 1
+

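The one-line conf above exports CONFIG_SPECTRA_CAMERA to Kbuild, so the obj-$(CONFIG_SPECTRA_CAMERA) rules in the driver Makefiles resolve to obj-y, while the "-include konacameraconf.h" flag added to LINUXINCLUDE in the top-level Makefile makes the same symbol visible to the C preprocessor in every translation unit. A minimal standalone sketch of that second effect (illustrative only, not code from this commit; build with gcc -include config/konacameraconf.h demo.c):

#include <stdio.h>

int main(void)
{
#ifdef CONFIG_SPECTRA_CAMERA
	printf("SPECTRA camera support compiled in\n");
#else
	printf("SPECTRA camera support compiled out\n");
#endif
	return 0;
}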
drivers/Makefile (+13, -0)

@@ -0,0 +1,13 @@
+obj-y += cam_req_mgr/
+obj-y += cam_utils/
+obj-y += cam_core/
+obj-y += cam_sync/
+obj-y += cam_smmu/
+obj-y += cam_cpas/
+obj-y += cam_cdm/
+obj-y += cam_isp/
+obj-y += cam_sensor_module/
+obj-y += cam_icp/
+obj-y += cam_jpeg/
+obj-y += cam_fd/
+obj-y += cam_lrme/

drivers/cam_cdm/Makefile (+12, -0)

@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_util.o cam_cdm_intf.o cam_cdm_soc.o\
+				cam_cdm_core_common.o cam_cdm_virtual_core.o \
+				cam_cdm_hw_core.o

drivers/cam_cdm/cam_cdm.h (+253, -0)

@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_H_
+#define _CAM_CDM_H_
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_debug_util.h"
+
+#define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
+#define CAM_SW_CDM_INDEX                  0
+#define CAM_CDM_INFLIGHT_WORKS            5
+#define CAM_CDM_HW_RESET_TIMEOUT          300
+
+#define CAM_CDM_HW_ID_MASK      0xF
+#define CAM_CDM_HW_ID_SHIFT     0x5
+#define CAM_CDM_CLIENTS_ID_MASK 0x1F
+
+#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
+	CAM_CDM_HW_ID_MASK)
+#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
+	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
+	 ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
+#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
+#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
+#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
+
+/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
+enum cam_cdm_reg_attr {
+	CAM_REG_ATTR_READ,
+	CAM_REG_ATTR_WRITE,
+	CAM_REG_ATTR_READ_WRITE,
+};
+
+/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
+enum cam_cdm_hw_process_intf_cmd {
+	CAM_CDM_HW_INTF_CMD_ACQUIRE,
+	CAM_CDM_HW_INTF_CMD_RELEASE,
+	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
+	CAM_CDM_HW_INTF_CMD_RESET_HW,
+	CAM_CDM_HW_INTF_CMD_INVALID,
+};
+
+/* enum cam_cdm_regs - CDM driver offset enums.*/
+enum cam_cdm_regs {
+	/*cfg_offsets 0*/
+	CDM_CFG_HW_VERSION,
+	CDM_CFG_TITAN_VERSION,
+	CDM_CFG_RST_CMD,
+	CDM_CFG_CGC_CFG,
+	CDM_CFG_CORE_CFG,
+	CDM_CFG_CORE_EN,
+	CDM_CFG_FE_CFG,
+	/*irq_offsets 7*/
+	CDM_IRQ_MASK,
+	CDM_IRQ_CLEAR,
+	CDM_IRQ_CLEAR_CMD,
+	CDM_IRQ_SET,
+	CDM_IRQ_SET_CMD,
+	CDM_IRQ_STATUS,
+	CDM_IRQ_USR_DATA,
+	/*BL FIFO Registers 14*/
+	CDM_BL_FIFO_BASE_REG,
+	CDM_BL_FIFO_LEN_REG,
+	CDM_BL_FIFO_STORE_REG,
+	CDM_BL_FIFO_CFG,
+	CDM_BL_FIFO_RB,
+	CDM_BL_FIFO_BASE_RB,
+	CDM_BL_FIFO_LEN_RB,
+	CDM_BL_FIFO_PENDING_REQ_RB,
+	/*CDM System Debug Registers 22*/
+	CDM_DBG_WAIT_STATUS,
+	CDM_DBG_SCRATCH_0_REG,
+	CDM_DBG_SCRATCH_1_REG,
+	CDM_DBG_SCRATCH_2_REG,
+	CDM_DBG_SCRATCH_3_REG,
+	CDM_DBG_SCRATCH_4_REG,
+	CDM_DBG_SCRATCH_5_REG,
+	CDM_DBG_SCRATCH_6_REG,
+	CDM_DBG_SCRATCH_7_REG,
+	CDM_DBG_LAST_AHB_ADDR,
+	CDM_DBG_LAST_AHB_DATA,
+	CDM_DBG_CORE_DBUG,
+	CDM_DBG_LAST_AHB_ERR_ADDR,
+	CDM_DBG_LAST_AHB_ERR_DATA,
+	CDM_DBG_CURRENT_BL_BASE,
+	CDM_DBG_CURRENT_BL_LEN,
+	CDM_DBG_CURRENT_USED_AHB_BASE,
+	CDM_DBG_DEBUG_STATUS,
+	/*FE Bus Miser Registers 40*/
+	CDM_BUS_MISR_CFG_0,
+	CDM_BUS_MISR_CFG_1,
+	CDM_BUS_MISR_RD_VAL,
+	/*Performance Counter registers 43*/
+	CDM_PERF_MON_CTRL,
+	CDM_PERF_MON_0,
+	CDM_PERF_MON_1,
+	CDM_PERF_MON_2,
+	/*Spare registers 47*/
+	CDM_SPARE,
+};
+
+/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
+struct cam_cdm_reg_offset {
+	uint32_t offset;
+	enum cam_cdm_reg_attr attribute;
+};
+
+/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
+struct cam_cdm_reg_offset_table {
+	uint32_t first_offset;
+	uint32_t last_offset;
+	uint32_t reg_count;
+	const struct cam_cdm_reg_offset *offsets;
+	uint32_t offset_max_size;
+};
+
+/* enum cam_cdm_flags - Bit fields for CDM flags used */
+enum cam_cdm_flags {
+	CAM_CDM_FLAG_SHARED_CDM,
+	CAM_CDM_FLAG_PRIVATE_CDM,
+};
+
+/* enum cam_cdm_type - Enum for possible CAM CDM types */
+enum cam_cdm_type {
+	CAM_VIRTUAL_CDM,
+	CAM_HW_CDM,
+};
+
+/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */
+enum cam_cdm_mem_base_index {
+	CAM_HW_CDM_BASE_INDEX,
+	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
+};
+
+/* struct cam_cdm_client - struct for cdm clients data.*/
+struct cam_cdm_client {
+	struct cam_cdm_acquire_data data;
+	void __iomem  *changebase_addr;
+	uint32_t stream_on;
+	uint32_t refcount;
+	struct mutex lock;
+	uint32_t handle;
+};
+
+/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
+struct cam_cdm_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
+enum cam_cdm_bl_cb_type {
+	CAM_HW_CDM_BL_CB_CLIENT = 1,
+	CAM_HW_CDM_BL_CB_INTERNAL,
+};
+
+/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
+struct cam_cdm_bl_cb_request_entry {
+	uint8_t bl_tag;
+	enum cam_cdm_bl_cb_type request_type;
+	uint32_t client_hdl;
+	void *userdata;
+	uint32_t cookie;
+	struct list_head entry;
+};
+
+/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
+struct cam_cdm_hw_intf_cmd_submit_bl {
+	uint32_t handle;
+	struct cam_cdm_bl_request *data;
+};
+
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
+struct cam_cdm_hw_mem {
+	int32_t handle;
+	uint32_t vaddr;
+	uintptr_t kmdvaddr;
+	size_t size;
+};
+
+/* struct cam_cdm - CDM hw device struct */
+struct cam_cdm {
+	uint32_t index;
+	char name[128];
+	enum cam_cdm_id id;
+	enum cam_cdm_flags flags;
+	struct completion reset_complete;
+	struct completion bl_complete;
+	struct workqueue_struct *work_queue;
+	struct list_head bl_request_list;
+	struct cam_hw_version version;
+	uint32_t hw_version;
+	uint32_t hw_family_version;
+	struct cam_iommu_handle iommu_hdl;
+	struct cam_cdm_reg_offset_table *offset_tbl;
+	struct cam_cdm_utils_ops *ops;
+	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+	uint8_t bl_tag;
+	atomic_t error;
+	atomic_t bl_done;
+	struct cam_cdm_hw_mem gen_irq;
+	uint32_t cpas_handle;
+};
+
+/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
+struct cam_cdm_private_dt_data {
+	bool dt_cdm_shared;
+	uint32_t dt_num_supported_clients;
+	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+};
+
+/* struct cam_cdm_intf_devices - CDM mgr interface devices */
+struct cam_cdm_intf_devices {
+	struct mutex lock;
+	uint32_t refcount;
+	struct cam_hw_intf *device;
+	struct cam_cdm_private_dt_data *data;
+};
+
+/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
+struct cam_cdm_intf_mgr {
+	bool probe_done;
+	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
+	uint32_t cdm_count;
+	uint32_t dt_supported_hw_cdm;
+	int32_t refcount;
+};
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index);
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index);
+
+#endif /* _CAM_CDM_H_ */

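cam_cdm.h packs a CDM hardware index and a per-CDM client slot into one uint32_t handle: the 4-bit hw index lands in bits [8:5] and the 5-bit client slot in bits [4:0]. A standalone sketch of the round trip, with the masks, shifts, and macros copied verbatim from the header above; the main() wrapper and sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define CAM_CDM_HW_ID_MASK      0xF
#define CAM_CDM_HW_ID_SHIFT     0x5
#define CAM_CDM_CLIENTS_ID_MASK 0x1F

#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
	CAM_CDM_HW_ID_MASK)
#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
	 ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)

int main(void)
{
	/* hw index 2, client slot 17 -> (2 << 5) | 17 = 0x51 */
	uint32_t handle = CAM_CDM_CREATE_CLIENT_HANDLE(2, 17);

	/* both fields decode back out of the packed handle */
	printf("handle=0x%x hw_idx=%u client_idx=%u\n", handle,
		CAM_CDM_GET_HW_IDX(handle), CAM_CDM_GET_CLIENT_IDX(handle));
	return 0;
}

This is the same encoding cam_cdm_process_cmd() uses in the ACQUIRE path below (CAM_CDM_CREATE_CLIENT_HANDLE(core->index, idx)) and that SUBMIT_BL/RELEASE invert with CAM_CDM_GET_CLIENT_IDX().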
drivers/cam_cdm/cam_cdm_core_common.c (+587, -0)

@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_io_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_soc.h"
+#include "cam_cdm_core_common.h"
+
+static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
+		client->refcount);
+	client->refcount++;
+	mutex_unlock(&client->lock);
+}
+
+static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
+		client->refcount);
+	if (client->refcount > 0) {
+		client->refcount--;
+	} else {
+		CAM_ERR(CAM_CDM, "Refcount put when zero");
+		WARN_ON(1);
+	}
+	mutex_unlock(&client->lock);
+}
+
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version)
+{
+	switch (ver) {
+	case CAM_CDM170_VERSION:
+	case CAM_CDM175_VERSION:
+	case CAM_CDM480_VERSION:
+		cam_version->major    = (ver & 0xF0000000);
+		cam_version->minor    = (ver & 0xFFF0000);
+		cam_version->incr     = (ver & 0xFFFF);
+		cam_version->reserved = 0;
+		return true;
+	default:
+		CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
+	break;
+	}
+	return false;
+}
+
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	if (!irq_data)
+		return false;
+
+	CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
+
+	return false;
+}
+
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
+{
+	if (by_cam_version == false) {
+		switch (ver) {
+		case CAM_CDM170_VERSION:
+		case CAM_CDM175_VERSION:
+		case CAM_CDM480_VERSION:
+			return &CDM170_ops;
+		default:
+			CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
+				ver);
+		}
+	} else if (cam_version) {
+		if (((cam_version->major == 1) &&
+			(cam_version->minor == 0) &&
+			(cam_version->incr == 0)) ||
+			((cam_version->major == 1) &&
+			(cam_version->minor == 1) &&
+			(cam_version->incr == 0)) ||
+			((cam_version->major == 1) &&
+			(cam_version->minor == 2) &&
+			(cam_version->incr == 0))) {
+
+			CAM_DBG(CAM_CDM,
+				"cam_hw_version=%x:%x:%x supported",
+				cam_version->major, cam_version->minor,
+				cam_version->incr);
+			return &CDM170_ops;
+		}
+
+		CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
+			cam_version->major, cam_version->minor,
+			cam_version->incr);
+	}
+
+	return NULL;
+}
+
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+
+	list_for_each_entry(node, bl_list, entry) {
+		if (node->bl_tag == tag)
+			return node;
+	}
+	CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
+
+	return NULL;
+}
+
+int cam_cdm_get_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core;
+
+	if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) &&
+		(sizeof(struct cam_iommu_handle) == arg_size)) {
+		cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+		*((struct cam_iommu_handle *)get_hw_cap_args) =
+			cdm_core->iommu_hdl;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
+{
+	int i;
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (hw->clients[i] == NULL) {
+			CAM_DBG(CAM_CDM, "Found client slot %d", i);
+			return i;
+		}
+	}
+	CAM_ERR(CAM_CDM, "No more client slots");
+
+	return -EBUSY;
+}
+
+
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data)
+{
+	int i;
+	struct cam_cdm *core = NULL;
+	struct cam_cdm_client *client = NULL;
+
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
+		return;
+	}
+	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		int client_idx;
+		struct cam_cdm_bl_cb_request_entry *node =
+			(struct cam_cdm_bl_cb_request_entry *)data;
+
+		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
+		client = core->clients[client_idx];
+		if ((!client) || (client->handle != node->client_hdl)) {
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
+				node->client_hdl);
+			return;
+		}
+		cam_cdm_get_client_refcount(client);
+		if (client->data.cam_cdm_callback) {
+			CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
+				client->data.identifier, node->cookie);
+			client->data.cam_cdm_callback(node->client_hdl,
+				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
+				node->cookie);
+			CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
+				node->cookie);
+		} else {
+			CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
+				node->client_hdl);
+		}
+		cam_cdm_put_client_refcount(client);
+		return;
+	}
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (core->clients[i] != NULL) {
+			client = core->clients[i];
+			mutex_lock(&client->lock);
+			CAM_DBG(CAM_CDM, "Found client slot %d", i);
+			if (client->data.cam_cdm_callback) {
+				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
+					unsigned long iova =
+						(unsigned long)data;
+
+					client->data.cam_cdm_callback(
+						client->handle,
+						client->data.userdata,
+						CAM_CDM_CB_STATUS_PAGEFAULT,
+						(iova & 0xFFFFFFFF));
+				}
+			} else {
+				CAM_ERR(CAM_CDM,
+					"No cb registered for client hdl=%x",
+					client->handle);
+			}
+			mutex_unlock(&client->lock);
+		}
+	}
+}
+
+int cam_cdm_stream_ops_internal(void *hw_priv,
+	void *start_args, bool operation)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *core = NULL;
+	int rc = -EPERM;
+	int client_idx;
+	struct cam_cdm_client *client;
+	uint32_t *handle = start_args;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+	client = core->clients[client_idx];
+	if (!client) {
+		CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
+		return -EINVAL;
+	}
+	cam_cdm_get_client_refcount(client);
+	if (*handle != client->handle) {
+		CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
+		cam_cdm_put_client_refcount(client);
+		return -EINVAL;
+	}
+	if (operation == true) {
+		if (true == client->stream_on) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed ON");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	} else {
+		if (client->stream_on == false) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed Off");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	if (operation == true) {
+		if (!cdm_hw->open_count) {
+			struct cam_ahb_vote ahb_vote;
+			struct cam_axi_vote axi_vote = {0};
+
+			ahb_vote.type = CAM_VOTE_ABSOLUTE;
+			ahb_vote.vote.level = CAM_SVS_VOTE;
+			axi_vote.num_paths = 1;
+			axi_vote.axi_path[0].path_data_type =
+				CAM_AXI_PATH_DATA_ALL;
+			axi_vote.axi_path[0].transac_type =
+				CAM_AXI_TRANSACTION_READ;
+			axi_vote.axi_path[0].camnoc_bw =
+				CAM_CPAS_DEFAULT_AXI_BW;
+			axi_vote.axi_path[0].mnoc_ab_bw =
+				CAM_CPAS_DEFAULT_AXI_BW;
+			axi_vote.axi_path[0].mnoc_ib_bw =
+				CAM_CPAS_DEFAULT_AXI_BW;
+
+			rc = cam_cpas_start(core->cpas_handle,
+				&ahb_vote, &axi_vote);
+			if (rc != 0) {
+				CAM_ERR(CAM_CDM, "CPAS start failed");
+				goto end;
+			}
+			CAM_DBG(CAM_CDM, "CDM init first time");
+			if (core->id == CAM_CDM_VIRTUAL) {
+				CAM_DBG(CAM_CDM,
+					"Virtual CDM HW init first time");
+				rc = 0;
+			} else {
+				CAM_DBG(CAM_CDM, "CDM HW init first time");
+				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
+				if (rc == 0) {
+					rc = cam_hw_cdm_alloc_genirq_mem(
+						hw_priv);
+					if (rc != 0) {
+						CAM_ERR(CAM_CDM,
+							"Genirqalloc failed");
+						cam_hw_cdm_deinit(hw_priv,
+							NULL, 0);
+					}
+				} else {
+					CAM_ERR(CAM_CDM, "CDM HW init failed");
+				}
+			}
+			if (rc == 0) {
+				cdm_hw->open_count++;
+				client->stream_on = true;
+			} else {
+				if (cam_cpas_stop(core->cpas_handle))
+					CAM_ERR(CAM_CDM, "CPAS stop failed");
+			}
+		} else {
+			cdm_hw->open_count++;
+			CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
+				cdm_hw->open_count);
+			rc = 0;
+			client->stream_on = true;
+		}
+	} else {
+		if (cdm_hw->open_count) {
+			cdm_hw->open_count--;
+			CAM_DBG(CAM_CDM, "stream OFF CDM %d",
+				cdm_hw->open_count);
+			if (!cdm_hw->open_count) {
+				CAM_DBG(CAM_CDM, "CDM Deinit now");
+				if (core->id == CAM_CDM_VIRTUAL) {
+					CAM_DBG(CAM_CDM,
+						"Virtual CDM HW Deinit");
+					rc = 0;
+				} else {
+					CAM_DBG(CAM_CDM, "CDM HW Deinit now");
+					rc = cam_hw_cdm_deinit(
+						hw_priv, NULL, 0);
+					if (cam_hw_cdm_release_genirq_mem(
+						hw_priv))
+						CAM_ERR(CAM_CDM,
+							"Genirq release fail");
+				}
+				if (rc) {
+					CAM_ERR(CAM_CDM,
+						"Deinit failed in streamoff");
+				} else {
+					client->stream_on = false;
+					rc = cam_cpas_stop(core->cpas_handle);
+					if (rc)
+						CAM_ERR(CAM_CDM,
+							"CPAS stop failed");
+				}
+			} else {
+				client->stream_on = false;
+				rc = 0;
+				CAM_DBG(CAM_CDM,
+					"Client stream off success =%d",
+					cdm_hw->open_count);
+			}
+		} else {
+			CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
+				cdm_hw->open_count);
+			rc = -ENXIO;
+		}
+	}
+end:
+	cam_cdm_put_client_refcount(client);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+}
+
+int cam_cdm_stream_start(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true);
+	return rc;
+
+}
+
+int cam_cdm_stream_stop(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false);
+	return rc;
+
+}
+
+int cam_cdm_process_cmd(void *hw_priv,
+	uint32_t cmd, void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_data = NULL;
+	struct cam_cdm *core = NULL;
+	int rc = -EINVAL;
+
+	if ((!hw_priv) || (!cmd_args) ||
+		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
+		return rc;
+
+	soc_data = &cdm_hw->soc_info;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	switch (cmd) {
+	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
+		struct cam_cdm_hw_intf_cmd_submit_bl *req;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
+			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
+				arg_size);
+			break;
+		}
+		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
+		if ((req->data->type < 0) ||
+			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
+			CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
+				req->data->type);
+			break;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
+		client = core->clients[idx];
+		if ((!client) || (req->handle != client->handle)) {
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
+				req->handle);
+			break;
+		}
+		cam_cdm_get_client_refcount(client);
+		if ((req->data->flag == true) &&
+			(!client->data.cam_cdm_callback)) {
+			CAM_ERR(CAM_CDM,
+				"CDM request cb without registering cb");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (client->stream_on != true) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM needs to be streamed ON first");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (core->id == CAM_CDM_VIRTUAL)
+			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
+		else
+			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
+
+		cam_cdm_put_client_refcount(client);
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
+		struct cam_cdm_acquire_data *data;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
+			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
+				arg_size);
+			break;
+		}
+
+		mutex_lock(&cdm_hw->hw_mutex);
+		data = (struct cam_cdm_acquire_data *)cmd_args;
+		CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
+			data->identifier, core->index);
+		idx = cam_cdm_find_free_client_slot(core);
+		if ((idx < 0) || (core->clients[idx])) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			CAM_ERR(CAM_CDM,
+				"Fail to client slots, client=%s in hw idx=%d",
+			data->identifier, core->index);
+			break;
+		}
+		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
+			GFP_KERNEL);
+		if (!core->clients[idx]) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			rc = -ENOMEM;
+			break;
+		}
+
+		mutex_unlock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		mutex_init(&client->lock);
+		data->ops = core->ops;
+		if (core->id == CAM_CDM_VIRTUAL) {
+			data->cdm_version.major = 1;
+			data->cdm_version.minor = 0;
+			data->cdm_version.incr = 0;
+			data->cdm_version.reserved = 0;
+			data->ops = cam_cdm_get_ops(0,
+					&data->cdm_version, true);
+			if (!data->ops) {
+				mutex_destroy(&client->lock);
+				mutex_lock(&cdm_hw->hw_mutex);
+				kfree(core->clients[idx]);
+				core->clients[idx] = NULL;
+				mutex_unlock(
+					&cdm_hw->hw_mutex);
+				rc = -EPERM;
+				CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
+				break;
+			}
+		} else {
+			data->cdm_version = core->version;
+		}
+
+		cam_cdm_get_client_refcount(client);
+		mutex_lock(&client->lock);
+		memcpy(&client->data, data,
+			sizeof(struct cam_cdm_acquire_data));
+		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
+					core->index,
+					idx);
+		client->stream_on = false;
+		data->handle = client->handle;
+		CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
+			data->identifier, core->index);
+		mutex_unlock(&client->lock);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RELEASE: {
+		uint32_t *handle = cmd_args;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(uint32_t) != arg_size) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM cmd %d size=%x for handle=%x",
+				cmd, arg_size, *handle);
+			return -EINVAL;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+		mutex_lock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		if ((!client) || (*handle != client->handle)) {
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
+				client, *handle);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		cam_cdm_put_client_refcount(client);
+		mutex_lock(&client->lock);
+		if (client->refcount != 0) {
+			CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
+				client->refcount);
+			rc = -EPERM;
+			mutex_unlock(&client->lock);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		core->clients[idx] = NULL;
+		mutex_unlock(&client->lock);
+		mutex_destroy(&client->lock);
+		kfree(client);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
+		CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
+			*((uint32_t *)cmd_args));
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
+		break;
+	}
+	return rc;
+}

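cam_cdm_get_client_refcount()/cam_cdm_put_client_refcount() above pin a client while a callback or stream operation is in flight, and the CAM_CDM_HW_INTF_CMD_RELEASE handler refuses to free a client whose refcount is still non-zero. A userspace pthread analogue of that pattern (a sketch only, not driver code; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct client {
	pthread_mutex_t lock;
	unsigned int refcount;
};

static void client_get(struct client *c)
{
	pthread_mutex_lock(&c->lock);
	c->refcount++;
	pthread_mutex_unlock(&c->lock);
}

static void client_put(struct client *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->refcount > 0)
		c->refcount--;
	else
		fprintf(stderr, "refcount put when zero\n"); /* WARN_ON in the driver */
	pthread_mutex_unlock(&c->lock);
}

static int client_release(struct client *c)
{
	int busy;

	pthread_mutex_lock(&c->lock);
	busy = (c->refcount != 0);
	pthread_mutex_unlock(&c->lock);
	return busy ? -1 : 0; /* -EPERM in the driver's RELEASE path */
}

int main(void)
{
	struct client c = { PTHREAD_MUTEX_INITIALIZER, 0 };

	client_get(&c);
	printf("release while busy: %d\n", client_release(&c));
	client_put(&c);
	printf("release when idle: %d\n", client_release(&c));
	return 0;
}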
drivers/cam_cdm/cam_cdm_core_common.h (+45, -0)

@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_CORE_COMMON_H_
+#define _CAM_CDM_CORE_COMMON_H_
+
+#include "cam_mem_mgr.h"
+
+#define CAM_CDM170_VERSION 0x10000000
+#define CAM_CDM175_VERSION 0x10010000
+#define CAM_CDM480_VERSION 0x10020000
+
+extern struct cam_cdm_utils_ops CDM170_ops;
+
+int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
+int cam_hw_cdm_release_genirq_mem(void *hw_priv);
+int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
+int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
+	bool operation);
+int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
+	uint32_t arg_size);
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version);
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data);
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list);
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data);
+
+#endif /* _CAM_CDM_CORE_COMMON_H_ */

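The three version words defined above encode major/minor/step fields that cam_cdm_set_cam_hw_version() extracts with the masks 0xF0000000, 0xFFF0000 and 0xFFFF (the driver stores the masked values unshifted). A sketch decoding the same fields, with shifts added here purely for readability (the shifts are this note's assumption, not the driver's behaviour):

#include <stdint.h>
#include <stdio.h>

#define CAM_CDM170_VERSION 0x10000000
#define CAM_CDM175_VERSION 0x10010000
#define CAM_CDM480_VERSION 0x10020000

static void decode(uint32_t ver)
{
	/* bits [31:28] major, [27:16] minor, [15:0] incr */
	printf("0x%08x -> major=%u minor=%u incr=%u\n", ver,
		(ver & 0xF0000000) >> 28, (ver & 0x0FFF0000) >> 16,
		ver & 0xFFFF);
}

int main(void)
{
	decode(CAM_CDM170_VERSION); /* 1.0.0 */
	decode(CAM_CDM175_VERSION); /* 1.1.0 */
	decode(CAM_CDM480_VERSION); /* 1.2.0 */
	return 0;
}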
drivers/cam_cdm/cam_cdm_hw_core.c (+1135, -0)

@@ -0,0 +1,1135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <cam_req_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw_cdm170_reg.h"
+
+#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
+#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
+#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
+#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
+
+#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
+
+static void cam_hw_cdm_work(struct work_struct *work);
+
+/* DT match table entry for all CDM variants*/
+static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
+	{
+		.compatible = CAM_HW_CDM_CPAS_0_NAME,
+		.data = &cam170_cpas_cdm_offset_table,
+	},
+	{}
+};
+
+static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
+{
+	if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name))
+		return CAM_CDM_CPAS_0;
+
+	return CAM_CDM_MAX;
+}
+
+int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
+	uint32_t *pending_bl)
+{
+	int rc = 0;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+		pending_bl)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
+	bool enable)
+{
+	int rc = -EIO;
+	uint32_t irq_mask = 0;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK,
+		&irq_mask)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask");
+		return rc;
+	}
+
+	if (enable == true) {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+			(irq_mask | 0x4))) {
+			CAM_ERR(CAM_CDM, "Write failed to enable BL done irq");
+		} else {
+			atomic_inc(&core->bl_done);
+			rc = 0;
+			CAM_DBG(CAM_CDM, "BL done irq enabled =%d",
+				atomic_read(&core->bl_done));
+		}
+	} else {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+			(irq_mask & 0x70003))) {
+			CAM_ERR(CAM_CDM, "Write failed to disable BL done irq");
+		} else {
+			atomic_dec(&core->bl_done);
+			rc = 0;
+			CAM_DBG(CAM_CDM, "BL done irq disable =%d",
+				atomic_read(&core->bl_done));
+		}
+	}
+	return rc;
+}
+
+static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable)
+{
+	int rc = 0;
+
+	if (enable == true) {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x01)) {
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable");
+			rc = -EIO;
+		}
+	} else {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x02)) {
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
+			rc = -EIO;
+		}
+	}
+	return rc;
+}
+
+int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg = 0;
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg);
+
+}
+
+void cam_hw_cdm_dump_core_debug_registers(
+	struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg, core_dbg, loop_cnt;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
+	/* First pause CDM, If it fails still proceed to dump debug info */
+	cam_hw_cdm_enable_core(cdm_hw, false);
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
+	loop_cnt = dump_reg;
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
+	if (core_dbg & 0x100) {
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg);
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg);
+	} else {
+		CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable");
+	}
+
+	if (core_dbg & 0x10000) {
+		int i;
+
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d",
+			loop_cnt);
+		for (i = 0 ; i < loop_cnt ; i++) {
+			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
+				&dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
+				&dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i,
+				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+		}
+	} else {
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable");
+	}
+
+	CAM_ERR(CAM_CDM, "CDM HW default dump");
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d",
+		(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
+
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
+
+	/* Enable CDM back */
+	cam_hw_cdm_enable_core(cdm_hw, true);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+}
+
+int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
+	uint32_t bl_count)
+{
+	uint32_t pending_bl = 0;
+	int32_t available_bl_slots = 0;
+	int rc = -EIO;
+	long time_left;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	do {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			&pending_bl)) {
+			CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+			rc = -EIO;
+			break;
+		}
+		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
+		if (available_bl_slots < 0) {
+			CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d",
+				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
+				pending_bl);
+			break;
+		}
+		if (bl_count < (available_bl_slots - 1)) {
+			CAM_DBG(CAM_CDM,
+				"BL slot available_cnt=%d requested=%d",
+				(available_bl_slots - 1), bl_count);
+				rc = bl_count;
+				break;
+		} else if (0 == (available_bl_slots - 1)) {
+			rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true);
+			if (rc) {
+				CAM_ERR(CAM_CDM, "Enable BL done irq failed");
+				break;
+			}
+			time_left = wait_for_completion_timeout(
+				&core->bl_complete, msecs_to_jiffies(
+				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
+			if (time_left <= 0) {
+				CAM_ERR(CAM_CDM,
+					"CDM HW BL Wait timed out failed");
+				if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
+					false))
+					CAM_ERR(CAM_CDM,
+						"Disable BL done irq failed");
+				rc = -EIO;
+				break;
+			}
+			if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false))
+				CAM_ERR(CAM_CDM, "Disable BL done irq failed");
+			rc = 0;
+			CAM_DBG(CAM_CDM, "CDM HW is ready for data");
+		} else {
+			rc = (bl_count - (available_bl_slots - 1));
+			break;
+		}
+	} while (1);
+
+	return rc;
+}
+
+bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
+	uint32_t len, uint32_t tag)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base");
+		return true;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
+		((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM BL len");
+		return true;
+	}
+	return false;
+}
+
+bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM commit BL");
+		return true;
+	}
+	return false;
+}
+
+int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t len;
+	int rc;
+
+	if (core->bl_tag > 63) {
+		CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag);
+		rc = -EINVAL;
+		goto end;
+	}
+	CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d",
+		core->bl_tag, req->data->cmd_arrary_count, req->data->cookie);
+	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
+			GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+	node->client_hdl = req->handle;
+	node->cookie = req->data->cookie;
+	node->bl_tag = core->bl_tag;
+	node->userdata = req->data->userdata;
+	list_add_tail(&node->entry, &core->bl_request_list);
+	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
+	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
+		core->bl_tag);
+	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
+		((4 * core->ops->cdm_required_size_genirq()) - 1),
+		core->bl_tag);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -EIO;
+		goto end;
+	}
+
+	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+		CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -EIO;
+	}
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t pending_bl = 0;
+	int write_count = 0;
+
+	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
+		pr_info("requested BL more than max size, cnt=%d max=%d",
+			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
+	}
+
+	if (atomic_read(&core->error))
+		return -EIO;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	mutex_lock(&client->lock);
+	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Cannot read the current BL depth");
+		mutex_unlock(&client->lock);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return rc;
+	}
+
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t hw_vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) &&
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			CAM_ERR(CAM_CDM,
+				"cmd len(%d) is invalid cnt=%d total cnt=%d",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (atomic_read(&core->error)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"In error state cnt=%d total cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -EIO;
+			break;
+		}
+		if (write_count == 0) {
+			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
+				(req->data->cmd_arrary_count - i));
+			if (write_count < 0) {
+				CAM_ERR(CAM_CDM,
+					"wait for bl fifo failed %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EIO;
+				break;
+			}
+		} else {
+			write_count--;
+		}
+
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_io_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
+				&len);
+		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				CAM_ERR(CAM_CDM,
+					"Hw bl hw_iova is invalid %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EINVAL;
+				break;
+			}
+			rc = 0;
+			hw_vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/hw va type is supported %d",
+				req->data->type);
+			rc = -EINVAL;
+			break;
+		}
+
+		if ((!rc) && (hw_vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CAM_DBG(CAM_CDM, "Got the HW VA");
+			if (core->bl_tag >=
+				(CAM_CDM_HWFIFO_SIZE - 1))
+				core->bl_tag = 0;
+			rc = cam_hw_cdm_bl_write(cdm_hw,
+				((uint32_t)hw_vaddr_ptr +
+					cdm_cmd->cmd[i].offset),
+				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
+			if (rc) {
+				CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EIO;
+				break;
+			}
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			CAM_ERR(CAM_CDM, "Sanity check failed for %d:%d",
+				i, req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (!rc) {
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d total_cnt=%d",
+				i, core->bl_tag, req->data->cmd_arrary_count);
+
+			CAM_DBG(CAM_CDM, "Now commit the BL");
+			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+				CAM_ERR(CAM_CDM,
+					"Cannot commit the BL %d tag=%d",
+					i, core->bl_tag);
+				rc = -EIO;
+				break;
+			}
+			CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
+				core->bl_tag);
+			core->bl_tag++;
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count -
+				1))) {
+				rc = cam_hw_cdm_submit_gen_irq(
+					cdm_hw, req);
+				if (rc == 0)
+					core->bl_tag++;
+			}
+		}
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+
+}
+
+static void cam_hw_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status);
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			struct cam_cdm_bl_cb_request_entry *node, *tnode;
+
+			CAM_DBG(CAM_CDM, "inline IRQ data=0x%x",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			list_for_each_entry_safe(node, tnode,
+					&core->bl_request_list, entry) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM,
+						"Invalid node=%pK %d", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				if (node->bl_tag == payload->irq_data) {
+					kfree(node);
+					break;
+				}
+				kfree(node);
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
+			complete(&core->reset_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
+			if (atomic_read(&core->bl_done)) {
+				CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
+				complete(&core->bl_complete);
+			}
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid command IRQ, Need HW reset\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		kfree(payload);
+	} else {
+		CAM_ERR(CAM_CDM, "NULL payload");
+	}
+
+}
+
+static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token,
+	uint32_t buf_info)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_cdm *core = NULL;
+
+	if (token) {
+		cdm_hw = (struct cam_hw_info *)token;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		atomic_inc(&core->error);
+		cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n",
+			(void *)iova);
+		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
+			(void *)iova);
+		atomic_dec(&core->error);
+	} else {
+		CAM_ERR(CAM_CDM, "Invalid token");
+	}
+
+}
+
+irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cdm_hw = data;
+	struct cam_cdm *cdm_core = cdm_hw->core_info;
+	struct cam_cdm_work_payload *payload;
+	bool work_status;
+
+	CAM_DBG(CAM_CDM, "Got irq");
+	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
+	if (payload) {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
+				&payload->irq_status)) {
+			CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status");
+		}
+		if (!payload->irq_status) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n");
+			kfree(payload);
+			return IRQ_HANDLED;
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
+				&payload->irq_data)) {
+				CAM_ERR(CAM_CDM,
+					"Failed to read CDM HW IRQ data");
+			}
+		}
+		CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status);
+		payload->hw = cdm_hw;
+		INIT_WORK((struct work_struct *)&payload->work,
+			cam_hw_cdm_work);
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
+			payload->irq_status))
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear");
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd");
+		work_status = queue_work(cdm_core->work_queue, &payload->work);
+		if (work_status == false) {
+			CAM_ERR(CAM_CDM, "Failed to queue work for irq=0x%x",
+				payload->irq_status);
+			kfree(payload);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_request_desc genirq_alloc_cmd;
+	struct cam_mem_mgr_memory_desc genirq_alloc_out;
+	struct cam_cdm *cdm_core = NULL;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_alloc_cmd.align = 0;
+	genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
+	genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
+	genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
+	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
+		&genirq_alloc_out);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc);
+		goto end;
+	}
+	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
+	cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
+	cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
+	cdm_core->gen_irq.size = genirq_alloc_out.len;
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_release_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_mem_mgr_memory_desc genirq_release_cmd;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
+	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
+	if (rc)
+		CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw");
+
+	return rc;
+}
+
+int cam_hw_cdm_init(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc;
+	long time_left;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Enable platform failed");
+		goto end;
+	}
+
+	CAM_DBG(CAM_CDM, "Enable soc done");
+
+/* Before triggering the reset to HW, clear the reset complete */
+	atomic_set(&cdm_core->error, 0);
+	atomic_set(&cdm_core->bl_done, 0);
+	reinit_completion(&cdm_core->reset_complete);
+	reinit_completion(&cdm_core->bl_complete);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
+		goto disable_return;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
+		goto disable_return;
+	}
+
+	CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone");
+	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
+		msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+
+	if (time_left <= 0) {
+		CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc);
+		goto disable_return;
+	} else {
+		CAM_DBG(CAM_CDM, "CDM Init success");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
+		cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
+		rc = 0;
+		goto end;
+	}
+
+disable_return:
+	rc = -EIO;
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+end:
+	return rc;
+}
+
+int cam_hw_cdm_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = cdm_hw->core_info;
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "disable platform failed");
+	} else {
+		CAM_DBG(CAM_CDM, "CDM Deinit success");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	struct cam_cpas_register_params cpas_parms;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote = {0};
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw->soc_info.dev = &pdev->dev;
+	cdm_hw->soc_info.dev_name = pdev->name;
+	cdm_hw_intf->hw_type = CAM_HW_CDM;
+	cdm_hw->open_count = 0;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+
+	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
+		goto release_mem;
+	}
+	cdm_hw_intf->hw_idx = cdm_hw->soc_info.index;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+		cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
+	if (cdm_core->id >= CAM_CDM_MAX) {
+		CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s",
+			cdm_core->name);
+		goto release_private_mem;
+	}
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	init_completion(&cdm_core->bl_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
+	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+	mutex_lock(&cdm_hw->hw_mutex);
+
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
+		goto unlock_release_mem;
+	}
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		cam_hw_cdm_iommu_fault_handler, cdm_hw);
+
+	cdm_core->iommu_hdl.secure = -1;
+
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	if (!cdm_core->work_queue) {
+		CAM_ERR(CAM_CDM, "Workqueue allocation failed");
+		rc = -ENOMEM;
+		goto destroy_non_secure_hdl;
+	}
+
+	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
+			cam_hw_cdm_irq, cdm_hw);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to request platform resource");
+		goto destroy_non_secure_hdl;
+	}
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "HW CDM CPAS registration failed");
+		goto release_platform_resource;
+	}
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.num_paths = 1;
+	axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL;
+	axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ;
+	axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(cdm_core->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS start failed");
+		goto cpas_unregister;
+	}
+
+	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to Init CDM HW");
+		goto cpas_stop;
+	}
+	cdm_hw->open_count++;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_version)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM HW Version");
+		goto deinit;
+	}
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION,
+		&cdm_core->hw_family_version)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM family Version");
+		goto deinit;
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x",
+		cdm_core->hw_family_version, cdm_core->hw_version);
+	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
+		false);
+	if (!cdm_core->ops) {
+		CAM_ERR(CAM_CDM, "Failed to get util ops for hw");
+		goto deinit;
+	}
+
+	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
+		&cdm_core->version)) {
+		CAM_ERR(CAM_CDM, "Failed to set cam hw version for hw");
+		goto deinit;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to Deinit CDM HW");
+		cdm_hw->open_count--;
+		goto cpas_stop;
+	}
+
+	rc = cam_cpas_stop(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
+		cdm_hw->open_count--;
+		goto cpas_unregister;
+	}
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+		soc_private, CAM_HW_CDM, &cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "HW CDM Interface registration failed");
+		cdm_hw->open_count--;
+		goto cpas_unregister;
+	}
+	cdm_hw->open_count--;
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
+
+	return rc;
+
+deinit:
+	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
+	cdm_hw->open_count--;
+cpas_stop:
+	if (cam_cpas_stop(cdm_core->cpas_handle))
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
+cpas_unregister:
+	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+release_platform_resource:
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+destroy_non_secure_hdl:
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		CAM_ERR(CAM_CDM, "Release iommu non-secure hdl failed");
+unlock_release_mem:
+	mutex_unlock(&cdm_hw->hw_mutex);
+release_private_mem:
+	kfree(cdm_hw->soc_info.soc_private);
+release_mem:
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	return rc;
+}
+
+int cam_hw_cdm_remove(struct platform_device *pdev)
+{
+	int rc = -EBUSY;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw private data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw core data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	if (cdm_hw->open_count != 0) {
+		CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
+			cdm_hw->open_count);
+		return rc;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+		return rc;
+	}
+
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		CAM_ERR(CAM_CDM, "Release iommu non-secure hdl failed");
+	cam_smmu_unset_client_page_fault_handler(
+		cdm_core->iommu_hdl.non_secure, cdm_hw);
+
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+
+	return 0;
+}
+
+static struct platform_driver cam_hw_cdm_driver = {
+	.probe = cam_hw_cdm_probe,
+	.remove = cam_hw_cdm_remove,
+	.driver = {
+		.name = "msm_cam_cdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_hw_cdm_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_hw_cdm_init_module(void)
+{
+	return platform_driver_register(&cam_hw_cdm_driver);
+}
+
+static void __exit cam_hw_cdm_exit_module(void)
+{
+	platform_driver_unregister(&cam_hw_cdm_driver);
+}
+
+module_init(cam_hw_cdm_init_module);
+module_exit(cam_hw_cdm_exit_module);
+MODULE_DESCRIPTION("MSM Camera HW CDM driver");
+MODULE_LICENSE("GPL v2");

+ 573 - 0
drivers/cam_cdm/cam_cdm_intf.c

@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_virtual.h"
+#include "cam_soc_util.h"
+#include "cam_cdm_soc.h"
+
+static struct cam_cdm_intf_mgr cdm_mgr;
+static DEFINE_MUTEX(cam_cdm_mgr_lock);
+
+static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
+	{ .compatible = "qcom,cam-cdm-intf", },
+	{}
+};
+
+static int get_cdm_mgr_refcount(void)
+{
+	int rc = 0;
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+		rc = -EPERM;
+	} else {
+		CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
+			cdm_mgr.refcount);
+		cdm_mgr.refcount++;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static void put_cdm_mgr_refcount(void)
+{
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+	} else {
+		CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
+			cdm_mgr.refcount);
+		if (cdm_mgr.refcount > 0) {
+			cdm_mgr.refcount--;
+		} else {
+			CAM_ERR(CAM_CDM, "Refcount put when zero");
+			WARN_ON(1);
+		}
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+}
+
+static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
+	uint32_t hw_idx)
+{
+	int rc = -EPERM;
+	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
+
+	if (hw->hw_ops.get_hw_caps) {
+		rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
+			sizeof(struct cam_iommu_handle));
+	}
+
+	return rc;
+}
+
+static int get_cdm_index_by_id(char *identifier,
+	uint32_t cell_index, uint32_t *hw_index)
+{
+	int rc = -EPERM, i, j;
+	char client_name[128];
+
+	CAM_DBG(CAM_CDM, "Looking for HW id of %s and index=%d",
+		identifier, cell_index);
+	snprintf(client_name, sizeof(client_name), "%s", identifier);
+	CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
+		cdm_mgr.cdm_count);
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
+			cdm_mgr.nodes[i].data->dt_num_supported_clients);
+
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
+			CAM_DBG(CAM_CDM, "client name:%s",
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				client_name)) {
+				rc = 0;
+				*hw_index = i;
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+
+	return rc;
+}
+
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles)
+{
+	int i, j, rc = -EPERM;
+
+	if ((!identifier) || (!cdm_handles))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+	CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
+
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		if (!cdm_mgr.nodes[i].data) {
+			mutex_unlock(&cdm_mgr.nodes[i].lock);
+			continue;
+		}
+		for (j = 0; j <
+			 cdm_mgr.nodes[i].data->dt_num_supported_clients;
+			j++) {
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				identifier)) {
+				rc = get_cdm_iommu_handle(cdm_handles, i);
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
+
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
+{
+	int rc = -EPERM;
+	struct cam_hw_intf *hw;
+	uint32_t hw_index = 0;
+
+	if ((!data) || (!data->base_array_cnt))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	if (data->id > CAM_CDM_HW_ANY) {
+		CAM_ERR(CAM_CDM,
+			"only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
+		rc = -EPERM;
+		goto end;
+	}
+	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
+		&hw_index);
+	if ((rc < 0) && (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
+		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
+		goto end;
+	} else {
+		CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
+					sizeof(struct cam_cdm_acquire_data));
+			if (rc < 0) {
+				CAM_ERR(CAM_CDM, "CDM hw acquire failed");
+				goto end;
+			}
+		} else {
+			CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
+				hw_index);
+			rc = -EPERM;
+		}
+	}
+end:
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
+			data->id, data->identifier, data->cell_index);
+		put_cdm_mgr_refcount();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_acquire);
+
+int cam_cdm_release(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EPERM;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw release failed for handle=%x",
+					handle);
+		} else
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
+				hw_index);
+	}
+	put_cdm_mgr_refcount();
+	if (rc == 0)
+		/* Also drop the refcount taken at acquire time */
+		put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_release);
+
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (!data)
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		struct cam_cdm_hw_intf_cmd_submit_bl req;
+
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			req.data = data;
+			req.handle = handle;
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
+				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw submit bl failed for handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_submit_bls);
+
+int cam_cdm_stream_on(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.start) {
+			rc = hw->hw_ops.start(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw start failed handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM,
+				"hw idx %d doesn't have start ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_on);
+
+int cam_cdm_stream_off(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.stop) {
+			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_off);
+
+int cam_cdm_reset_hw(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"CDM hw reset failed for handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have reset ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_reset_hw);
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data) || (!index))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) {
+		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
+		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else {
+		CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
+			type, cdm_mgr.cdm_count);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
+		(index == CAM_SW_CDM_INDEX)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) &&
+		(hw == cdm_mgr.nodes[index].device)) {
+		mutex_lock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.nodes[index].device = NULL;
+		cdm_mgr.nodes[index].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.cdm_count--;
+		rc = 0;
+	} else {
+		CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
+			type, index);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+static int cam_cdm_intf_probe(struct platform_device *pdev)
+{
+	int i, rc;
+
+	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
+		return rc;
+	}
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		mutex_init(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = true;
+	cdm_mgr.refcount = 0;
+	mutex_unlock(&cam_cdm_mgr_lock);
+	rc = cam_virtual_cdm_probe(pdev);
+	if (rc) {
+		mutex_lock(&cam_cdm_mgr_lock);
+		cdm_mgr.probe_done = false;
+		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+				(cdm_mgr.nodes[i].refcount != 0))
+				CAM_ERR(CAM_CDM,
+					"Valid node present in index=%d", i);
+			mutex_destroy(&cdm_mgr.nodes[i].lock);
+			cdm_mgr.nodes[i].device = NULL;
+			cdm_mgr.nodes[i].data = NULL;
+			cdm_mgr.nodes[i].refcount = 0;
+		}
+		mutex_unlock(&cam_cdm_mgr_lock);
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Intf probe done");
+
+	return rc;
+}
+
+static int cam_cdm_intf_remove(struct platform_device *pdev)
+{
+	int i, rc = -EBUSY;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	if (cam_virtual_cdm_remove(pdev)) {
+		CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
+		goto end;
+	}
+	put_cdm_mgr_refcount();
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.refcount != 0) {
+		CAM_ERR(CAM_CDM, "cdm manager refcount not zero %d",
+			cdm_mgr.refcount);
+		goto end;
+	}
+
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+			(cdm_mgr.nodes[i].refcount != 0)) {
+			CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
+			mutex_unlock(&cam_cdm_mgr_lock);
+			goto end;
+		}
+		mutex_destroy(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = false;
+	rc = 0;
+
+end:
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static struct platform_driver cam_cdm_intf_driver = {
+	.probe = cam_cdm_intf_probe,
+	.remove = cam_cdm_intf_remove,
+	.driver = {
+		.name = "msm_cam_cdm_intf",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_cdm_intf_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_cdm_intf_init_module(void)
+{
+	return platform_driver_register(&cam_cdm_intf_driver);
+}
+
+static void __exit cam_cdm_intf_exit_module(void)
+{
+	platform_driver_unregister(&cam_cdm_intf_driver);
+}
+
+module_init(cam_cdm_intf_init_module);
+module_exit(cam_cdm_intf_exit_module);
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
+MODULE_LICENSE("GPL v2");

+ 202 - 0
drivers/cam_cdm/cam_cdm_intf_api.h

@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_API_H_
+#define _CAM_CDM_API_H_
+
+#include <media/cam_defs.h>
+#include "cam_cdm_util.h"
+#include "cam_soc_util.h"
+
+/* enum cam_cdm_id - Enum of possible CAM CDM hardware types */
+enum cam_cdm_id {
+	CAM_CDM_VIRTUAL,
+	CAM_CDM_HW_ANY,
+	CAM_CDM_CPAS_0,
+	CAM_CDM_IPE0,
+	CAM_CDM_IPE1,
+	CAM_CDM_BPS,
+	CAM_CDM_VFE,
+	CAM_CDM_MAX
+};
+
+/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback */
+enum cam_cdm_cb_status {
+	CAM_CDM_CB_STATUS_BL_SUCCESS,
+	CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+	CAM_CDM_CB_STATUS_PAGEFAULT,
+	CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
+	CAM_CDM_CB_STATUS_HW_RESET_DONE,
+	CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
+};
+
+/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */
+enum cam_cdm_bl_cmd_addr_type {
+	CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
+	CAM_CDM_BL_CMD_TYPE_HW_IOVA,
+	CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
+};
+
+/**
+ * struct cam_cdm_acquire_data - Cam CDM acquire data structure
+ *
+ * @identifier : Input identifier string which is the device label from dt
+ *                    like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index from dt
+ *                     of the device. This can be used to form a unique string
+ *                     with @identifier like vfe0, ife1, jpeg0 etc
+ * @id : ID of a specific or any CDM HW which needs to be acquired.
+ * @userdata : Input private data which will be returned as part
+ *             of callback.
+ * @cam_cdm_callback : Input callback pointer for triggering the
+ *                     callbacks from CDM driver
+ *                     @handle : CDM Client handle
+ *                     @userdata : Private data given at the time of acquire
+ *                     @status : Callback status
+ *                     @cookie : Cookie if the callback is gen irq status
+ * @base_array_cnt : Input number of ioremapped address pairs in
+ *                   base_array, needed only if the selected cdm is virtual.
+ * @base_array : Input pointer to an ioremapped address pair array,
+ *               needed only if the selected cdm is virtual.
+ * @cdm_version : CDM version; output when acquiring a HW cdm and input
+ *                when acquiring a virtual cdm. Currently fixed to one
+ *                version below the acquire API.
+ * @ops : Output pointer updated by cdm driver to the CDM
+ *                     util ops for this HW version of CDM acquired.
+ * @handle  : Output Unique handle generated for this acquire
+ *
+ */
+struct cam_cdm_acquire_data {
+	char identifier[128];
+	uint32_t cell_index;
+	enum cam_cdm_id id;
+	void *userdata;
+	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
+		enum cam_cdm_cb_status status, uint64_t cookie);
+	uint32_t base_array_cnt;
+	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
+	struct cam_hw_version cdm_version;
+	struct cam_cdm_utils_ops *ops;
+	uint32_t handle;
+};
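+
+/*
+ * Usage sketch (illustrative, not part of the driver API contract): how a
+ * client might fill cam_cdm_acquire_data before calling cam_cdm_acquire().
+ * The "ife" identifier, client_priv and soc_info names below are
+ * assumptions for illustration only.
+ *
+ *	static void demo_cdm_cb(uint32_t handle, void *userdata,
+ *		enum cam_cdm_cb_status status, uint64_t cookie)
+ *	{
+ *		if (status == CAM_CDM_CB_STATUS_BL_SUCCESS)
+ *			CAM_DBG(CAM_CDM, "BL done, cookie=%llu", cookie);
+ *	}
+ *
+ *	struct cam_cdm_acquire_data acq = {0};
+ *
+ *	strlcpy(acq.identifier, "ife", sizeof(acq.identifier));
+ *	acq.cell_index = 0;
+ *	acq.id = CAM_CDM_HW_ANY;
+ *	acq.userdata = client_priv;
+ *	acq.cam_cdm_callback = demo_cdm_cb;
+ *	acq.base_array_cnt = 1;
+ *	acq.base_array[0] = &soc_info->reg_map[0];
+ *	rc = cam_cdm_acquire(&acq);
+ *	(on success, acq.handle and acq.ops are valid for this client)
+ */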
+
+/**
+ * struct cam_cdm_bl_cmd - Cam CDM HW bl command
+ *
+ * @bl_addr : Union of all three type for CDM BL commands
+ * @mem_handle : Input mem handle of bl cmd
+ * @offset : Input offset of the actual bl cmd in the memory pointed
+ *           by mem_handle
+ * @len : Input length of the BL command; cannot be more than 1MB and
+ *           will be validated against offset+size of the memory pointed
+ *           to by mem_handle
+ *
+ */
+struct cam_cdm_bl_cmd {
+	union {
+		int32_t mem_handle;
+		uint32_t *hw_iova;
+		uintptr_t kernel_iova;
+	} bl_addr;
+	uint32_t offset;
+	uint32_t len;
+};
+
+/**
+ * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
+ *
+ * @flag : 1 if a callback is needed when this BL request is done,
+ *            0 if no callback is needed
+ * @userdata : Input private data which will be returned as part
+ *             of the callback if requested for this bl request via @flag.
+ * @cookie : Cookie if the callback is gen irq status
+ * @type : type of the submitted bl cmd address.
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
+ * @cmd              : Input payload holding the BL cmd array
+ *                     to be submitted.
+ *
+ */
+struct cam_cdm_bl_request {
+	int flag;
+	void *userdata;
+	uint64_t cookie;
+	enum cam_cdm_bl_cmd_addr_type type;
+	uint32_t cmd_arrary_count;
+	struct cam_cdm_bl_cmd cmd[1];
+};
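+
+/*
+ * Usage sketch (illustrative; acq.handle, cmd_buf_handle, cmd_buf_len and
+ * request_id are assumptions): submitting one BL that lives in a mapped
+ * mem handle, requesting a callback with a per-request cookie.
+ *
+ *	struct cam_cdm_bl_request bl_req = {0};
+ *
+ *	bl_req.flag = 1;
+ *	bl_req.userdata = client_priv;
+ *	bl_req.cookie = request_id;
+ *	bl_req.type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ *	bl_req.cmd_arrary_count = 1;
+ *	bl_req.cmd[0].bl_addr.mem_handle = cmd_buf_handle;
+ *	bl_req.cmd[0].offset = 0;
+ *	bl_req.cmd[0].len = cmd_buf_len;
+ *	rc = cam_cdm_submit_bls(acq.handle, &bl_req);
+ */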
+
+/**
+ * @brief : API to get the CDM iommu handles for a camera device type
+ *
+ * @identifier : Input pointer to a string which is the device label from dt
+ *                   like vfe, ife, jpeg etc. We do not need a cell index,
+ *                   assuming all devices of a single type map to one SMMU
+ *                   client
+ * @cdm_handles : Input iommu handle memory pointer to update handles
+ *
+ * @return 0 on success
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles);
+
+/**
+ * @brief : API to acquire a CDM
+ *
+ * @data : Input data for the CDM to be acquired
+ *
+ * @return 0 on success
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
+
+/**
+ * @brief : API to release a previously acquired CDM
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_release(uint32_t handle);
+
+/**
+ * @brief : API to submit the base & length (BL's) for acquired CDM
+ *
+ * @handle : Input cdm handle to which the BL's need to be submitted.
+ * @data   : Input pointer to the BL's to be submitted
+ *
+ * @return 0 on success
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
+
+/**
+ * @brief : API to stream ON a previously acquired CDM;
+ *          clocks/power are turned on or off based on active clients.
+ *
+ * @handle : Input handle for the CDM to be streamed ON
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_on(uint32_t handle);
+
+/**
+ * @brief : API to stream OFF a previously acquired CDM;
+ *          clocks/power are turned on or off based on active clients.
+ *
+ * @handle : Input handle for the CDM to be streamed OFF
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_off(uint32_t handle);
+
+/**
+ * @brief : API to reset a previously acquired CDM;
+ *          this can be performed only when the CDM is private.
+ *
+ * @handle : Input handle of the CDM to reset
+ *
+ * @return 0 on success
+ */
+int cam_cdm_reset_hw(uint32_t handle);
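+
+/*
+ * Typical client call order (a sketch, not enforced by the API):
+ * cam_cdm_get_iommu_handle() while mapping buffers, then cam_cdm_acquire(),
+ * cam_cdm_stream_on(), one or more cam_cdm_submit_bls(), and finally
+ * cam_cdm_stream_off() followed by cam_cdm_release(). cam_cdm_reset_hw()
+ * is intended for error recovery on a privately owned CDM.
+ */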
+
+#endif /* _CAM_CDM_API_H_ */

+ 199 - 0
drivers/cam_cdm/cam_cdm_soc.c

@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
+#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
+
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
+		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
+		reg)));
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+		if (reg_addr > (base + mem_len)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid mapped region %d", reg);
+			goto permission_error;
+		}
+		*value = cam_io_r_mb(reg_addr);
+		CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
+			(void __iomem *)base, reg,
+			(CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
+			*value);
+		return false;
+	}
+permission_error:
+	*value = 0;
+	return true;
+}
+
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
+			reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg));
+		if (reg_addr > (base + mem_len)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Accessing invalid region %d:%d\n",
+				reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+			goto permission_error;
+		}
+		cam_io_w_mb(value, reg_addr);
+		return false;
+	}
+permission_error:
+	return true;
+}
+
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr)
+{
+	int i, rc = -EINVAL;
+
+	ptr->dt_num_supported_clients = of_property_count_strings(
+						pdev->dev.of_node,
+						"cdm-client-names");
+	CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
+		ptr->dt_num_supported_clients);
+	if (ptr->dt_num_supported_clients >
+		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
+		CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
+			ptr->dt_num_supported_clients);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (ptr->dt_num_supported_clients < 0) {
+		CAM_DBG(CAM_CDM, "No cdm client names found");
+		ptr->dt_num_supported_clients = 0;
+		ptr->dt_cdm_shared = false;
+	} else {
+		ptr->dt_cdm_shared = true;
+	}
+	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
+		CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
+			ptr->dt_cdm_client_name[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table)
+{
+	int rc;
+	struct cam_hw_soc_info *soc_ptr;
+	const struct of_device_id *id;
+
+	if (!cdm_hw || (cdm_hw->soc_info.soc_private) ||
+		!(cdm_hw->soc_info.pdev))
+		return -EINVAL;
+
+	soc_ptr = &cdm_hw->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_ptr);
+	if (rc != 0) {
+		CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
+	} else {
+		soc_ptr->soc_private = kzalloc(
+				sizeof(struct cam_cdm_private_dt_data),
+				GFP_KERNEL);
+		if (!soc_ptr->soc_private)
+			return -ENOMEM;
+
+		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
+			soc_ptr->soc_private);
+		if (rc != 0) {
+			CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
+			goto error;
+		}
+		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
+		if ((!id) || !(id->data)) {
+			CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
+			goto error;
+		}
+		CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
+		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
+			(struct cam_cdm_reg_offset_table *)id->data;
+		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
+			id->compatible,
+			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
+	}
+
+	return rc;
+
+error:
+	rc = -EINVAL;
+	kfree(soc_ptr->soc_private);
+	soc_ptr->soc_private = NULL;
+	return rc;
+}
+
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
+{
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
+	CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
+		mgr->dt_supported_hw_cdm);
+
+	return rc;
+}

+ 21 - 0
drivers/cam_cdm/cam_cdm_soc.h

@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_SOC_H_
+#define _CAM_CDM_SOC_H_
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table);
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value);
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value);
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev,
+	struct cam_cdm_intf_mgr *mgr);
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr);
+
+#endif /* _CAM_CDM_SOC_H_ */

+ 717 - 0
drivers/cam_cdm/cam_cdm_util.c

@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_DWORD 4
+
+#define CAM_CDM_SW_CMD_COUNT    2
+#define CAM_CMD_LENGTH_MASK     0xFFFF
+#define CAM_CDM_COMMAND_OFFSET  24
+#define CAM_CDM_REG_OFFSET_MASK 0x00FFFFFF
+
+#define CAM_CDM_DMI_DATA_HI_OFFSET   8
+#define CAM_CDM_DMI_DATA_OFFSET      8
+#define CAM_CDM_DMI_DATA_LO_OFFSET   12
+
+static unsigned int CDMCmdHeaderSizes[
+	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
+	0, /* UNUSED*/
+	3, /* DMI*/
+	0, /* UNUSED*/
+	2, /* RegContinuous*/
+	1, /* RegRandom*/
+	2, /* BUFFER_INDIRECT*/
+	2, /* GenerateIRQ*/
+	3, /* WaitForEvent*/
+	1, /* ChangeBase*/
+	1, /* PERF_CONTROL*/
+	3, /* DMI32*/
+	3, /* DMI64*/
+};
+
+/**
+ * struct cdm_regrandom_cmd - Definition for CDM random register command.
+ * @count: Number of register writes
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_regrandom_cmd {
+	unsigned int count    : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
+ * @count: Number of register writes
+ * @reserved0: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Start address of the range of registers
+ * @reserved1: reserved bits
+ */
+struct cdm_regcontinuous_cmd {
+	unsigned int count     : 16;
+	unsigned int reserved0 : 8;
+	unsigned int cmd       : 8;
+	unsigned int offset    : 24;
+	unsigned int reserved1 : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_dmi_cmd - Definition for a CDM DMI command.
+ * @length: Number of bytes in LUT - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Address of the LUT in memory
+ * @DMIAddr: Address of the target DMI config register
+ * @DMISel: DMI identifier
+ */
+struct cdm_dmi_cmd {
+	unsigned int length   : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+	unsigned int addr;
+	unsigned int DMIAddr  : 24;
+	unsigned int DMISel   : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
+ * @length: Number of bytes in buffer - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr:  Device address of the indirect buffer
+ */
+struct cdm_indirect_cmd {
+	unsigned int length     : 16;
+	unsigned int reserved   : 8;
+	unsigned int cmd        : 8;
+	unsigned int addr;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_changebase_cmd - Definition for CDM base address change command.
+ * @base: Base address to be changed to
+ * @cmd:Command ID (CDMCmd)
+ */
+struct cdm_changebase_cmd {
+	unsigned int base   : 24;
+	unsigned int cmd    : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_wait_event_cmd - Definition for a CDM wait event command.
+ * @mask: Mask for the events
+ * @id: ID to read back for debug
+ * @iw_reserved: reserved bits
+ * @iw: iw AHB write bit
+ * @cmd:Command ID (CDMCmd)
+ * @offset: Offset to where data is written
+ * @offset_reserved: reserved bits
+ * @data: data returned in IRQ_USR_DATA
+ */
+struct cdm_wait_event_cmd {
+	unsigned int mask             : 8;
+	unsigned int id               : 8;
+	unsigned int iw_reserved      : 7;
+	unsigned int iw               : 1;
+	unsigned int cmd              : 8;
+	unsigned int offset           : 24;
+	unsigned int offset_reserved  : 8;
+	unsigned int data;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_genirq_cmd - Definition for a CDM Gen IRQ command.
+ * @reserved: reserved bits
+ * @cmd:Command ID (CDMCmd)
+ * @userdata: userdata returned in IRQ_USR_DATA
+ */
+struct cdm_genirq_cmd {
+	unsigned int reserved   : 24;
+	unsigned int cmd        : 8;
+	unsigned int userdata;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_perf_ctrl_cmd - Definition for CDM perf control command.
+ * @perf: perf command
+ * @reserved: reserved bits
+ * @cmd:Command ID (CDMCmd)
+ */
+struct cdm_perf_ctrl_cmd {
+	unsigned int perf     : 2;
+	unsigned int reserved : 22;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+uint32_t cdm_get_cmd_header_size(unsigned int command)
+{
+	return CDMCmdHeaderSizes[command];
+}
+
+uint32_t cdm_required_size_reg_continuous(uint32_t  numVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
+}
+
+uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
+		(2 * numRegVals);
+}
+
+uint32_t cdm_required_size_dmi(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+}
+
+uint32_t cdm_required_size_genirq(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
+}
+
+uint32_t cdm_required_size_indirect(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+}
+
+uint32_t cdm_required_size_changebase(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+}
+
+uint32_t cdm_offsetof_dmi_addr(void)
+{
+	return offsetof(struct cdm_dmi_cmd, addr);
+}
+
+uint32_t cdm_offsetof_indirect_addr(void)
+{
+	return offsetof(struct cdm_indirect_cmd, addr);
+}
+
+uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
+	uint32_t numVals, uint32_t *pVals)
+{
+	uint32_t i;
+	struct cdm_regcontinuous_cmd *pHeader =
+		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
+
+	pHeader->count = numVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_CONT;
+	pHeader->reserved0 = 0;
+	pHeader->reserved1 = 0;
+	pHeader->offset = reg;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+
+	for (i = 0; i < numVals; i++)
+		pCmdBuffer[i] = pVals[i];
+
+	pCmdBuffer += numVals;
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
+	uint32_t *pRegVals)
+{
+	uint32_t i;
+	uint32_t *dst, *src;
+	struct cdm_regrandom_cmd *pHeader =
+		(struct cdm_regrandom_cmd *)pCmdBuffer;
+
+	pHeader->count = numRegVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_RANDOM;
+	pHeader->reserved = 0;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+	dst = pCmdBuffer;
+	src = pRegVals;
+	for (i = 0; i < numRegVals; i++) {
+		*dst++ = *src++;
+		*dst++ = *src++;
+	}
+
+	return dst;
+}
+
+uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
+	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
+	uint32_t length)
+{
+	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;
+
+	pHeader->cmd = dmiCmd;
+	pHeader->addr = dmiBufferAddr;
+	pHeader->length = length - 1;
+	pHeader->DMIAddr = DMIAddr;
+	pHeader->DMISel = DMISel;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
+	uint32_t length)
+{
+	struct cdm_indirect_cmd *pHeader =
+		(struct cdm_indirect_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
+	pHeader->addr = indirectBufAddr;
+	pHeader->length = length - 1;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
+{
+	struct cdm_changebase_cmd *pHeader =
+		(struct cdm_changebase_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE;
+	pHeader->base = base;
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+
+	return pCmdBuffer;
+}
+
+void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
+{
+	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
+	pHeader->userdata = userdata;
+}
+
+struct cam_cdm_utils_ops CDM170_ops = {
+	cdm_get_cmd_header_size,
+	cdm_required_size_reg_continuous,
+	cdm_required_size_reg_random,
+	cdm_required_size_dmi,
+	cdm_required_size_genirq,
+	cdm_required_size_indirect,
+	cdm_required_size_changebase,
+	cdm_offsetof_dmi_addr,
+	cdm_offsetof_indirect_addr,
+	cdm_write_regcontinuous,
+	cdm_write_regrandom,
+	cdm_write_dmi,
+	cdm_write_indirect,
+	cdm_write_changebase,
+	cdm_write_genirq,
+};
+
+int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
+	uint32_t base_array_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	void __iomem **device_base)
+{
+	int ret = -EINVAL, i;
+
+	for (i = 0; i < base_array_size; i++) {
+		if (base_table[i])
+			CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
+			i, (base_table[i])->mem_cam_base, hw_base);
+		if ((base_table[i]) &&
+			((base_table[i])->mem_cam_base == hw_base)) {
+			*device_base = (base_table[i])->mem_base;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	int ret = 0;
+	uint32_t *data;
+	struct cdm_regcontinuous_cmd *reg_cont;
+
+	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
+		(!base_addr)) {
+		CAM_ERR(CAM_CDM, "invalid base addr or data length %d %pK",
+			cmd_buf_size, base_addr);
+		return -EINVAL;
+	}
+
+	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
+	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
+		(((reg_cont->count * sizeof(uint32_t)) +
+			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
+			cmd_buf_size)) {
+		CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count %d",
+			cmd_buf_size, reg_cont->count);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+	cam_io_memcpy(base_addr + reg_cont->offset, data,
+		reg_cont->count * sizeof(uint32_t));
+
+	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_regrandom_cmd *reg_random;
+	uint32_t *data;
+
+	if (!base_addr) {
+		CAM_ERR(CAM_CDM, "invalid base address");
+		return -EINVAL;
+	}
+
+	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
+	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
+		(((reg_random->count * (sizeof(uint32_t) * 2)) +
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
+			cmd_buf_size)) {
+		CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
+			reg_random->count, cmd_buf_size);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+
+	for (i = 0; i < reg_random->count; i++) {
+		CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
+			((void __iomem *)(base_addr + data[0])),
+			data[1]);
+		cam_io_w(data[1], base_addr + data[0]);
+		data += 2;
+	}
+
+	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+
+	return 0;
+}
+
+static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
+	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_dmi_cmd *swd_dmi;
+	uint32_t *data;
+
+	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
+
+	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
+		CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
+			swd_dmi->length + 1);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_required_size_dmi();
+
+	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
+		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			cam_io_w_mb(data[1], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
+			data += 2;
+		}
+	} else if (cdm_cmd_type == CAM_CDM_CMD_DMI) {
+		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_OFFSET);
+			data += 1;
+		}
+	} else {
+		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			data += 1;
+		}
+	}
+	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
+
+	return 0;
+}
+
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag)
+{
+	int ret = 0;
+	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
+	uint32_t used_bytes = 0;
+
+	total_cmd_buf_size = cmd_buf_size;
+
+	while (cmd_buf_size > 0) {
+		CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
+		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
+		switch (cdm_cmd_type) {
+		case CAM_CDM_CMD_REG_CONT: {
+			ret = cam_cdm_util_reg_cont_write(*current_device_base,
+				cmd_buf, cmd_buf_size, &used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes/4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_REG_RANDOM: {
+			ret = cam_cdm_util_reg_random_write(
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_DMI:
+		case CAM_CDM_CMD_SWD_DMI_32:
+		case CAM_CDM_CMD_SWD_DMI_64: {
+			if (!*current_device_base) {
+				CAM_ERR(CAM_CDM,
+					"Got SWI DMI cmd =%d for invalid hw",
+					cdm_cmd_type);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE: {
+			struct cdm_changebase_cmd *change_base_cmd =
+				(struct cdm_changebase_cmd *)cmd_buf;
+
+			ret = cam_cdm_get_ioremap_from_base(
+				change_base_cmd->base, base_array_size,
+				base_table, current_device_base);
+			if (ret != 0) {
+				CAM_ERR(CAM_CDM,
+					"Get ioremap change base failed %x",
+					change_base_cmd->base);
+				break;
+			}
+			CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
+				change_base_cmd->base,
+				*current_device_base);
+			cmd_buf_size -= (4 *
+				cdm_required_size_changebase());
+			cmd_buf += cdm_required_size_changebase();
+			}
+			break;
+		default:
+			CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type 0x%x",
+				cdm_cmd_type);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
+	CAM_INFO(CAM_CDM, "DMI");
+	return ret;
+}
+
+static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
+	CAM_INFO(CAM_CDM, "Buff Indirect");
+	return ret;
+}
+
+static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+	struct cdm_regcontinuous_cmd *p_regcont_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
+	int i = 0;
+
+	p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
+
+	CAM_INFO(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X",
+		p_regcont_cmd->count, p_regcont_cmd->offset);
+
+	for (i = 0; i < p_regcont_cmd->count; i++) {
+		CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i,
+			*temp_ptr);
+		temp_ptr++;
+		ret++;
+	}
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
+{
+	struct cdm_regrandom_cmd *p_regrand_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
+	long ret = 0;
+	int i = 0;
+
+	p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
+
+	CAM_INFO(CAM_CDM, "REG_RAND: COUNT: %u",
+		p_regrand_cmd->count);
+
+	for (i = 0; i < p_regrand_cmd->count; i++) {
+		CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X",
+			i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i,
+			*(temp_ptr + 1));
+		temp_ptr += 2;
+		ret += 2;
+	}
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_gen_irq_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ];
+
+	CAM_INFO(CAM_CDM, "GEN_IRQ");
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT];
+
+	CAM_INFO(CAM_CDM, "WAIT_EVENT");
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+	struct cdm_changebase_cmd *p_cbase_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
+
+	p_cbase_cmd = (struct cdm_changebase_cmd *)temp_ptr;
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];
+
+	CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X",
+		p_cbase_cmd->base);
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_perf_ctrl_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL];
+
+	CAM_INFO(CAM_CDM, "PERF_CTRL");
+
+	return ret;
+}
+
+void cam_cdm_util_dump_cmd_buf(
+	uint32_t *cmd_buf_start, uint32_t *cmd_buf_end)
+{
+	uint32_t *buf_now = cmd_buf_start;
+	uint32_t cmd = 0;
+
+	if (!cmd_buf_start || !cmd_buf_end) {
+		CAM_INFO(CAM_CDM, "Invalid args");
+		return;
+	}
+
+	do {
+		cmd = *buf_now;
+		cmd = cmd >> CAM_CDM_COMMAND_OFFSET;
+
+		switch (cmd) {
+		case CAM_CDM_CMD_DMI:
+		case CAM_CDM_CMD_DMI_32:
+		case CAM_CDM_CMD_DMI_64:
+			buf_now += cam_cdm_util_dump_dmi_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_REG_CONT:
+			buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_REG_RANDOM:
+			buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_BUFF_INDIRECT:
+			buf_now += cam_cdm_util_dump_buff_indirect(buf_now);
+			break;
+		case CAM_CDM_CMD_GEN_IRQ:
+			buf_now += cam_cdm_util_dump_gen_irq_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_WAIT_EVENT:
+			buf_now += cam_cdm_util_dump_wait_event_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE:
+			buf_now += cam_cdm_util_dump_change_base_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_PERF_CTRL:
+			buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now);
+			break;
+		default:
+			CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x",
+				cmd, *buf_now);
+			buf_now++;
+			break;
+		}
+	} while (buf_now <= cmd_buf_end);
+}

+ 161 - 0
drivers/cam_cdm/cam_cdm_util.h

@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_UTIL_H_
+#define _CAM_CDM_UTIL_H_
+
+enum cam_cdm_command {
+	CAM_CDM_CMD_UNUSED = 0x0,
+	CAM_CDM_CMD_DMI = 0x1,
+	CAM_CDM_CMD_NOT_DEFINED = 0x2,
+	CAM_CDM_CMD_REG_CONT = 0x3,
+	CAM_CDM_CMD_REG_RANDOM = 0x4,
+	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
+	CAM_CDM_CMD_GEN_IRQ = 0x6,
+	CAM_CDM_CMD_WAIT_EVENT = 0x7,
+	CAM_CDM_CMD_CHANGE_BASE = 0x8,
+	CAM_CDM_CMD_PERF_CTRL = 0x9,
+	CAM_CDM_CMD_DMI_32 = 0xa,
+	CAM_CDM_CMD_DMI_64 = 0xb,
+	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
+	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
+	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
+	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
+};
+
+/**
+ * struct cam_cdm_utils_ops - Camera CDM util ops
+ *
+ * @cdm_get_cmd_header_size: Returns the size of the given command header
+ *                           in DWORDs.
+ *      @command Command ID
+ *      @return Size of the command in DWORDs
+ *
+ * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
+ *                                    command in dwords.
+ *      @numVals Number of continuous values
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_reg_random: Calculates the size of a reg-random command
+ *                                in dwords.
+ *      @numRegVals  Number of register/value pairs
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_indirect: Calculates the size of an indirect command
+ *                              in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_changebase: Calculates the size of a change-base command
+ *                                in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_offsetof_dmi_addr: Returns the offset of the address field in the
+ *                         DMI command header.
+ *      @return Offset of the addr field
+ *
+ * @cdm_offsetof_indirect_addr: Returns the offset of the address field in
+ *                              the indirect command header.
+ *      @return Offset of the addr field
+ *
+ * @cdm_write_regcontinuous: Writes a reg-continuous command into the
+ *                           command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @reg: Beginning of the register address range where
+ *            values will be written.
+ *      @numVals: Number of values (registers) that will be written
+ *      @pVals: An array of values that will be written
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_regrandom: Writes a reg-random command into the command
+ *                       buffer as register/value pairs.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @numRegVals: Number of register/value pairs that will be written
+ *      @pRegVals: An array of register/value pairs that will be written.
+ *                 The even indices are registers and the odd indices
+ *                 are values, e.g., {reg1, val1, reg2, val2, ...}.
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_dmi: Writes a DMI command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @dmiCmd: DMI command
+ *      @DMIAddr: Address of the DMI
+ *      @DMISel: Selected bank that the DMI will write to
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_indirect: Writes an indirect command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @indirectBufferAddr: Device address of the indirect cmd buffer
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_changebase: Writes a change-base (CDM base address) command
+ *                        into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @base: New base (device) address
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_genirq: Writes a gen-irq command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @userdata: Userdata or cookie returned by hardware in the IRQ.
+ */
+struct cam_cdm_utils_ops {
+	uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
+	uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals);
+	uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
+	uint32_t (*cdm_required_size_dmi)(void);
+	uint32_t (*cdm_required_size_genirq)(void);
+	uint32_t (*cdm_required_size_indirect)(void);
+	uint32_t (*cdm_required_size_changebase)(void);
+	uint32_t (*cdm_offsetof_dmi_addr)(void);
+	uint32_t (*cdm_offsetof_indirect_addr)(void);
+	uint32_t *(*cdm_write_regcontinuous)(
+		uint32_t *pCmdBuffer,
+		uint32_t reg,
+		uint32_t numVals,
+		uint32_t *pVals);
+	uint32_t *(*cdm_write_regrandom)(
+		uint32_t *pCmdBuffer,
+		uint32_t numRegVals,
+		uint32_t *pRegVals);
+	uint32_t *(*cdm_write_dmi)(
+		uint32_t *pCmdBuffer,
+		uint8_t dmiCmd,
+		uint32_t DMIAddr,
+		uint8_t DMISel,
+		uint32_t dmiBufferAddr,
+		uint32_t length);
+	uint32_t *(*cdm_write_indirect)(
+		uint32_t *pCmdBuffer,
+		uint32_t indirectBufferAddr,
+		uint32_t length);
+	uint32_t *(*cdm_write_changebase)(
+		uint32_t *pCmdBuffer,
+		uint32_t base);
+	void (*cdm_write_genirq)(
+		uint32_t *pCmdBuffer,
+		uint32_t userdata);
+};
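+
+/*
+ * Illustrative usage (a sketch, not part of this interface; the locals
+ * "ops", "cmd_buf" and "reg_offset" are hypothetical): size the command
+ * first, then write it and advance the buffer pointer:
+ *
+ *	uint32_t vals[2] = { 0x1f, 0x2f };
+ *	uint32_t size_dw = ops->cdm_required_size_reg_continuous(2);
+ *
+ *	// caller must ensure size_dw dwords are free in cmd_buf
+ *	cmd_buf = ops->cdm_write_regcontinuous(cmd_buf, reg_offset, 2, vals);
+ */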
+
+/**
+ * cam_cdm_util_dump_cmd_buf()
+ *
+ * @brief:            Util function to dump CDM command buffers
+ *
+ * @cmd_buffer_start: Pointer to start of cmd buffer
+ * @cmd_buffer_end:   Pointer to end of cmd buffer
+ *
+ */
+void cam_cdm_util_dump_cmd_buf(
+	uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end);
+
+#endif /* _CAM_CDM_UTIL_H_ */

+ 18 - 0
drivers/cam_cdm/cam_cdm_virtual.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_VIRTUAL_H_
+#define _CAM_CDM_VIRTUAL_H_
+
+#include "cam_cdm_intf_api.h"
+
+int cam_virtual_cdm_probe(struct platform_device *pdev);
+int cam_virtual_cdm_remove(struct platform_device *pdev);
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag);
+
+#endif /* _CAM_CDM_VIRTUAL_H_ */

+ 382 - 0
drivers/cam_cdm/cam_cdm_virtual_core.c

@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm_virtual.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
+
+static void cam_virtual_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		if (payload->irq_status & 0x2) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+				payload->irq_data,
+				&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
+						node, node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				CAM_ERR(CAM_CDM, "Invalid node for inline irq");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+		if (payload->irq_status & 0x1) {
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
+			complete(&core->reset_complete);
+		}
+		kfree(payload);
+	}
+}
+
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -EINVAL;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	mutex_lock(&client->lock);
+	for (i = 0; i < req->data->cmd_arrary_count; i++) {
+		uintptr_t vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			CAM_ERR(CAM_CDM,
+				"len(%d) is invalid count=%d total cnt=%d",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_cpu_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
+				&len);
+		} else if (req->data->type ==
+			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
+			rc = 0;
+			vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
+			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/Kernel va type is supported %d",
+				req->data->type);
+			rc = -EINVAL;
+			break;
+		}
+
+		if ((!rc) && (vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			if ((len - cdm_cmd->cmd[i].offset) <
+				cdm_cmd->cmd[i].len) {
+				CAM_ERR(CAM_CDM, "Not enough buffer");
+				rc = -EINVAL;
+				break;
+			}
+			CAM_DBG(CAM_CDM,
+				"hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
+				cdm_cmd->cmd[i].len, len);
+			rc = cam_cdm_util_cmd_buf_write(
+				&client->changebase_addr,
+				((uint32_t *)vaddr_ptr +
+					((cdm_cmd->cmd[i].offset)/4)),
+				cdm_cmd->cmd[i].len, client->data.base_array,
+				client->data.base_array_cnt, core->bl_tag);
+			if (rc) {
+				CAM_ERR(CAM_CDM,
+					"write failed for cnt=%d:%d len %u",
+					i, req->data->cmd_arrary_count,
+					cdm_cmd->cmd[i].len);
+				break;
+			}
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for cmd_count=%d cnt=%d",
+				i, req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (!rc) {
+			struct cam_cdm_work_payload *payload;
+
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d",
+				i, core->bl_tag);
+			if ((true == req->data->flag) &&
+				(i == (req->data->cmd_arrary_count - 1))) {
+				struct cam_cdm_bl_cb_request_entry *node;
+
+				node = kzalloc(sizeof(
+					struct cam_cdm_bl_cb_request_entry),
+					GFP_KERNEL);
+				if (!node) {
+					rc = -ENOMEM;
+					break;
+				}
+				node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+				node->client_hdl = req->handle;
+				node->cookie = req->data->cookie;
+				node->bl_tag = core->bl_tag;
+				node->userdata = req->data->userdata;
+				mutex_lock(&cdm_hw->hw_mutex);
+				list_add_tail(&node->entry,
+					&core->bl_request_list);
+				mutex_unlock(&cdm_hw->hw_mutex);
+
+				payload = kzalloc(sizeof(
+					struct cam_cdm_work_payload),
+					GFP_ATOMIC);
+				if (payload) {
+					payload->irq_status = 0x2;
+					payload->irq_data = core->bl_tag;
+					payload->hw = cdm_hw;
+					INIT_WORK((struct work_struct *)
+						&payload->work,
+						cam_virtual_cdm_work);
+					queue_work(core->work_queue,
+						&payload->work);
+				}
+			}
+			core->bl_tag++;
+			CAM_DBG(CAM_CDM,
+				"BL commit is a no-op for virtual CDM");
+			if (!rc && (core->bl_tag == 63))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	return rc;
+}
+
+int cam_virtual_cdm_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	int rc;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
+	cdm_hw->soc_info.soc_private = kzalloc(
+			sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
+	if (!cdm_hw->soc_info.soc_private) {
+		rc = -ENOMEM;
+		goto soc_load_failed;
+	}
+
+	rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
+		kfree(cdm_hw->soc_info.soc_private);
+		cdm_hw->soc_info.soc_private = NULL;
+		goto soc_load_failed;
+	}
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+					cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = NULL;
+	cdm_hw_intf->hw_ops.deinit = NULL;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	cdm_hw->open_count = 0;
+	cdm_core->iommu_hdl.non_secure = -1;
+	cdm_core->iommu_hdl.secure = -1;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+	mutex_lock(&cdm_hw->hw_mutex);
+	cdm_core->id = CAM_CDM_VIRTUAL;
+	memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
+		sizeof(CAM_CDM_VIRTUAL_NAME));
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	cdm_core->ops = NULL;
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cam-cdm-intf",
+		CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
+		goto cpas_registration_failed;
+	}
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+			soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
+		goto intf_registration_failed;
+	}
+	CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
+		cdm_hw_intf->hw_idx);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	return 0;
+intf_registration_failed:
+	cam_cpas_unregister_client(cdm_core->cpas_handle);
+cpas_registration_failed:
+	kfree(cdm_hw->soc_info.soc_private);
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	mutex_destroy(&cdm_hw->hw_mutex);
+soc_load_failed:
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	return rc;
+}
+
+int cam_virtual_cdm_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EBUSY;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get virtual private data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get virtual core data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+		return rc;
+	}
+
+	rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
+			cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
+			cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM,
+			"Virtual CDM Interface de-registration failed");
+		return rc;
+	}
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	return 0;
+}

+ 135 - 0
drivers/cam_cdm/cam_hw_cdm170_reg.h

@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_CDM170_REG_H_
+#define _CAM_HW_CDM170_REG_H_
+
+#define CAM_CDM_REG_OFFSET_FIRST 0x0
+#define CAM_CDM_REG_OFFSET_LAST 0x200
+#define CAM_CDM_REGS_COUNT 0x30
+#define CAM_CDM_HWFIFO_SIZE 0x40
+
+#define CAM_CDM_OFFSET_HW_VERSION 0x0
+#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
+#define CAM_CDM_OFFSET_RST_CMD 0x10
+#define CAM_CDM_OFFSET_CGC_CFG 0x14
+#define CAM_CDM_OFFSET_CORE_CFG 0x18
+#define CAM_CDM_OFFSET_CORE_EN 0x1c
+#define CAM_CDM_OFFSET_FE_CFG 0x20
+#define CAM_CDM_OFFSET_IRQ_MASK 0x30
+#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
+#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
+#define CAM_CDM_OFFSET_IRQ_SET 0x3c
+#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
+
+#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
+#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
+#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
+#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
+#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
+#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
+#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
+
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
+#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
+#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
+#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
+#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
+#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
+#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
+#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
+#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
+#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
+#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
+#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
+#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
+#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
+#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
+#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
+#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
+#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
+#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
+#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
+#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
+#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
+#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
+#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
+#define CAM_CDM_OFFSET_PERF_MON_0 0x114
+#define CAM_CDM_OFFSET_PERF_MON_1 0x118
+#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
+#define CAM_CDM_OFFSET_SPARE 0x200
+
+/*
+ * Always keep the register offsets below aligned with the
+ * enum cam_cdm_regs ordering.
+ */
+struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
+	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
+};
+
+struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
+	.first_offset = CAM_CDM_REG_OFFSET_FIRST,
+	.last_offset = CAM_CDM_REG_OFFSET_LAST,
+	.reg_count = CAM_CDM_REGS_COUNT,
+	.offsets = cam170_cpas_cdm_register_offsets,
+	.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
+		sizeof(struct cam_cdm_reg_offset)),
+};
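+
+/*
+ * Illustrative lookup (a sketch; the "offset" field name comes from
+ * struct cam_cdm_reg_offset in cam_cdm.h and is assumed here, as are the
+ * "reg" index and "base" address locals):
+ *
+ *	if (reg < cam170_cpas_cdm_offset_table.offset_max_size)
+ *		addr = base + cam170_cpas_cdm_offset_table.offsets[reg].offset;
+ */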
+
+#endif /* _CAM_HW_CDM170_REG_H_ */

+ 10 - 0
drivers/cam_core/Makefile

@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_sync
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o

+ 586 - 0
drivers/cam_core/cam_context.c

@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/refcount.h>
+
+#include "cam_context.h"
+#include "cam_debug_util.h"
+#include "cam_node.h"
+
+static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+
+	if (!ctx || !ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].irq_ops)
+		rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
+			evt_data);
+	else
+		CAM_DBG(CAM_CORE,
+			"No function to handle event %d in dev %d, state %d",
+			evt_id, ctx->dev_hdl, ctx->state);
+	return rc;
+}
+
+int cam_context_shutdown(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_release_dev_cmd cmd;
+
+	if (ctx->state > CAM_CTX_AVAILABLE && ctx->state < CAM_CTX_STATE_MAX) {
+		cmd.session_handle = ctx->session_hdl;
+		cmd.dev_handle = ctx->dev_hdl;
+		rc = cam_context_handle_release_dev(ctx, &cmd);
+		if (rc)
+			CAM_ERR(CAM_CORE,
+				"context release failed for dev_name %s",
+				ctx->dev_name);
+		else
+			cam_context_putref(ctx);
+	} else {
+		CAM_WARN(CAM_CORE,
+			"dev %s context id %u state %d invalid to release hdl",
+			ctx->dev_name, ctx->ctx_id, ctx->state);
+		rc = -EINVAL;
+	}
+
+	rc = cam_destroy_device_hdl(ctx->dev_hdl);
+	if (rc)
+		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
+			ctx->dev_name);
+	else
+		ctx->dev_hdl = -1;
+
+	return rc;
+}
+
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *info)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!info) {
+		CAM_ERR(CAM_CORE, "Invalid get device info payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) {
+		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
+			ctx, info);
+	} else {
+		CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_link(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!link) {
+		CAM_ERR(CAM_CORE, "Invalid link payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.link) {
+		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!unlink) {
+		CAM_ERR(CAM_CORE, "Invalid unlink payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.unlink) {
+		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
+			ctx, unlink);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm unlink in dev %d, name %s, state %d",
+			ctx->dev_hdl, ctx->dev_name, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!apply) {
+		CAM_ERR(CAM_CORE, "Invalid apply request payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.apply_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
+			apply);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.flush_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
+			flush);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *process_evt)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.process_evt) {
+		rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx,
+			process_evt);
+	} else {
+		/* handling of this message is optional */
+		CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+	uint32_t buf_info)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].pagefault_ops) {
+		rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova,
+			buf_info);
+	} else {
+		CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+
+	return rc;
+}
+
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+	int i;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
+			cmd->dev_handle, ctx->state);
+		rc = -EPROTO;
+	}
+
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	INIT_LIST_HEAD(&ctx->wait_req_list);
+	INIT_LIST_HEAD(&ctx->pending_req_list);
+	INIT_LIST_HEAD(&ctx->free_req_list);
+
+	for (i = 0; i < ctx->req_size; i++) {
+		INIT_LIST_HEAD(&ctx->req_list[i].list);
+		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+	}
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_acquire_hw(struct cam_context *ctx,
+	void *args)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!args) {
+		CAM_ERR(CAM_CORE, "Invalid acquire device hw command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_hw) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_hw(
+			ctx, args);
+	} else {
+		CAM_ERR(CAM_CORE, "No acquire hw for dev %s, state %d",
+			ctx->dev_name, ctx->state);
+		rc = -EPROTO;
+	}
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_release_dev(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid release device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_release_hw(struct cam_context *ctx,
+	void *args)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!args) {
+		CAM_ERR(CAM_CORE, "Invalid release HW command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.release_hw) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.release_hw(
+			ctx, args);
+	} else {
+		CAM_ERR(CAM_CORE, "No release hw for dev %s, state %d",
+			ctx->dev_name, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid flush device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
+			ctx, cmd);
+	} else {
+		CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_config_dev(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid config device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_start_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid start device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.start_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev(
+			ctx, cmd);
+	else
+		/* start device can be optional for some driver */
+		CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid stop device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
+			ctx, cmd);
+	else
+		/* stop device can be optional for some driver */
+		CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s, state %d",
+			ctx->dev_hdl, ctx->dev_name, ctx->state);
+
+	ctx->last_flush_req = 0;
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_init(struct cam_context *ctx,
+	const char *dev_name,
+	uint64_t dev_id,
+	uint32_t ctx_id,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_ctx_request *req_list,
+	uint32_t req_size)
+{
+	int i;
+
+	/* crm_node_intf is optional */
+	if (!ctx || !hw_mgr_intf || !req_list) {
+		CAM_ERR(CAM_CORE, "Invalid input parameters");
+		return -EINVAL;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->dev_hdl = -1;
+	ctx->link_hdl = -1;
+	ctx->session_hdl = -1;
+	INIT_LIST_HEAD(&ctx->list);
+	mutex_init(&ctx->ctx_mutex);
+	mutex_init(&ctx->sync_mutex);
+	spin_lock_init(&ctx->lock);
+
+	ctx->dev_name = dev_name;
+	ctx->dev_id = dev_id;
+	ctx->ctx_id = ctx_id;
+	ctx->last_flush_req = 0;
+	ctx->ctx_crm_intf = NULL;
+	ctx->crm_ctx_intf = crm_node_intf;
+	ctx->hw_mgr_intf = hw_mgr_intf;
+	ctx->irq_cb_intf = cam_context_handle_hw_event;
+
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	INIT_LIST_HEAD(&ctx->wait_req_list);
+	INIT_LIST_HEAD(&ctx->pending_req_list);
+	INIT_LIST_HEAD(&ctx->free_req_list);
+	ctx->req_list = req_list;
+	ctx->req_size = req_size;
+	for (i = 0; i < req_size; i++) {
+		INIT_LIST_HEAD(&ctx->req_list[i].list);
+		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+		ctx->req_list[i].ctx = ctx;
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	ctx->state_machine = NULL;
+	ctx->ctx_priv = NULL;
+
+	return 0;
+}
+
+int cam_context_deinit(struct cam_context *ctx)
+{
+	if (!ctx)
+		return -EINVAL;
+
+	/*
+	 * This is called from platform device remove.
+	 * Everything should have been released by this point,
+	 * so we just clear the context memory.
+	 */
+	if (ctx->state != CAM_CTX_AVAILABLE)
+		CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
+void cam_context_putref(struct cam_context *ctx)
+{
+	kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
+	CAM_DBG(CAM_CORE,
+		"ctx device hdl %ld, ref count %d, dev_name %s",
+		ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)),
+		ctx->dev_name);
+}
+
+void cam_context_getref(struct cam_context *ctx)
+{
+	if (kref_get_unless_zero(&ctx->refcount) == 0) {
+		/* should never happen */
+		WARN(1, "%s fail\n", __func__);
+	}
+	CAM_DBG(CAM_CORE,
+		"ctx device hdl %ld, ref count %d, dev_name %s",
+		ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)),
+		ctx->dev_name);
+}

+ 460 - 0
drivers/cam_core/cam_context.h

@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CONTEXT_H_
+#define _CAM_CONTEXT_H_
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_hw_mgr_intf.h"
+
+/* Forward declarations */
+struct cam_context;
+
+/* max request number */
+#define CAM_CTX_REQ_MAX              20
+#define CAM_CTX_CFG_MAX              20
+#define CAM_CTX_RES_MAX              20
+
+/**
+ * enum cam_context_state - context top-level states
+ *
+ */
+enum cam_context_state {
+	CAM_CTX_UNINIT               = 0,
+	CAM_CTX_AVAILABLE            = 1,
+	CAM_CTX_ACQUIRED             = 2,
+	CAM_CTX_READY                = 3,
+	CAM_CTX_ACTIVATED            = 4,
+	CAM_CTX_STATE_MAX            = 5,
+};
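+
+/*
+ * Typical progression (a non-normative sketch inferred from the handlers
+ * below): AVAILABLE -> ACQUIRED on acquire_dev, ACQUIRED -> READY once the
+ * device is configured/linked, READY -> ACTIVATED on start_dev, and the
+ * reverse on stop/release. cam_context_init() leaves a context in
+ * CAM_CTX_AVAILABLE.
+ */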
+
+/**
+ * struct cam_ctx_request - Common request structure for the context
+ *
+ * @list:                  Link list entry
+ * @status:                Request status
+ * @request_id:            Request id
+ * @req_priv:              Derived request object
+ * @hw_update_entries:     Hardware update entries
+ * @num_hw_update_entries: Number of hardware update entries
+ * @in_map_entries:        Entries for in fences
+ * @num_in_map_entries:    Number of in map entries
+ * @out_map_entries:       Entries for out fences
+ * @num_out_map_entries:   Number of out map entries
+ * @num_in_acked:          Number of in fence acked
+ * @num_out_acked:         Number of out fence acked
+ * @flushed:               Request is flushed
+ * @ctx:                   The context to which this request belongs
+ * @pf_data:               Page fault debug data
+ *
+ */
+struct cam_ctx_request {
+	struct list_head               list;
+	uint32_t                       status;
+	uint64_t                       request_id;
+	void                          *req_priv;
+	struct cam_hw_update_entry     hw_update_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_hw_update_entries;
+	struct cam_hw_fence_map_entry  in_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_in_map_entries;
+	struct cam_hw_fence_map_entry  out_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_out_map_entries;
+	atomic_t                       num_in_acked;
+	uint32_t                       num_out_acked;
+	int                            flushed;
+	struct cam_context            *ctx;
+	struct cam_hw_mgr_dump_pf_data pf_data;
+};
+
+/**
+ * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls
+ *
+ * @acquire_dev:           Function pointer for acquire device
+ * @release_dev:           Function pointer for release device
+ * @config_dev:            Function pointer for config device
+ * @start_dev:             Function pointer for start device
+ * @stop_dev:              Function pointer for stop device
+ * @flush_dev:             Function pointer for flush device
+ * @acquire_hw:            Function pointer for acquire hw
+ * @release_hw:            Function pointer for release hw
+ *
+ */
+struct cam_ctx_ioctl_ops {
+	int (*acquire_dev)(struct cam_context *ctx,
+			struct cam_acquire_dev_cmd *cmd);
+	int (*release_dev)(struct cam_context *ctx,
+			struct cam_release_dev_cmd *cmd);
+	int (*config_dev)(struct cam_context *ctx,
+			struct cam_config_dev_cmd *cmd);
+	int (*start_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+	int (*stop_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+	int (*flush_dev)(struct cam_context *ctx,
+			struct cam_flush_dev_cmd *cmd);
+	int (*acquire_hw)(struct cam_context *ctx, void *args);
+	int (*release_hw)(struct cam_context *ctx, void *args);
+};
+
+/**
+ * struct cam_ctx_crm_ops - Function table for handling CRM to context calls
+ *
+ * @get_dev_info:          Get device information
+ * @link:                  Link the context
+ * @unlink:                Unlink the context
+ * @apply_req:             Apply setting for the context
+ * @flush_req:             Flush request to remove request ids
+ * @process_evt:           Handle event notification from CRM (optional)
+ *
+ */
+struct cam_ctx_crm_ops {
+	int (*get_dev_info)(struct cam_context *ctx,
+			struct cam_req_mgr_device_info *device_info);
+	int (*link)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *link);
+	int (*unlink)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *unlink);
+	int (*apply_req)(struct cam_context *ctx,
+			struct cam_req_mgr_apply_request *apply);
+	int (*flush_req)(struct cam_context *ctx,
+			struct cam_req_mgr_flush_request *flush);
+	int (*process_evt)(struct cam_context *ctx,
+			struct cam_req_mgr_link_evt_data *evt_data);
+};
+
+
+/**
+ * struct cam_ctx_ops - Collection of the interface function tables
+ *
+ * @ioctl_ops:             Ioctl function table
+ * @crm_ops:               CRM to context interface function table
+ * @irq_ops:               Hardware event handle function
+ * @pagefault_ops:         Function to be called on page fault
+ *
+ */
+struct cam_ctx_ops {
+	struct cam_ctx_ioctl_ops     ioctl_ops;
+	struct cam_ctx_crm_ops       crm_ops;
+	cam_hw_event_cb_func         irq_ops;
+	cam_hw_pagefault_cb_func     pagefault_ops;
+};
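+
+/*
+ * Illustrative wiring (a sketch; the "my_*" names are hypothetical): a
+ * device-specific driver typically provides one cam_ctx_ops entry per
+ * cam_context_state and installs the table after cam_context_init():
+ *
+ *	static struct cam_ctx_ops my_state_machine[CAM_CTX_STATE_MAX] = {
+ *		[CAM_CTX_AVAILABLE] = {
+ *			.ioctl_ops = { .acquire_dev = my_acquire_dev },
+ *		},
+ *		[CAM_CTX_ACQUIRED] = {
+ *			.ioctl_ops = { .release_dev = my_release_dev },
+ *		},
+ *	};
+ *
+ *	ctx->state_machine = my_state_machine;
+ */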
+
+/**
+ * struct cam_context - camera context object for the subdevice node
+ *
+ * @dev_name:              Name of the associated device
+ * @dev_id:                ID of the associated device
+ * @ctx_id:                ID for this context
+ * @list:                  Link list entry
+ * @session_hdl:           Session handle
+ * @dev_hdl:               Device handle
+ * @link_hdl:              Link handle
+ * @ctx_mutex:             Mutex for ioctl calls
+ * @lock:                  Spin lock
+ * @active_req_list:       Requests pending for done event
+ * @pending_req_list:      Requests pending for reg upd event
+ * @wait_req_list:         Requests waiting for apply
+ * @free_req_list:         Requests that are free
+ * @req_list:              Reference to the request storage
+ * @req_size:              Size of the request storage
+ * @hw_mgr_intf:           Context to HW interface
+ * @ctx_crm_intf:          Context to CRM interface
+ * @crm_ctx_intf:          CRM to context interface
+ * @irq_cb_intf:           HW to context callback interface
+ * @state:                 Current state for top level state machine
+ * @state_machine:         Top level state machine
+ * @ctx_priv:              Private context pointer
+ * @ctxt_to_hw_map:        Context to hardware mapping pointer
+ * @refcount:              Context object refcount
+ * @node:                  The main node to which this context belongs
+ * @sync_mutex:            mutex to sync with sync cb thread
+ * @last_flush_req:        Last request to flush
+ *
+ */
+struct cam_context {
+	const char                  *dev_name;
+	uint64_t                     dev_id;
+	uint32_t                     ctx_id;
+	struct list_head             list;
+	int32_t                      session_hdl;
+	int32_t                      dev_hdl;
+	int32_t                      link_hdl;
+
+	struct mutex                 ctx_mutex;
+	spinlock_t                   lock;
+
+	struct list_head             active_req_list;
+	struct list_head             pending_req_list;
+	struct list_head             wait_req_list;
+	struct list_head             free_req_list;
+	struct cam_ctx_request      *req_list;
+	uint32_t                     req_size;
+
+	struct cam_hw_mgr_intf      *hw_mgr_intf;
+	struct cam_req_mgr_crm_cb   *ctx_crm_intf;
+	struct cam_req_mgr_kmd_ops  *crm_ctx_intf;
+	cam_hw_event_cb_func         irq_cb_intf;
+
+	enum cam_context_state       state;
+	struct cam_ctx_ops          *state_machine;
+
+	void                        *ctx_priv;
+	void                        *ctxt_to_hw_map;
+
+	struct kref                  refcount;
+	void                        *node;
+	struct mutex                 sync_mutex;
+	uint32_t                     last_flush_req;
+};
+
+/**
+ * cam_context_shutdown()
+ *
+ * @brief:        Called during device close or shutdown
+ *
+ * @ctx:          Object pointer for cam_context
+ *
+ */
+int cam_context_shutdown(struct cam_context *ctx);
+
+/**
+ * cam_context_handle_crm_get_dev_info()
+ *
+ * @brief:        Handle get device information command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @info:         Device information returned
+ *
+ */
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
+		struct cam_req_mgr_device_info *info);
+
+/**
+ * cam_context_handle_crm_link()
+ *
+ * @brief:        Handle link command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @link:         Link command payload
+ *
+ */
+int cam_context_handle_crm_link(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * cam_context_handle_crm_unlink()
+ *
+ * @brief:        Handle unlink command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @unlink:       Unlink command payload
+ *
+ */
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *unlink);
+
+/**
+ * cam_context_handle_crm_apply_req()
+ *
+ * @brief:        Handle apply request command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @apply:        Apply request command payload
+ *
+ */
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
+		struct cam_req_mgr_apply_request *apply);
+
+/**
+ * cam_context_handle_crm_flush_req()
+ *
+ * @brief:        Handle flush request command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @flush:        Flush request command payload
+ *
+ */
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+		struct cam_req_mgr_flush_request *flush);
+
+/**
+ * cam_context_handle_crm_process_evt()
+ *
+ * @brief:        Handle process event command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @process_evt:  process event command payload
+ *
+ */
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *process_evt);
+
+/**
+ * cam_context_dump_pf_info()
+ *
+ * @brief:        Handle dump of the active request on a page fault
+ *
+ * @ctx:          Object pointer for cam_context
+ * @iova:         Page fault address
+ * @buf_info:     Information about closest memory handle
+ *
+ */
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+	uint32_t buf_info);
+
+/**
+ * cam_context_handle_acquire_dev()
+ *
+ * @brief:        Handle acquire device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Acquire device command payload
+ *
+ */
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+		struct cam_acquire_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_acquire_hw()
+ *
+ * @brief:        Handle acquire HW command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Acquire HW command payload
+ *
+ */
+int cam_context_handle_acquire_hw(struct cam_context *ctx,
+		void *cmd);
+
+/**
+ * cam_context_handle_release_dev()
+ *
+ * @brief:        Handle release device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Release device command payload
+ *
+ */
+int cam_context_handle_release_dev(struct cam_context *ctx,
+		struct cam_release_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_release_hw()
+ *
+ * @brief:        Handle release HW command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Release HW command payload
+ *
+ */
+int cam_context_handle_release_hw(struct cam_context *ctx,
+		void *cmd);
+
+/**
+ * cam_context_handle_config_dev()
+ *
+ * @brief:        Handle config device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Config device command payload
+ *
+ */
+int cam_context_handle_config_dev(struct cam_context *ctx,
+		struct cam_config_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_flush_dev()
+ *
+ * @brief:        Handle flush device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Flush device command payload
+ *
+ */
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+		struct cam_flush_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_start_dev()
+ *
+ * @brief:        Handle start device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Start device command payload
+ *
+ */
+int cam_context_handle_start_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_stop_dev()
+ *
+ * @brief:        Handle stop device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Stop device command payload
+ *
+ */
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_deinit()
+ *
+ * @brief:        Camera context deinitialize function
+ *
+ * @ctx:          Object pointer for cam_context
+ *
+ */
+int cam_context_deinit(struct cam_context *ctx);
+
+/**
+ * cam_context_init()
+ *
+ * @brief:        Camera context initialize function
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @dev_name:              Name of the associated device
+ * @dev_id:                ID of the device associated
+ * @ctx_id:                ID for this context
+ * @crm_node_intf:         Function table for crm to context interface
+ * @hw_mgr_intf:           Function table for context to hw interface
+ * @req_list:              Requests storage
+ * @req_size:              Size of the request storage
+ *
+ */
+int cam_context_init(struct cam_context *ctx,
+		const char *dev_name,
+		uint64_t dev_id,
+		uint32_t ctx_id,
+		struct cam_req_mgr_kmd_ops *crm_node_intf,
+		struct cam_hw_mgr_intf *hw_mgr_intf,
+		struct cam_ctx_request *req_list,
+		uint32_t req_size);
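+
+/*
+ * Illustrative call (a sketch; the "my_*" identifiers and MY_DEV_ID are
+ * hypothetical):
+ *
+ *	static struct cam_ctx_request my_reqs[CAM_CTX_REQ_MAX];
+ *
+ *	rc = cam_context_init(ctx, "my-dev", MY_DEV_ID, 0,
+ *		crm_node_intf, hw_mgr_intf, my_reqs, CAM_CTX_REQ_MAX);
+ */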
+
+/**
+ * cam_context_putref()
+ *
+ * @brief:       Put back context reference.
+ *
+ * @ctx:                  Context for which ref is returned
+ *
+ */
+void cam_context_putref(struct cam_context *ctx);
+
+/**
+ * cam_context_getref()
+ *
+ * @brief:       Take a context reference.
+ *
+ * @ctx:                  Context for which ref is taken
+ *
+ */
+void cam_context_getref(struct cam_context *ctx);
+
+#endif  /* _CAM_CONTEXT_H_ */

+ 1018 - 0
drivers/cam_core/cam_context_utils.c

@@ -0,0 +1,1018 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <cam_sync.h>
+#include <cam_defs.h>
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_mem_mgr.h"
+#include "cam_node.h"
+#include "cam_req_mgr_util.h"
+#include "cam_sync_api.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+
+static uint cam_debug_ctx_req_list;
+module_param(cam_debug_ctx_req_list, uint, 0644);
+
+static inline int cam_context_validate_thread(void)
+{
+	if (in_interrupt()) {
+		WARN(1, "Invalid execution context\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state)
+{
+	int j;
+	int result;
+	struct cam_ctx_request *req;
+	struct cam_hw_done_event_data *done =
+		(struct cam_hw_done_event_data *)done_event_data;
+	int rc;
+
+	if (!ctx || !done) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
+		return -EINVAL;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	spin_lock(&ctx->lock);
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] no active request",
+			ctx->dev_name, ctx->ctx_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+	req = list_first_entry(&ctx->active_req_list,
+		struct cam_ctx_request, list);
+
+	trace_cam_buf_done("UTILS", ctx, req);
+
+	if (done->request_id != req->request_id) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] mismatch: done req[%lld], active req[%lld]",
+			ctx->dev_name, ctx->ctx_id,
+			done->request_id, req->request_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+
+	if (!req->num_out_map_entries) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] no output fence to signal",
+			ctx->dev_name, ctx->ctx_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+
+	/*
+	 * Another thread may be adding to or removing from the active
+	 * list, so hold the lock.
+	 */
+	list_del_init(&req->list);
+	spin_unlock(&ctx->lock);
+	if (!bubble_state) {
+		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+	} else {
+		CAM_DBG(CAM_REQ,
+			"[%s][ctx_id %d] : req[%llu] is done with error",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		for (j = 0; j < req->num_out_map_entries; j++)
+			CAM_DBG(CAM_REQ, "fence %d signaled with error",
+				req->out_map_entries[j].sync_id);
+
+		result = CAM_SYNC_STATE_SIGNALED_ERROR;
+	}
+
+	for (j = 0; j < req->num_out_map_entries; j++) {
+		cam_sync_signal(req->out_map_entries[j].sync_id, result);
+		req->out_map_entries[j].sync_id = -1;
+	}
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving req[%llu] from active_list to free_list",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+	/*
+	 * Another thread may be adding to or removing from the free
+	 * list, so hold the lock.
+	 */
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
+
+	return 0;
+}
+
+static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_context *ctx = req->ctx;
+	struct cam_hw_config_args cfg;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	spin_lock(&ctx->lock);
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving req[%llu] from pending_list to active_list",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.request_id = req->request_id;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.num_out_map_entries = req->num_out_map_entries;
+	cfg.priv = req->req_priv;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		spin_lock(&ctx->lock);
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from active_list to free_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+
+end:
+	return rc;
+}
+
+static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+{
+	struct cam_ctx_request *req = data;
+	struct cam_context *ctx = NULL;
+	struct cam_flush_dev_cmd flush_cmd;
+	struct cam_req_mgr_apply_request apply;
+	int rc;
+
+	if (!req) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		return;
+	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return;
+
+	ctx = req->ctx;
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid ctx for req %llu", req->request_id);
+		return;
+	}
+
+	if (atomic_inc_return(&req->num_in_acked) == req->num_in_map_entries) {
+		apply.request_id = req->request_id;
+		/*
+		 * take mutex to ensure that another thread does
+		 * not flush the request while this
+		 * thread is submitting it to h/w. The submit to
+		 * h/w and adding to the active list should happen
+		 * in a critical section which is provided by this
+		 * mutex.
+		 */
+		if (status == CAM_SYNC_STATE_SIGNALED_ERROR) {
+			CAM_DBG(CAM_CTXT, "fence error: %d", sync_obj);
+			flush_cmd.req_id = req->request_id;
+			cam_context_flush_req_to_hw(ctx, &flush_cmd);
+		}
+
+		mutex_lock(&ctx->sync_mutex);
+		if (!req->flushed) {
+			cam_context_apply_req_to_hw(req, &apply);
+			mutex_unlock(&ctx->sync_mutex);
+		} else {
+			req->flushed = 0;
+			req->ctx = NULL;
+			mutex_unlock(&ctx->sync_mutex);
+			spin_lock(&ctx->lock);
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+
+			if (cam_debug_ctx_req_list & ctx->dev_id)
+				CAM_INFO(CAM_CTXT,
+					"[%s][%d] : Moving req[%llu] from pending_list to free_list",
+					ctx->dev_name, ctx->ctx_id,
+					req->request_id);
+		}
+	}
+	cam_context_putref(ctx);
+}
+
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	struct cam_hw_release_args arg;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		return -EINVAL;
+	}
+
+	if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		return -EINVAL;
+	}
+
+	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	arg.active_req = false;
+
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	ctx->ctxt_to_hw_map = NULL;
+
+	ctx->session_hdl = -1;
+	ctx->dev_hdl = -1;
+	ctx->link_hdl = -1;
+
+	return 0;
+}
+
+int32_t cam_context_config_dev_to_hw(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	size_t len;
+	struct cam_hw_stream_setttings cfg;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		return -EINVAL;
+	}
+
+	if (!ctx->hw_mgr_intf ||
+		!ctx->hw_mgr_intf->hw_config_stream_settings) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		return rc;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc) {
+		CAM_ERR(CAM_CTXT,
+			"Not executing in the right context");
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.priv = NULL;
+
+	CAM_DBG(CAM_CTXT, "Processing config settings");
+	rc = ctx->hw_mgr_intf->hw_config_stream_settings(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Config failed stream settings",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_prepare_update_args cfg;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+	size_t len = 0;
+	size_t remain_len = 0;
+	int32_t i = 0, j = 0;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		return -EINVAL;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		return -EFAULT;
+	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] No more request obj free",
+			ctx->dev_name, ctx->ctx_id);
+		return -ENOMEM;
+	}
+
+	memset(req, 0, sizeof(*req));
+	INIT_LIST_HEAD(&req->list);
+	req->ctx = ctx;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	remain_len = len;
+	if ((len < sizeof(struct cam_packet)) ||
+		((size_t)cmd->offset >= len - sizeof(struct cam_packet))) {
+		CAM_ERR(CAM_CTXT, "invalid buff length: %zu or offset", len);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	remain_len -= (size_t)cmd->offset;
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
+
+	if (packet->header.request_id <= ctx->last_flush_req) {
+		CAM_ERR(CAM_CTXT,
+			"request %lld has been flushed, reject packet",
+			packet->header.request_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	/* The request survived the flush check, so clear last_flush_req */
+	ctx->last_flush_req = 0;
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.remain_len = remain_len;
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.max_hw_update_entries = CAM_CTX_CFG_MAX;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.max_out_map_entries = CAM_CTX_CFG_MAX;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
+	cfg.in_map_entries = req->in_map_entries;
+	cfg.pf_data = &(req->pf_data);
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Prepare config packet failed in HW layer",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req->num_hw_update_entries = cfg.num_hw_update_entries;
+	req->num_out_map_entries = cfg.num_out_map_entries;
+	req->num_in_map_entries = cfg.num_in_map_entries;
+	atomic_set(&req->num_in_acked, 0);
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+	req->req_priv = cfg.priv;
+
+	for (i = 0; i < req->num_out_map_entries; i++) {
+		rc = cam_sync_get_obj_ref(req->out_map_entries[i].sync_id);
+		if (rc) {
+			CAM_ERR(CAM_CTXT, "Can't get ref for sync %d",
+				req->out_map_entries[i].sync_id);
+			goto put_ref;
+		}
+	}
+
+	if (req->num_in_map_entries > 0) {
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->pending_req_list);
+		spin_unlock(&ctx->lock);
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from free_list to pending_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		for (j = 0; j < req->num_in_map_entries; j++) {
+			cam_context_getref(ctx);
+			rc = cam_sync_register_callback(
+					cam_context_sync_callback,
+					(void *)req,
+					req->in_map_entries[j].sync_id);
+			if (rc) {
+				CAM_ERR(CAM_CTXT,
+					"[%s][%d] Failed register fence cb: %d ret = %d",
+					ctx->dev_name, ctx->ctx_id,
+					req->in_map_entries[j].sync_id, rc);
+				spin_lock(&ctx->lock);
+				list_del_init(&req->list);
+				spin_unlock(&ctx->lock);
+
+				if (cam_debug_ctx_req_list & ctx->dev_id)
+					CAM_INFO(CAM_CTXT,
+						"[%s][%d] : Moving req[%llu] from pending_list to free_list",
+						ctx->dev_name, ctx->ctx_id,
+						req->request_id);
+
+				goto put_ctx_ref;
+			}
+			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
+				req->in_map_entries[j].sync_id, rc);
+		}
+	}
+
+	return rc;
+put_ctx_ref:
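+	/*
+	 * A ctx reference was taken for every in-map entry before its
+	 * callback was registered, including entry j whose registration
+	 * just failed, so j + 1 references are dropped here.
+	 */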
+	for (; j >= 0; j--)
+		cam_context_putref(ctx);
+put_ref:
+	for (--i; i >= 0; i--) {
+		if (cam_sync_put_obj_ref(req->out_map_entries[i].sync_id))
+			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
+				req->out_map_entries[i].sync_id);
+	}
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
+
+	return rc;
+}
+
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+	struct cam_hw_acquire_args param;
+	struct cam_create_dev_hdl req_hdl_param;
+	struct cam_hw_release_args release;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
+		cmd->session_handle, cmd->num_resources, cmd->handle_type,
+		cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_CTX_RES_MAX) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] resource limit exceeded",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Only user pointer is supported",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* fill in parameters */
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = cmd->resource_hdl;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Acquire device failed",
+			ctx->dev_name, ctx->ctx_id);
+		goto end;
+	}
+
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	/* if hw resource acquire successful, acquire dev handle */
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.priv = ctx;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not create device handle",
+			ctx->dev_name, ctx->ctx_id);
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx->ctxt_to_hw_map = NULL;
+	ctx->dev_hdl = -1;
+end:
+	return rc;
+}
+
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
+{
+	struct cam_hw_flush_args flush_args;
+	struct list_head temp_list;
+	struct cam_ctx_request *req;
+	uint32_t i;
+	int rc = 0;
+	bool free_req;
+
+	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name);
+	memset(&flush_args, 0, sizeof(flush_args));
+
+	/*
+	 * flush pending requests, take the sync lock to synchronize with the
+	 * sync callback thread so that the sync cb thread does not try to
+	 * submit request to h/w while the request is being flushed
+	 */
+	mutex_lock(&ctx->sync_mutex);
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->pending_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving all pending requests from pending_list to temp_list",
+			ctx->dev_name, ctx->ctx_id);
+
+	flush_args.num_req_pending = 0;
+	while (true) {
+		spin_lock(&ctx->lock);
+		if (list_empty(&temp_list)) {
+			spin_unlock(&ctx->lock);
+			break;
+		}
+
+		req = list_first_entry(&temp_list,
+				struct cam_ctx_request, list);
+
+		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+		req->flushed = 1;
+
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+
+		free_req = false;
+		for (i = 0; i < req->num_in_map_entries; i++) {
+			rc = cam_sync_deregister_callback(
+				cam_context_sync_callback,
+				(void *)req,
+				req->in_map_entries[i].sync_id);
+			if (!rc) {
+				cam_context_putref(ctx);
+				if (atomic_inc_return(&req->num_in_acked) ==
+					req->num_in_map_entries)
+					free_req = true;
+			}
+		}
+
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+					"Req: %llu already signalled, sync_id:%d",
+					req->request_id,
+					req->out_map_entries[i].sync_id);
+					break;
+				}
+			}
+		}
+
+		/*
+		 * If we have deregistered the last sync callback, req will
+		 * not be put on the free list. So put it on the free list here
+		 */
+		if (free_req) {
+			req->ctx = NULL;
+			spin_lock(&ctx->lock);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+		}
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Deleting req[%llu] from temp_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		flush_args.num_req_active = 0;
+		spin_lock(&ctx->lock);
+		list_for_each_entry(req, &ctx->active_req_list, list) {
+			flush_args.flush_req_active[flush_args.num_req_active++]
+				= req->req_priv;
+		}
+		spin_unlock(&ctx->lock);
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_ALL;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->active_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving all requests from active_list to temp_list",
+			ctx->dev_name, ctx->ctx_id);
+
+	while (true) {
+		spin_lock(&ctx->lock);
+		if (list_empty(&temp_list)) {
+			spin_unlock(&ctx->lock);
+			break;
+		}
+		req = list_first_entry(&temp_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
+						req->request_id, req->ctx,
+						req->ctx->dev_name,
+						req->ctx->dev_hdl,
+						req->ctx->state);
+					break;
+				}
+			}
+		}
+
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+		req->ctx = NULL;
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from temp_list to free_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+
+	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);
+
+	return 0;
+}
+
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_flush_args flush_args;
+	uint32_t i;
+	int32_t sync_id = 0;
+	int rc = 0;
+	bool free_req = false;
+
+	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name);
+
+	memset(&flush_args, 0, sizeof(flush_args));
+	flush_args.num_req_pending = 0;
+	flush_args.num_req_active = 0;
+	mutex_lock(&ctx->sync_mutex);
+	spin_lock(&ctx->lock);
+	list_for_each_entry(req, &ctx->pending_req_list, list) {
+		if (req->request_id != cmd->req_id)
+			continue;
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Deleting req[%llu] from pending_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		list_del_init(&req->list);
+		req->flushed = 1;
+
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+		break;
+	}
+	spin_unlock(&ctx->lock);
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		if (!flush_args.num_req_pending) {
+			spin_lock(&ctx->lock);
+			list_for_each_entry(req, &ctx->active_req_list, list) {
+				if (req->request_id != cmd->req_id)
+					continue;
+
+				list_del_init(&req->list);
+
+				flush_args.flush_req_active[
+					flush_args.num_req_active++] =
+					req->req_priv;
+				break;
+			}
+			spin_unlock(&ctx->lock);
+		}
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
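+	/*
+	 * If the loop above completed without a match, req does not point
+	 * at a valid request; the num_req_pending/num_req_active guards
+	 * below keep it from being dereferenced in that case.
+	 */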
+	if (req) {
+		if (flush_args.num_req_pending) {
+			for (i = 0; i < req->num_in_map_entries; i++) {
+				rc = cam_sync_deregister_callback(
+					cam_context_sync_callback,
+					(void *)req,
+					req->in_map_entries[i].sync_id);
+				if (rc)
+					continue;
+
+				cam_context_putref(ctx);
+				if (atomic_inc_return(&req->num_in_acked) ==
+					req->num_in_map_entries)
+					free_req = true;
+			}
+		}
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			for (i = 0; i < req->num_out_map_entries; i++) {
+				sync_id =
+					req->out_map_entries[i].sync_id;
+				if (sync_id != -1) {
+					rc = cam_sync_signal(sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+					if (rc == -EALREADY) {
+						CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled, sync_id:%d",
+						req->request_id, sync_id);
+						break;
+					}
+				}
+			}
+			if (flush_args.num_req_active || free_req) {
+				req->ctx = NULL;
+				spin_lock(&ctx->lock);
+				list_add_tail(&req->list, &ctx->free_req_list);
+				spin_unlock(&ctx->lock);
+
+				if (cam_debug_ctx_req_list & ctx->dev_id)
+					CAM_INFO(CAM_CTXT,
+						"[%s][%d] : Moving req[%llu] from %s to free_list",
+						ctx->dev_name, ctx->ctx_id,
+						req->request_id,
+						flush_args.num_req_active ?
+							"active_list" :
+							"pending_list");
+			}
+		}
+	}
+	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);
+
+	return 0;
+}
+
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL) {
+		ctx->last_flush_req = cmd->req_id;
+		rc = cam_context_flush_ctx_to_hw(ctx);
+	} else if (cmd->flush_type == CAM_FLUSH_TYPE_REQ) {
+		rc = cam_context_flush_req_to_hw(ctx, cmd);
+	} else {
+		rc = -EINVAL;
+		CAM_ERR(CAM_CTXT, "[%s][%d] Invalid flush type %d",
+			ctx->dev_name, ctx->ctx_id, cmd->flush_type);
+	}
+
+end:
+	return rc;
+}
+
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_start_args arg;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if ((cmd->session_handle != ctx->session_hdl) ||
+		(cmd->dev_handle != ctx->dev_hdl)) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Invalid session hdl[%d], dev_handle[%d]",
+			ctx->dev_name, ctx->ctx_id,
+			cmd->session_handle, cmd->dev_handle);
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (ctx->hw_mgr_intf->hw_start) {
+		arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+				&arg);
+		if (rc) {
+			/* HW failure. user need to clean up the resource */
+			CAM_ERR(CAM_CTXT, "[%s][%d] Start HW failed",
+				ctx->dev_name, ctx->ctx_id);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_stop_args stop;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc)
+		goto end;
+
+	rc = cam_context_flush_ctx_to_hw(ctx);
+	if (rc)
+		goto end;
+
+	/* stop hw first */
+	if (ctx->hw_mgr_intf->hw_stop) {
+		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
+end:
+	return rc;
+}
+
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+	bool *mem_found)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args cmd_args;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param %pK", ctx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (ctx->hw_mgr_intf->hw_cmd) {
+		cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
+		cmd_args.u.pf_args.pf_data.packet = packet;
+		cmd_args.u.pf_args.iova = iova;
+		cmd_args.u.pf_args.buf_info = buf_info;
+		cmd_args.u.pf_args.mem_found = mem_found;
+		ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+			&cmd_args);
+	}
+
+end:
+	return rc;
+}
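
For orientation, a minimal sketch of how a device-specific context typically
delegates to the helpers above from its state-machine ops; the my_ctx_* names
are hypothetical and not part of this change.

	static int my_ctx_config_dev(struct cam_context *ctx,
		struct cam_config_dev_cmd *cmd)
	{
		/* Translate the user packet and queue it to the HW manager */
		return cam_context_prepare_dev_to_hw(ctx, cmd);
	}

	static int my_ctx_stop_dev(struct cam_context *ctx,
		struct cam_start_stop_dev_cmd *cmd)
	{
		/* Flush outstanding requests, then stop the hardware */
		return cam_context_stop_dev_to_hw(ctx);
	}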

+ 33 - 0
drivers/cam_core/cam_context_utils.h

@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CONTEXT_UTILS_H_
+#define _CAM_CONTEXT_UTILS_H_
+
+#include <linux/types.h>
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state);
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd);
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd);
+int32_t cam_context_config_dev_to_hw(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd);
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd);
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd);
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+	bool *mem_found);
+
+#endif /* _CAM_CONTEXT_UTILS_H_ */

+ 46 - 0
drivers/cam_core/cam_hw.h

@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_H_
+#define _CAM_HW_H_
+
+#include "cam_soc_util.h"
+
+/*
+ * This file declares Enums, Structures and APIs to be used as template
+ * when writing any HW driver in the camera subsystem.
+ */
+
+/* Hardware state enum */
+enum cam_hw_state {
+	CAM_HW_STATE_POWER_DOWN,
+	CAM_HW_STATE_POWER_UP,
+};
+
+/**
+ * struct cam_hw_info - Common hardware information
+ *
+ * @hw_mutex:              Hardware mutex
+ * @hw_lock:               Hardware spinlock
+ * @hw_complete:           Hardware Completion
+ * @open_count:            Count to track the HW enable from the client
+ * @hw_state:              Hardware state
+ * @soc_info:              Platform SOC properties for hardware
+ * @node_info:             Private HW data related to nodes
+ * @core_info:             Private HW data related to core logic
+ *
+ */
+struct cam_hw_info {
+	struct mutex                    hw_mutex;
+	spinlock_t                      hw_lock;
+	struct completion               hw_complete;
+	uint32_t                        open_count;
+	enum cam_hw_state               hw_state;
+	struct cam_hw_soc_info          soc_info;
+	void                           *node_info;
+	void                           *core_info;
+};
+
+#endif /* _CAM_HW_H_ */
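
For reference, a minimal sketch of how a HW core might use cam_hw_info,
assuming one instance per device; my_hw_init is a hypothetical example, not
an API defined here.

	static int my_hw_init(void *hw_priv, void *init_args, uint32_t arg_size)
	{
		struct cam_hw_info *hw = hw_priv;

		mutex_lock(&hw->hw_mutex);
		/* Power up only on the first enable from a client */
		if (hw->open_count++ == 0)
			hw->hw_state = CAM_HW_STATE_POWER_UP;
		mutex_unlock(&hw->hw_mutex);

		return 0;
	}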

+ 80 - 0
drivers/cam_core/cam_hw_intf.h

@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_INTF_H_
+#define _CAM_HW_INTF_H_
+
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW driver and HW Manager.
+ */
+
+/**
+ * struct cam_hw_ops - Hardware layer interface functions
+ *
+ * @get_hw_caps:           Function pointer for get hw caps
+ * @init:                  Function pointer for initialize hardware
+ * @deinit:                Function pointer for deinitialize hardware
+ * @reset:                 Function pointer for reset hardware
+ * @reserve:               Function pointer for reserve hardware
+ * @release:               Function pointer for release hardware
+ * @start:                 Function pointer for start hardware
+ * @stop:                  Function pointer for stop hardware
+ * @read:                  Function pointer for read hardware registers
+ * @write:                 Function pointer for Write hardware registers
+ * @process_cmd:           Function pointer for additional hardware controls
+ * @flush:                 Function pointer for flush requests
+ *
+ */
+struct cam_hw_ops {
+	int (*get_hw_caps)(void *hw_priv,
+		void *get_hw_cap_args, uint32_t arg_size);
+	int (*init)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*deinit)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*reset)(void *hw_priv,
+		void *reset_core_args, uint32_t arg_size);
+	int (*reserve)(void *hw_priv,
+		void *reserve_args, uint32_t arg_size);
+	int (*release)(void *hw_priv,
+		void *release_args, uint32_t arg_size);
+	int (*start)(void *hw_priv,
+		void *start_args, uint32_t arg_size);
+	int (*stop)(void *hw_priv,
+		void *stop_args, uint32_t arg_size);
+	int (*read)(void *hw_priv,
+		void *read_args, uint32_t arg_size);
+	int (*write)(void *hw_priv,
+		void *write_args, uint32_t arg_size);
+	int (*process_cmd)(void *hw_priv,
+		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+	int (*flush)(void *hw_priv,
+		void *flush_args, uint32_t arg_size);
+};
+
+/**
+ * struct cam_hw_intf - Common hardware node
+ *
+ * @hw_type:               Hardware type
+ * @hw_idx:                Hardware ID
+ * @hw_ops:                Hardware interface function table
+ * @hw_priv:               Private hardware node pointer
+ *
+ */
+struct cam_hw_intf {
+	uint32_t                     hw_type;
+	uint32_t                     hw_idx;
+	struct cam_hw_ops            hw_ops;
+	void                        *hw_priv;
+};
+
+/* hardware event callback function type */
+typedef int (*cam_hw_mgr_event_cb_func)(void *priv, uint32_t evt_id,
+	void *evt_data);
+
+#endif /* _CAM_HW_INTF_H_ */
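
For reference, a sketch of how a probe routine might populate cam_hw_intf so
a HW manager can drive the device through hw_ops; all my_* symbols are
hypothetical stand-ins for device-specific handlers.

	static void my_fill_hw_intf(struct cam_hw_intf *intf,
		struct cam_hw_info *hw)
	{
		intf->hw_type = 0;               /* device-specific type id */
		intf->hw_idx = 0;
		intf->hw_priv = hw;              /* passed back as hw_priv */
		intf->hw_ops.init = my_hw_init;
		intf->hw_ops.deinit = my_hw_deinit;
		intf->hw_ops.start = my_hw_start;
		intf->hw_ops.stop = my_hw_stop;
		intf->hw_ops.process_cmd = my_hw_process_cmd;
	}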

+ 338 - 0
drivers/cam_core/cam_hw_mgr_intf.h

@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_MGR_INTF_H_
+#define _CAM_HW_MGR_INTF_H_
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <media/cam_defs.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW Manager and Context.
+ */
+
+/* maximum context numbers */
+#define CAM_CTX_MAX                         8
+
+/* maximum buf done irqs */
+#define CAM_NUM_OUT_PER_COMP_IRQ_MAX        12
+
+/* hardware event callback function type */
+typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
+	void *evt_data);
+
+/* hardware page fault callback function type */
+typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova,
+	uint32_t buf_info);
+
+/**
+ * struct cam_hw_update_entry - Entry for hardware config
+ *
+ * @handle:                Memory handle for the configuration
+ * @offset:                Memory offset
+ * @len:                   Size of the configuration
+ * @flags:                 Flags for the config entry (e.g. DMI)
+ * @addr:                  Address of hardware update entry
+ *
+ */
+struct cam_hw_update_entry {
+	int                handle;
+	uint32_t           offset;
+	uint32_t           len;
+	uint32_t           flags;
+	uintptr_t          addr;
+};
+
+/**
+ * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
+ *
+ * @resource_handle:       Resource port id for the buffer
+ * @sync_id:               Sync id
+ *
+ */
+struct cam_hw_fence_map_entry {
+	uint32_t           resource_handle;
+	int32_t            sync_id;
+};
+
+/**
+ * struct cam_hw_done_event_data - Payload for hw done event
+ *
+ * @num_handles:           number of handles in the event
+ * @resource_handle:       list of the resource handles
+ * @timestamp:             time stamp
+ * @request_id:            request identifier
+ *
+ */
+struct cam_hw_done_event_data {
+	uint32_t           num_handles;
+	uint32_t           resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval     timestamp;
+	uint64_t           request_id;
+};
+
+/**
+ * struct cam_hw_acquire_args - Payload for acquire command
+ *
+ * @context_data:          Context data pointer for the callback function
+ * @event_cb:              Callback function array
+ * @num_acq:               Total number of acquire in the payload
+ * @acquire_info_size:     Size of the acquire info payload
+ * @acquire_info:          Acquired resource array pointer
+ * @ctxt_to_hw_map:        HW context (returned)
+ * @acquired_hw_id:        Acquired hardware mask
+ * @acquired_hw_path:      Acquired path mask for an input
+ *                         if input splits into multiple paths,
+ *                         its updated per hardware
+ * @valid_acquired_hw:     Valid num of acquired hardware
+ *
+ */
+struct cam_hw_acquire_args {
+	void                        *context_data;
+	cam_hw_event_cb_func         event_cb;
+	uint32_t                     num_acq;
+	uint32_t                     acquire_info_size;
+	uintptr_t                    acquire_info;
+	void                        *ctxt_to_hw_map;
+
+	uint32_t    acquired_hw_id[CAM_MAX_ACQ_RES];
+	uint32_t    acquired_hw_path[CAM_MAX_ACQ_RES][CAM_MAX_HW_SPLIT];
+	uint32_t    valid_acquired_hw;
+};
+
+/**
+ * struct cam_hw_release_args - Payload for release command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @active_req:            Active request flag
+ *
+ */
+struct cam_hw_release_args {
+	void              *ctxt_to_hw_map;
+	bool               active_req;
+};
+
+/**
+ * struct cam_hw_start_args - Payload for start command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of Hardware configuration
+ * @hw_update_entries:     Hardware configuration list
+ *
+ */
+struct cam_hw_start_args {
+	void                        *ctxt_to_hw_map;
+	uint32_t                     num_hw_update_entries;
+	struct cam_hw_update_entry  *hw_update_entries;
+};
+
+/**
+ * struct cam_hw_stop_args - Payload for stop command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @args:                  Arguments to pass for stop
+ *
+ */
+struct cam_hw_stop_args {
+	void              *ctxt_to_hw_map;
+	void              *args;
+};
+
+/**
+ * struct cam_hw_mgr_dump_pf_data - page fault debug data
+ *
+ * @packet:                Pointer to packet
+ */
+struct cam_hw_mgr_dump_pf_data {
+	void    *packet;
+};
+
+/**
+ * struct cam_hw_prepare_update_args - Payload for prepare command
+ *
+ * @packet:                CSL packet from user mode driver
+ * @remain_len:            Remaining length of CPU buffer after config offset
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @max_hw_update_entries: Maximum hardware update entries supported
+ * @hw_update_entries:     Actual hardware update configuration (returned)
+ * @num_hw_update_entries: Number of actual hardware update entries (returned)
+ * @max_out_map_entries:   Maximum output fence mapping supported
+ * @out_map_entries:       Actual output fence mapping list (returned)
+ * @num_out_map_entries:   Number of actual output fence mapping (returned)
+ * @max_in_map_entries:    Maximum input fence mapping supported
+ * @in_map_entries:        Actual input fence mapping list (returned)
+ * @num_in_map_entries:    Number of actual input fence mapping (returned)
+ * @priv:                  Private pointer of hw update
+ * @pf_data:               Debug data for page fault
+ *
+ */
+struct cam_hw_prepare_update_args {
+	struct cam_packet              *packet;
+	size_t                          remain_len;
+	void                           *ctxt_to_hw_map;
+	uint32_t                        max_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	uint32_t                        num_hw_update_entries;
+	uint32_t                        max_out_map_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	uint32_t                        max_in_map_entries;
+	struct cam_hw_fence_map_entry  *in_map_entries;
+	uint32_t                        num_in_map_entries;
+	void                           *priv;
+	struct cam_hw_mgr_dump_pf_data *pf_data;
+};
+
+/**
+ * struct cam_hw_stream_setttings - Payload for config stream command
+ *
+ * @packet:                CSL packet from user mode driver
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @priv:                  Private pointer of hw update
+ *
+ */
+struct cam_hw_stream_setttings {
+	struct cam_packet              *packet;
+	void                           *ctxt_to_hw_map;
+	void                           *priv;
+};
+
+/**
+ * struct cam_hw_config_args - Payload for config command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of hardware update entries
+ * @hw_update_entries:     Hardware update list
+ * @out_map_entries:       Out map info
+ * @num_out_map_entries:   Number of out map entries
+ * @priv:                  Private pointer
+ * @request_id:            Request ID
+ *
+ */
+struct cam_hw_config_args {
+	void                           *ctxt_to_hw_map;
+	uint32_t                        num_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	void                           *priv;
+	uint64_t                        request_id;
+	bool                            init_packet;
+};
+
+/**
+ * struct cam_hw_flush_args - Flush arguments
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_req_pending:       Num request to flush, valid when flush type is REQ
+ * @flush_req_pending:     Request pending pointers to flush
+ * @num_req_active:        Num request to flush, valid when flush type is REQ
+ * @flush_req_active:      Request active pointers to flush
+ * @flush_type:            The flush type
+ *
+ */
+struct cam_hw_flush_args {
+	void                           *ctxt_to_hw_map;
+	uint32_t                        num_req_pending;
+	void                           *flush_req_pending[20];
+	uint32_t                        num_req_active;
+	void                           *flush_req_active[20];
+	enum flush_type_t               flush_type;
+};
+
+/**
+ * struct cam_hw_dump_pf_args - Payload for dump pf info command
+ *
+ * @pf_data:               Debug data for page fault
+ * @iova:                  Page fault address
+ * @buf_info:              Info about memory buffer where page
+ *                         fault occurred
+ * @mem_found:             Whether the faulting memory was found in the
+ *                         current request
+ *
+ */
+struct cam_hw_dump_pf_args {
+	struct cam_hw_mgr_dump_pf_data  pf_data;
+	unsigned long                   iova;
+	uint32_t                        buf_info;
+	bool                           *mem_found;
+};
+
+/* enum cam_hw_mgr_command - Hardware manager command type */
+enum cam_hw_mgr_command {
+	CAM_HW_MGR_CMD_INTERNAL,
+	CAM_HW_MGR_CMD_DUMP_PF_INFO,
+};
+
+/**
+ * struct cam_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @cmd_type:              HW command type
+ * @internal_args:         Arguments for internal command
+ * @pf_args:               Arguments for Dump PF info command
+ *
+ */
+struct cam_hw_cmd_args {
+	void                               *ctxt_to_hw_map;
+	uint32_t                            cmd_type;
+	union {
+		void                       *internal_args;
+		struct cam_hw_dump_pf_args  pf_args;
+	} u;
+};
+
+/**
+ * cam_hw_mgr_intf - HW manager interface
+ *
+ * @hw_mgr_priv:               HW manager object
+ * @hw_get_caps:               Function pointer for get hw caps
+ *                               args = cam_query_cap_cmd
+ * @hw_acquire:                Function pointer for acquire hw resources
+ *                               args = cam_hw_acquire_args
+ * @hw_release:                Function pointer for release hw device resource
+ *                               args = cam_hw_release_args
+ * @hw_start:                  Function pointer for start hw devices
+ *                               args = cam_hw_start_args
+ * @hw_stop:                   Function pointer for stop hw devices
+ *                               args = cam_hw_stop_args
+ * @hw_prepare_update:         Function pointer for prepare hw update for hw
+ *                             devices args = cam_hw_prepare_update_args
+ * @hw_config_stream_settings: Function pointer for configure stream for hw
+ *                             devices args = cam_hw_stream_setttings
+ * @hw_config:                 Function pointer for configure hw devices
+ *                               args = cam_hw_config_args
+ * @hw_read:                   Function pointer for read hardware registers
+ * @hw_write:                  Function pointer for Write hardware registers
+ * @hw_cmd:                    Function pointer for any customized commands for
+ *                             the hardware manager
+ * @hw_open:                   Function pointer for HW init
+ * @hw_close:                  Function pointer for HW deinit
+ * @hw_flush:                  Function pointer for HW flush
+ *
+ */
+struct cam_hw_mgr_intf {
+	void *hw_mgr_priv;
+
+	int (*hw_get_caps)(void *hw_priv, void *hw_caps_args);
+	int (*hw_acquire)(void *hw_priv, void *hw_acquire_args);
+	int (*hw_release)(void *hw_priv, void *hw_release_args);
+	int (*hw_start)(void *hw_priv, void *hw_start_args);
+	int (*hw_stop)(void *hw_priv, void *hw_stop_args);
+	int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args);
+	int (*hw_config_stream_settings)(void *hw_priv,
+		void *hw_stream_settings);
+	int (*hw_config)(void *hw_priv, void *hw_config_args);
+	int (*hw_read)(void *hw_priv, void *read_args);
+	int (*hw_write)(void *hw_priv, void *write_args);
+	int (*hw_cmd)(void *hw_priv, void *write_args);
+	int (*hw_open)(void *hw_priv, void *fw_download_args);
+	int (*hw_close)(void *hw_priv, void *hw_close_args);
+	int (*hw_flush)(void *hw_priv, void *hw_flush_args);
+};
+
+#endif /* _CAM_HW_MGR_INTF_H_ */
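
For reference, a sketch of a HW manager exporting its entry points through
cam_hw_mgr_intf at init time; this mirrors how the context helpers in
cam_context_utils.c call hw_acquire/hw_prepare_update/hw_config/hw_flush.
The my_* names are hypothetical.

	int my_hw_mgr_init(struct cam_hw_mgr_intf *intf, void *hw_mgr)
	{
		intf->hw_mgr_priv = hw_mgr;
		intf->hw_get_caps = my_hw_mgr_get_caps;
		intf->hw_acquire = my_hw_mgr_acquire;
		intf->hw_release = my_hw_mgr_release;
		intf->hw_prepare_update = my_hw_mgr_prepare_update;
		intf->hw_config = my_hw_mgr_config;
		intf->hw_start = my_hw_mgr_start;
		intf->hw_stop = my_hw_mgr_stop;
		intf->hw_flush = my_hw_mgr_flush;

		return 0;
	}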

+ 836 - 0
drivers/cam_core/cam_node.c

@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+
+#include "cam_node.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+
+static void cam_node_print_ctx_state(
+	struct cam_node *node)
+{
+	int i;
+	struct cam_context *ctx;
+
+	CAM_INFO(CAM_CORE, "[%s] state=%d, ctx_size %d",
+		node->name, node->state, node->ctx_size);
+
+	mutex_lock(&node->list_mutex);
+	for (i = 0; i < node->ctx_size; i++) {
+		ctx = &node->ctx_list[i];
+
+		spin_lock_bh(&ctx->lock);
+		CAM_INFO(CAM_CORE,
+			"[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d",
+			ctx->dev_name ? ctx->dev_name : "null",
+			i, ctx->state,
+			atomic_read(&(ctx->refcount.refcount.refs)),
+			list_empty(&ctx->active_req_list),
+			list_empty(&ctx->pending_req_list),
+			list_empty(&ctx->wait_req_list),
+			list_empty(&ctx->free_req_list));
+		spin_unlock_bh(&ctx->lock);
+	}
+	mutex_unlock(&node->list_mutex);
+}
+
+static struct cam_context *cam_node_get_ctxt_from_free_list(
+		struct cam_node *node)
+{
+	struct cam_context *ctx = NULL;
+
+	mutex_lock(&node->list_mutex);
+	if (!list_empty(&node->free_ctx_list)) {
+		ctx = list_first_entry(&node->free_ctx_list,
+			struct cam_context, list);
+		list_del_init(&ctx->list);
+	}
+	mutex_unlock(&node->list_mutex);
+	if (ctx)
+		kref_init(&ctx->refcount);
+	return ctx;
+}
+
+void cam_node_put_ctxt_to_free_list(struct kref *ref)
+{
+	struct cam_context *ctx =
+		container_of(ref, struct cam_context, refcount);
+	struct cam_node *node = ctx->node;
+
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+}
+
+static int __cam_node_handle_query_cap(struct cam_node *node,
+	struct cam_query_cap_cmd *query)
+{
+	int rc = -EFAULT;
+
+	if (!query) {
+		CAM_ERR(CAM_CORE, "Invalid params");
+		return -EINVAL;
+	}
+
+	if (node->hw_mgr_intf.hw_get_caps) {
+		rc = node->hw_mgr_intf.hw_get_caps(
+			node->hw_mgr_intf.hw_mgr_priv, query);
+	}
+
+	return rc;
+}
+
+static int __cam_node_handle_acquire_dev(struct cam_node *node,
+	struct cam_acquire_dev_cmd *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
+	ctx = cam_node_get_ctxt_from_free_list(node);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "No free ctx in free list node %s",
+			node->name);
+		cam_node_print_ctx_state(node);
+
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	ctx->last_flush_req = 0;
+	rc = cam_context_handle_acquire_dev(ctx, acquire);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+			node->name);
+		goto free_ctx;
+	}
+
+	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+		node->name, ctx->ctx_id);
+
+	return 0;
+free_ctx:
+	cam_context_putref(ctx);
+err:
+	return rc;
+}
+
+static int __cam_node_handle_acquire_hw_v1(struct cam_node *node,
+	struct cam_acquire_hw_cmd_v1 *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
+	if (acquire->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (acquire->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			acquire->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_acquire_hw(ctx, acquire);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+			node->name);
+		return rc;
+	}
+
+	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+		node->name, ctx->ctx_id);
+
+	return 0;
+}
+
+static int __cam_node_handle_acquire_hw_v2(struct cam_node *node,
+	struct cam_acquire_hw_cmd_v2 *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
+	if (acquire->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (acquire->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			acquire->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_acquire_hw(ctx, acquire);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+			node->name);
+		return rc;
+	}
+
+	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+		node->name, ctx->ctx_id);
+
+	return 0;
+}
+
+static int __cam_node_handle_start_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *start)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!start)
+		return -EINVAL;
+
+	if (start->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (start->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			start->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_start_dev(ctx, start);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_stop_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *stop)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!stop)
+		return -EINVAL;
+
+	if (stop->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (stop->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			stop->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_stop_dev(ctx, stop);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_config_dev(struct cam_node *node,
+	struct cam_config_dev_cmd *config)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!config)
+		return -EINVAL;
+
+	if (config->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (config->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			config->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_config_dev(ctx, config);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_flush_dev(struct cam_node *node,
+	struct cam_flush_dev_cmd *flush)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!flush)
+		return -EINVAL;
+
+	if (flush->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (flush->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_flush_dev(ctx, flush);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_release_dev(struct cam_node *node,
+	struct cam_release_dev_cmd *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
+		return -EINVAL;
+	}
+
+	if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
+		rc = cam_context_handle_release_dev(ctx, release);
+		if (rc)
+			CAM_ERR(CAM_CORE, "context release failed for node %s",
+				node->name);
+	} else {
+		CAM_WARN(CAM_CORE,
+			"node %s context id %u state %d invalid to release hdl",
+			node->name, ctx->ctx_id, ctx->state);
+		goto destroy_dev_hdl;
+	}
+
+	cam_context_putref(ctx);
+
+destroy_dev_hdl:
+	rc = cam_destroy_device_hdl(release->dev_handle);
+	if (rc)
+		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
+			node->name);
+	else
+		ctx->dev_hdl = -1;
+
+	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
+		node->name, ctx->ctx_id,
+		atomic_read(&(ctx->refcount.refcount.refs)));
+
+	return rc;
+}
+
+static int __cam_node_handle_release_hw_v1(struct cam_node *node,
+	struct cam_release_hw_cmd_v1 *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_release_hw(ctx, release);
+	if (rc)
+		CAM_ERR(CAM_CORE, "context release failed for node %s",
+			node->name);
+
+	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
+		node->name, ctx->ctx_id,
+		atomic_read(&(ctx->refcount.refcount.refs)));
+
+	return rc;
+}
+
+static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			info->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_get_dev_info(ctx, info);
+}
+
+static int __cam_node_crm_link_setup(
+	struct cam_req_mgr_core_dev_link_setup *setup)
+{
+	int rc;
+	struct cam_context *ctx = NULL;
+
+	if (!setup)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			setup->dev_hdl);
+		return -EINVAL;
+	}
+
+	if (setup->link_enable)
+		rc = cam_context_handle_crm_link(ctx, setup);
+	else
+		rc = cam_context_handle_crm_unlink(ctx, setup);
+
+	return rc;
+}
+
+static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			apply->dev_hdl);
+		return -EINVAL;
+	}
+
+	trace_cam_apply_req("Node", apply->request_id);
+
+	return cam_context_handle_crm_apply_req(ctx, apply);
+}
+
+static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!flush) {
+		CAM_ERR(CAM_CORE, "Invalid flush request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_hdl);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_crm_flush_req(ctx, flush);
+}
+
+static int __cam_node_crm_process_evt(
+	struct cam_req_mgr_link_evt_data *evt_data)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_CORE, "Invalid process event request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			evt_data->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_process_evt(ctx, evt_data);
+}
+
+int cam_node_deinit(struct cam_node *node)
+{
+	if (node)
+		memset(node, 0, sizeof(*node));
+
+	CAM_DBG(CAM_CORE, "deinit complete");
+
+	return 0;
+}
+
+int cam_node_shutdown(struct cam_node *node)
+{
+	int i = 0;
+	int rc = 0;
+
+	if (!node)
+		return -EINVAL;
+
+	for (i = 0; i < node->ctx_size; i++) {
+		if (node->ctx_list[i].dev_hdl > 0) {
+			CAM_DBG(CAM_CORE,
+				"Node [%s] invoking shutdown on context [%d]",
+				node->name, i);
+			rc = cam_context_shutdown(&(node->ctx_list[i]));
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"context shutdown failed for node [%s] ctx [%d]",
+					node->name, i);
+		}
+	}
+
+	if (node->hw_mgr_intf.hw_close)
+		node->hw_mgr_intf.hw_close(node->hw_mgr_intf.hw_mgr_priv,
+			NULL);
+
+	return 0;
+}
+
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name)
+{
+	int rc = 0;
+	int i;
+
+	if (!node || !hw_mgr_intf ||
+		sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)) {
+		return -EINVAL;
+	}
+
+	memset(node, 0, sizeof(*node));
+
+	strlcpy(node->name, name, sizeof(node->name));
+
+	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
+	node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
+	node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
+	node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
+	node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
+	node->crm_node_intf.process_evt = __cam_node_crm_process_evt;
+
+	mutex_init(&node->list_mutex);
+	INIT_LIST_HEAD(&node->free_ctx_list);
+	node->ctx_list = ctx_list;
+	node->ctx_size = ctx_size;
+	for (i = 0; i < ctx_size; i++) {
+		if (!ctx_list[i].state_machine) {
+			CAM_ERR(CAM_CORE,
+				"camera context %d is not initialized", i);
+			rc = -EINVAL;
+			goto err;
+		}
+		INIT_LIST_HEAD(&ctx_list[i].list);
+		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+		ctx_list[i].node = node;
+	}
+
+	node->state = CAM_NODE_STATE_INIT;
+err:
+	CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
+	return rc;
+}
+
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_query_cap_cmd query;
+
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
+			sizeof(query))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = __cam_node_handle_query_cap(node, &query);
+		if (rc) {
+			CAM_ERR(CAM_CORE, "querycap failed(rc = %d)",
+				rc);
+			break;
+		}
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
+			sizeof(query)))
+			rc = -EFAULT;
+
+		break;
+	}
+	case CAM_ACQUIRE_DEV: {
+		struct cam_acquire_dev_cmd acquire;
+
+		if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle),
+			sizeof(acquire))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_acquire_dev(node, &acquire);
+		if (rc) {
+			CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)",
+				rc);
+			break;
+		}
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire,
+			sizeof(acquire)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_ACQUIRE_HW: {
+		uint32_t api_version;
+		void *acquire_ptr = NULL;
+		size_t acquire_size;
+
+		if (copy_from_user(&api_version, (void __user *)cmd->handle,
+			sizeof(api_version))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (api_version == 1) {
+			acquire_size = sizeof(struct cam_acquire_hw_cmd_v1);
+		} else if (api_version == 2) {
+			acquire_size = sizeof(struct cam_acquire_hw_cmd_v2);
+		} else {
+			CAM_ERR(CAM_CORE, "Unsupported api version %d",
+				api_version);
+			rc = -EINVAL;
+			break;
+		}
+
+		acquire_ptr = kzalloc(acquire_size, GFP_KERNEL);
+		if (!acquire_ptr) {
+			CAM_ERR(CAM_CORE, "No memory for acquire HW");
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(acquire_ptr, (void __user *)cmd->handle,
+			acquire_size)) {
+			rc = -EFAULT;
+			goto acquire_kfree;
+		}
+
+		if (api_version == 1) {
+			rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr);
+			if (rc) {
+				CAM_ERR(CAM_CORE,
+					"acquire device failed(rc = %d)", rc);
+				goto acquire_kfree;
+			}
+			CAM_INFO(CAM_CORE, "Acquire HW successful");
+		} else if (api_version == 2) {
+			rc = __cam_node_handle_acquire_hw_v2(node, acquire_ptr);
+			if (rc) {
+				CAM_ERR(CAM_CORE,
+					"acquire device failed(rc = %d)", rc);
+				goto acquire_kfree;
+			}
+			CAM_INFO(CAM_CORE, "Acquire HW successful");
+		}
+
+		if (copy_to_user((void __user *)cmd->handle, acquire_ptr,
+			acquire_size))
+			rc = -EFAULT;
+
+acquire_kfree:
+		kfree(acquire_ptr);
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_start_stop_dev_cmd start;
+
+		if (copy_from_user(&start, u64_to_user_ptr(cmd->handle),
+			sizeof(start)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_start_dev(node, &start);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"start device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		struct cam_start_stop_dev_cmd stop;
+
+		if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle),
+			sizeof(stop)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_stop_dev(node, &stop);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"stop device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+			sizeof(config)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_config_dev(node, &config);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"config device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (copy_from_user(&release, u64_to_user_ptr(cmd->handle),
+			sizeof(release)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_release_dev(node, &release);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_HW: {
+		uint32_t api_version;
+		size_t release_size;
+		void *release_ptr = NULL;
+
+		if (copy_from_user(&api_version, (void __user *)cmd->handle,
+			sizeof(api_version))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (api_version == 1) {
+			release_size = sizeof(struct cam_release_hw_cmd_v1);
+		} else {
+			CAM_ERR(CAM_CORE, "Unsupported api version %d",
+				api_version);
+			rc = -EINVAL;
+			break;
+		}
+
+		release_ptr = kzalloc(release_size, GFP_KERNEL);
+		if (!release_ptr) {
+			CAM_ERR(CAM_CORE, "No memory for release HW");
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(release_ptr, (void __user *)cmd->handle,
+			release_size)) {
+			rc = -EFAULT;
+			goto release_kfree;
+		}
+
+		if (api_version == 1) {
+			rc = __cam_node_handle_release_hw_v1(node, release_ptr);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
+		}
+
+		CAM_INFO(CAM_CORE, "Release HW done(rc = %d)", rc);
+
+release_kfree:
+		kfree(release_ptr);
+		break;
+	}
+	case CAM_FLUSH_REQ: {
+		struct cam_flush_dev_cmd flush;
+
+		if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle),
+			sizeof(flush)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_flush_dev(node, &flush);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"flush device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
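
The CAM_ACQUIRE_HW and CAM_RELEASE_HW cases above rely on a
version-first UAPI layout: the leading u32 of every versioned struct is
the API version, so the handler can peek at it with a small
copy_from_user(), size the full copy from the version, and dispatch. A
minimal sketch of that two-phase pattern in isolation (the struct and
function names here are illustrative, not from the driver):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

/* Hypothetical versioned commands; only the leading u32 layout matters */
struct demo_cmd_v1 { u32 version; u32 payload; };
struct demo_cmd_v2 { u32 version; u32 payload; u64 extra; };

static int demo_handle_versioned(u64 user_handle)
{
	void __user *uptr = u64_to_user_ptr(user_handle);
	u32 version;
	size_t size;
	void *buf;
	int rc = 0;

	/* Phase 1: peek at the leading version field only */
	if (copy_from_user(&version, uptr, sizeof(version)))
		return -EFAULT;

	/* Phase 2: size the buffer from the version, then copy it all */
	switch (version) {
	case 1: size = sizeof(struct demo_cmd_v1); break;
	case 2: size = sizeof(struct demo_cmd_v2); break;
	default: return -EINVAL;
	}

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, uptr, size))
		rc = -EFAULT;

	/* ...dispatch on version, then mirror results back to user... */
	if (!rc && copy_to_user(uptr, buf, size))
		rc = -EFAULT;

	kfree(buf);
	return rc;
}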

+ 104 - 0
drivers/cam_core/cam_node.h

@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_NODE_H_
+#define _CAM_NODE_H_
+
+#include <linux/kref.h>
+#include "cam_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+#define CAM_NODE_NAME_LENGTH_MAX        256
+
+#define CAM_NODE_STATE_UNINIT           0
+#define CAM_NODE_STATE_INIT             1
+
+/**
+ * struct cam_node - Singleton Node for camera HW devices
+ *
+ * @name:                  Name for struct cam_node
+ * @state:                 Node state:
+ *                            0 = uninitialized, 1 = initialized
+ * @list_mutex:            Mutex for the context pool
+ * @free_ctx_list:         Free context pool list
+ * @ctx_list:              Context list
+ * @ctx_size:              Context list size
+ * @hw_mgr_intf:           Interface for cam_node to HW
+ * @crm_node_intf:         Interface for the CRM to cam_node
+ *
+ */
+struct cam_node {
+	char                         name[CAM_NODE_NAME_LENGTH_MAX];
+	uint32_t                     state;
+
+	/* context pool */
+	struct mutex                 list_mutex;
+	struct list_head             free_ctx_list;
+	struct cam_context          *ctx_list;
+	uint32_t                     ctx_size;
+
+	/* interfaces */
+	struct cam_hw_mgr_intf       hw_mgr_intf;
+	struct cam_req_mgr_kmd_ops   crm_node_intf;
+};
+
+/**
+ * cam_node_handle_ioctl()
+ *
+ * @brief:       Handle ioctl commands
+ *
+ * @node:                  Node handle
+ * @cmd:                   IOCTL command
+ *
+ */
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd);
+
+/**
+ * cam_node_deinit()
+ *
+ * @brief:       Deinitialization function for the Node interface
+ *
+ * @node:                  Node handle
+ *
+ */
+int cam_node_deinit(struct cam_node *node);
+
+/**
+ * cam_node_shutdown()
+ *
+ * @brief:       Shuts down and closes the cam node.
+ *
+ * @node:                  Cam_node pointer
+ *
+ */
+int cam_node_shutdown(struct cam_node *node);
+
+/**
+ * cam_node_init()
+ *
+ * @brief:       Initialization function for the Node interface.
+ *
+ * @node:                  Cam_node pointer
+ * @hw_mgr_intf:           HW manager interface blob
+ * @ctx_list:              List of cam_contexts to be added
+ * @ctx_size:              Number of contexts in ctx_list
+ * @name:                  Name for the node
+ *
+ */
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
+
+/**
+ * cam_node_put_ctxt_to_free_list()
+ *
+ * @brief:       Put context in node free list.
+ *
+ * @ref:         Context's kref object
+ *
+ */
+void cam_node_put_ctxt_to_free_list(struct kref *ref);
+
+#endif /* _CAM_NODE_H_ */
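
Taken together, the header defines a simple lifecycle: the owning
driver supplies an array of cam_context objects to cam_node_init(),
forwards VIDIOC_CAM_CONTROL payloads to cam_node_handle_ioctl(), and
tears down with cam_node_deinit(). A condensed sketch of a caller,
assuming the contexts were already set up through the cam_context API
(all my_* names are placeholders):

#define MY_NUM_CTX 4

static struct cam_node my_node;
static struct cam_context my_ctx[MY_NUM_CTX];

static int my_dev_setup(struct cam_hw_mgr_intf *hw_mgr_intf)
{
	/* ...cam_context_init() on each my_ctx[i] happens before this... */
	return cam_node_init(&my_node, hw_mgr_intf, my_ctx,
		MY_NUM_CTX, "my-cam-node");
}

static int my_dev_ioctl(struct cam_control *cmd)
{
	/* The node must be initialized before accepting commands */
	if (my_node.state != CAM_NODE_STATE_INIT)
		return -EINVAL;

	return cam_node_handle_ioctl(&my_node, cmd);
}

static void my_dev_teardown(void)
{
	cam_node_deinit(&my_node);
}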

+ 154 - 0
drivers/cam_core/cam_subdev.c

@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_subdev_subscribe_event()
+ *
+ * @brief: function to subscribe to v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_subscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
+}
+
+/**
+ * cam_subdev_unsubscribe_event()
+ *
+ * @brief: function to unsubscribe from v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
+	void *arg)
+{
+	long rc;
+	struct cam_node *node =
+		(struct cam_node *) v4l2_get_subdevdata(sd);
+
+	if (!node || node->state == CAM_NODE_STATE_UNINIT) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_node_handle_ioctl(node,
+			(struct cam_control *) arg);
+		break;
+	default:
+		CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
+			node->name);
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int rc;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+	rc = cam_subdev_ioctl(sd, cmd, &cmd_data);
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CORE,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+const struct v4l2_subdev_core_ops cam_subdev_core_ops = {
+	.ioctl = cam_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_subdev_compat_ioctl,
+#endif
+	.subscribe_event = cam_subdev_subscribe_event,
+	.unsubscribe_event = cam_subdev_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops cam_subdev_ops = {
+	.core = &cam_subdev_core_ops,
+};
+
+int cam_subdev_remove(struct cam_subdev *sd)
+{
+	if (!sd)
+		return -EINVAL;
+
+	cam_unregister_subdev(sd);
+	cam_node_deinit((struct cam_node *)sd->token);
+	kfree(sd->token);
+
+	return 0;
+}
+
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type)
+{
+	int rc;
+	struct cam_node *node = NULL;
+
+	if (!sd || !pdev || !name)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	/* Setup camera v4l2 subdevice */
+	sd->pdev = pdev;
+	sd->name = name;
+	sd->ops = &cam_subdev_ops;
+	sd->token = node;
+	sd->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	sd->ent_function = dev_type;
+
+	rc = cam_register_subdev(sd);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
+			sd->name);
+		goto err;
+	}
+	platform_set_drvdata(sd->pdev, sd);
+	return rc;
+err:
+	kfree(node);
+	return rc;
+}
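
cam_subdev_probe() allocates the backing cam_node and parks it in
sd->token, but deliberately leaves the node uninitialized; the calling
driver is expected to finish the job with cam_node_init() once its HW
manager is ready, which is the pattern the per-device drivers (for
example cam_fd_dev.c) follow. A rough sketch of such a probe (my_*
names and MY_DEVICE_TYPE are placeholders):

static struct cam_subdev my_subdev;
static struct cam_context my_ctx[4];

static int my_platform_probe(struct platform_device *pdev)
{
	struct cam_hw_mgr_intf hw_mgr_intf = {};
	struct cam_node *node;
	int rc;

	/* Registers the v4l2 subdev and allocates the backing cam_node */
	rc = cam_subdev_probe(&my_subdev, pdev, "my-dev", MY_DEVICE_TYPE);
	if (rc)
		return rc;

	/* ...set up the HW manager, fill hw_mgr_intf, init my_ctx[]... */

	node = (struct cam_node *)my_subdev.token;
	rc = cam_node_init(node, &hw_mgr_intf, my_ctx,
		ARRAY_SIZE(my_ctx), "my-dev");
	if (rc)
		cam_subdev_remove(&my_subdev);

	return rc;
}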

+ 14 - 0
drivers/cam_cpas/Makefile

@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+#ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/cpas_top
+#ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/camss_top
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o

+ 1857 - 0
drivers/cam_cpas/cam_cpas_hw.c

@@ -0,0 +1,1857 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "cam_cpas_hw.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_soc.h"
+
+static uint cam_min_camnoc_ib_bw;
+module_param(cam_min_camnoc_ib_bw, uint, 0644);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (reg_info->enable == false)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
+	if (reg_info->masked_value) {
+		value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base +
+			reg_info->offset);
+		value = value & (~reg_info->mask);
+		value = value | (reg_info->value << reg_info->shift);
+	} else {
+		value = reg_info->value;
+	}
+
+	CAM_DBG(CAM_CPAS, "Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_info->offset, value);
+
+	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
+		reg_info->offset);
+
+	return 0;
+}
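
An aside on the masked path above: with reg_info->masked_value set, the
update is a read-modify-write of new = (old & ~mask) | (value << shift),
where the mask is already positioned over the field and only the value
needs shifting. A worked example with illustrative numbers:

	/* RMW of the 4-bit field at bits 11..8 (mask is pre-shifted) */
	uint32_t old_val = 0x0000ABCD;
	uint32_t mask    = 0xF << 8;				/* 0x00000F00 */
	uint32_t new_val = (old_val & ~mask) | (0x7 << 8);	/* 0x0000A7CD */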
+
+static int cam_cpas_util_vote_bus_client_level(
+	struct cam_cpas_bus_client *bus_client, unsigned int level)
+{
+	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
+		CAM_ERR(CAM_CPAS, "Invalid params %d %d", bus_client->valid,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	if (level >= bus_client->num_usecases) {
+		CAM_ERR(CAM_CPAS, "Invalid vote level=%d, usecases=%d", level,
+			bus_client->num_usecases);
+		return -EINVAL;
+	}
+
+	if (level == bus_client->curr_vote_level)
+		return 0;
+
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] index[%d]",
+		bus_client->client_id, bus_client->name, level);
+	msm_bus_scale_client_update_request(bus_client->client_id, level);
+	bus_client->curr_vote_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_bw(
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib,
+	bool camnoc_bw)
+{
+	struct msm_bus_paths *path;
+	struct msm_bus_scale_pdata *pdata;
+	int idx = 0;
+	uint64_t min_camnoc_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+
+	if (cam_min_camnoc_ib_bw > 0)
+		min_camnoc_ib_bw = (uint64_t)cam_min_camnoc_ib_bw * 1000000L;
+
+	CAM_DBG(CAM_CPAS, "cam_min_camnoc_ib_bw = %d, min_camnoc_ib_bw=%llu",
+		cam_min_camnoc_ib_bw, min_camnoc_ib_bw);
+
+	if (!bus_client->valid) {
+		CAM_ERR(CAM_CPAS, "bus client not valid");
+		return -EINVAL;
+	}
+
+	if ((bus_client->num_usecases != 2) ||
+		(bus_client->num_paths != 1) ||
+		(bus_client->dyn_vote != true)) {
+		CAM_ERR(CAM_CPAS, "dynamic update not allowed %d %d %d",
+			bus_client->num_usecases, bus_client->num_paths,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bus_client->lock);
+
+	if (bus_client->curr_vote_level > 1) {
+		CAM_ERR(CAM_CPAS, "curr_vote_level %d cannot be greater than 1",
+			bus_client->curr_vote_level);
+		mutex_unlock(&bus_client->lock);
+		return -EINVAL;
+	}
+
+	idx = bus_client->curr_vote_level;
+	idx = 1 - idx;
+	bus_client->curr_vote_level = idx;
+	mutex_unlock(&bus_client->lock);
+
+	if (camnoc_bw == true) {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_CAMNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_CAMNOC_AB_BW;
+
+		if ((ib > 0) && (ib < min_camnoc_ib_bw))
+			ib = min_camnoc_ib_bw;
+	} else {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW;
+
+		if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW))
+			ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW;
+	}
+
+	pdata = bus_client->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+		bus_client->client_id, bus_client->name, ab, ib, idx);
+	msm_bus_scale_client_update_request(bus_client->client_id, idx);
+
+	return 0;
+}
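
The dynamic-vote path works because the client is registered with
exactly two msm_bus usecase slots: every update writes fresh ab/ib
values into the slot that is not currently active and then makes that
slot current, so the bus driver never sees a half-written vector. A
distilled sketch (the real function flips curr_vote_level under
bus_client->lock before touching the vectors, and applies the
minimum-bandwidth floors first):

static void demo_dyn_vote(struct cam_cpas_bus_client *bus_client,
	uint64_t ab, uint64_t ib)
{
	struct msm_bus_scale_pdata *pdata = bus_client->pdata;
	int idx = 1 - bus_client->curr_vote_level;	/* 0 -> 1, 1 -> 0 */

	pdata->usecase[idx].vectors[0].ab = ab;	/* average bandwidth */
	pdata->usecase[idx].vectors[0].ib = ib;	/* instantaneous bandwidth */

	msm_bus_scale_client_update_request(bus_client->client_id, idx);
	bus_client->curr_vote_level = idx;
}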
+
+static int cam_cpas_util_register_bus_client(
+	struct cam_hw_soc_info *soc_info, struct device_node *dev_node,
+	struct cam_cpas_bus_client *bus_client)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	uint32_t client_id;
+	int rc;
+
+	pdata = msm_bus_pdata_from_node(soc_info->pdev,
+		dev_node);
+	if (!pdata) {
+		CAM_ERR(CAM_CPAS, "failed get_pdata");
+		return -EINVAL;
+	}
+
+	if ((pdata->num_usecases == 0) ||
+		(pdata->usecase[0].num_paths == 0)) {
+		CAM_ERR(CAM_CPAS, "usecase=%d", pdata->num_usecases);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	client_id = msm_bus_scale_register_client(pdata);
+	if (!client_id) {
+		CAM_ERR(CAM_CPAS, "failed in register ahb bus client");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bus_client->dyn_vote = of_property_read_bool(dev_node,
+		"qcom,msm-bus-vector-dyn-vote");
+
+	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
+		CAM_ERR(CAM_CPAS, "Excess or less vectors %d",
+			pdata->num_usecases);
+		rc = -EINVAL;
+		goto fail_unregister_client;
+	}
+
+	msm_bus_scale_client_update_request(client_id, 0);
+
+	bus_client->src = pdata->usecase[0].vectors[0].src;
+	bus_client->dst = pdata->usecase[0].vectors[0].dst;
+	bus_client->pdata = pdata;
+	bus_client->client_id = client_id;
+	bus_client->num_usecases = pdata->num_usecases;
+	bus_client->num_paths = pdata->usecase[0].num_paths;
+	bus_client->curr_vote_level = 0;
+	bus_client->valid = true;
+	bus_client->name = pdata->name;
+	mutex_init(&bus_client->lock);
+
+	CAM_DBG(CAM_CPAS, "Bus Client=[%d][%s] : src=%d, dst=%d",
+		bus_client->client_id, bus_client->name,
+		bus_client->src, bus_client->dst);
+
+	return 0;
+fail_unregister_client:
+	msm_bus_scale_unregister_client(bus_client->client_id);
+error:
+	return rc;
+}
+
+static int cam_cpas_util_unregister_bus_client(
+	struct cam_cpas_bus_client *bus_client)
+{
+	if (!bus_client->valid)
+		return -EINVAL;
+
+	if (bus_client->dyn_vote)
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0, false);
+	else
+		cam_cpas_util_vote_bus_client_level(bus_client, 0);
+
+	msm_bus_scale_unregister_client(bus_client->client_id);
+	bus_client->valid = false;
+
+	mutex_destroy(&bus_client->lock);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	int i = 0;
+
+	for (i = 0; i < cpas_core->num_axi_ports; i++) {
+		cam_cpas_util_unregister_bus_client(
+			&cpas_core->axi_port[i].bus_client);
+		of_node_put(cpas_core->axi_port[i].axi_port_node);
+		cpas_core->axi_port[i].axi_port_node = NULL;
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	int i = 0, rc = 0;
+	struct device_node *axi_port_mnoc_node = NULL;
+
+	for (i = 0; i < cpas_core->num_axi_ports; i++) {
+		axi_port_mnoc_node = cpas_core->axi_port[i].axi_port_node;
+		rc = cam_cpas_util_register_bus_client(soc_info,
+			axi_port_mnoc_node, &cpas_core->axi_port[i].bus_client);
+		if (rc)
+			goto bus_register_fail;
+	}
+
+	return 0;
+bus_register_fail:
+	of_node_put(cpas_core->axi_port[i].axi_port_node);
+	return rc;
+}
+
+static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
+	int enable)
+{
+	int rc, i = 0;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	uint64_t ab_bw, ib_bw;
+
+	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d",
+			enable, rc);
+		return rc;
+	}
+
+	if (enable) {
+		ab_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		ib_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	} else {
+		ab_bw = 0;
+		ib_bw = 0;
+	}
+
+	for (i = 0; i < cpas_core->num_axi_ports; i++) {
+		rc = cam_cpas_util_vote_bus_client_bw(
+			&cpas_core->axi_port[i].bus_client,
+			ab_bw, ib_bw, false);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"Failed in mnoc vote, enable=%d, rc=%d",
+				enable, rc);
+			goto remove_ahb_vote;
+		}
+	}
+
+	return 0;
+remove_ahb_vote:
+	cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		CAM_SUSPEND_VOTE);
+	return rc;
+}
+
+static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		CAM_ERR(CAM_CPAS,
+			"Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		cam_io_w_mb(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		cam_io_w(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t *value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t reg_value;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!value)
+		return -EINVAL;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		CAM_ERR(CAM_CPAS,
+			"Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		reg_value = cam_io_r(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+	*value = reg_value;
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_set_camnoc_axi_clk_rate(
+	struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_tree_node *tree_node = NULL;
+	int rc = 0, i = 0;
+
+	CAM_DBG(CAM_CPAS, "control_camnoc_axi_clk=%d",
+		soc_private->control_camnoc_axi_clk);
+
+	if (soc_private->control_camnoc_axi_clk) {
+		struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+		uint64_t required_camnoc_bw = 0;
+		int32_t clk_rate = 0;
+
+		for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) {
+			tree_node = soc_private->tree_node[i];
+			if (!tree_node ||
+				!tree_node->camnoc_max_needed)
+				continue;
+
+			if (required_camnoc_bw < (tree_node->camnoc_bw *
+				tree_node->bus_width_factor)) {
+				required_camnoc_bw = tree_node->camnoc_bw *
+					tree_node->bus_width_factor;
+			}
+		}
+
+		required_camnoc_bw += (required_camnoc_bw *
+			soc_private->camnoc_axi_clk_bw_margin) / 100;
+
+		if ((required_camnoc_bw > 0) &&
+			(required_camnoc_bw <
+			soc_private->camnoc_axi_min_ib_bw))
+			required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
+
+		clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
+
+		CAM_DBG(CAM_CPAS, "Setting camnoc axi clk rate : %llu %d",
+			required_camnoc_bw, clk_rate);
+
+		/*
+		 * CPAS hw is not powered on for the first client.
+		 * Also, clk_rate will be overwritten with default
+		 * value while power on. So, skipping this for first
+		 * client.
+		 */
+		if (cpas_core->streamon_clients) {
+			rc = cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+			if (rc)
+				CAM_ERR(CAM_CPAS,
+				"Failed in setting camnoc axi clk %llu %d %d",
+				required_camnoc_bw, clk_rate, rc);
+		}
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_translate_client_paths(
+	struct cam_axi_vote *axi_vote)
+{
+	int i;
+	uint32_t *path_data_type = NULL;
+
+	if (!axi_vote)
+		return -EINVAL;
+
+	for (i = 0; i < axi_vote->num_paths; i++) {
+		path_data_type = &axi_vote->axi_path[i].path_data_type;
+		/* Update path_data_type from UAPI value to internal value */
+		if (*path_data_type >= CAM_CPAS_PATH_DATA_CONSO_OFFSET)
+			*path_data_type = CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT +
+				(*path_data_type %
+				CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT);
+		else
+			*path_data_type %= CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT;
+
+		if (*path_data_type >= CAM_CPAS_PATH_DATA_MAX) {
+			CAM_ERR(CAM_CPAS, "Invalid path data type: %d",
+				*path_data_type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
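
The folding above maps the sparse UAPI path IDs onto a dense internal
table: granular IDs reduce modulo CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT,
while consolidated IDs (at or above CAM_CPAS_PATH_DATA_CONSO_OFFSET)
land in a second bank placed right after the granular one. With
hypothetical constants (32 granular slots, consolidated offset 0x100)
the arithmetic looks like this:

#define DEMO_MAX_GRAN		32	/* stand-in for MAX_GRAN_PATHS_PER_CLIENT */
#define DEMO_CONSO_OFFSET	0x100	/* stand-in for PATH_DATA_CONSO_OFFSET */

static uint32_t demo_translate(uint32_t uapi_id)
{
	if (uapi_id >= DEMO_CONSO_OFFSET)
		/* consolidated bank starts right after the granular bank */
		return DEMO_MAX_GRAN + (uapi_id % DEMO_MAX_GRAN);
	return uapi_id % DEMO_MAX_GRAN;	/* granular IDs stay in bank 0 */
}

/* demo_translate(3) == 3; demo_translate(0x101) == 32 + (257 % 32) == 33 */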
+
+static int cam_cpas_axi_consolidate_path_votes(
+	struct cam_cpas_client *cpas_client,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc = 0, i, k, l;
+	struct cam_axi_vote *con_axi_vote = &cpas_client->axi_vote;
+	bool path_found = false, cons_entry_found;
+	struct cam_cpas_tree_node *curr_tree_node = NULL;
+	struct cam_cpas_tree_node *sum_tree_node = NULL;
+	uint32_t transac_type;
+	uint32_t path_data_type;
+	struct cam_axi_per_path_bw_vote *axi_path;
+
+	con_axi_vote->num_paths = 0;
+
+	for (i = 0; i < axi_vote->num_paths; i++) {
+		path_found = false;
+		path_data_type = axi_vote->axi_path[i].path_data_type;
+		transac_type = axi_vote->axi_path[i].transac_type;
+
+		if ((path_data_type >= CAM_CPAS_PATH_DATA_MAX) ||
+			(transac_type >= CAM_CPAS_TRANSACTION_MAX)) {
+			CAM_ERR(CAM_CPAS, "Invalid path or transac type: %d %d",
+				path_data_type, transac_type);
+			return -EINVAL;
+		}
+
+		axi_path = &con_axi_vote->axi_path[con_axi_vote->num_paths];
+
+		curr_tree_node =
+			cpas_client->tree_node[path_data_type][transac_type];
+		if (curr_tree_node) {
+			path_found = true;
+			memcpy(axi_path, &axi_vote->axi_path[i],
+				sizeof(struct cam_axi_per_path_bw_vote));
+			con_axi_vote->num_paths++;
+			continue;
+		}
+
+		for (k = 0; k < CAM_CPAS_PATH_DATA_MAX; k++) {
+			sum_tree_node = cpas_client->tree_node[k][transac_type];
+
+			if (!sum_tree_node)
+				continue;
+
+			if (sum_tree_node->constituent_paths[path_data_type]) {
+				path_found = true;
+				/*
+				 * Check if corresponding consolidated path
+				 * entry is already added into consolidated list
+				 */
+				cons_entry_found = false;
+				for (l = 0; l < con_axi_vote->num_paths; l++) {
+					if ((con_axi_vote->axi_path[l]
+					.path_data_type == k) &&
+					(con_axi_vote->axi_path[l]
+					.transac_type == transac_type)) {
+						cons_entry_found = true;
+						con_axi_vote->axi_path[l]
+						.camnoc_bw +=
+						axi_vote->axi_path[i]
+						.camnoc_bw;
+
+						con_axi_vote->axi_path[l]
+						.mnoc_ab_bw +=
+						axi_vote->axi_path[i]
+						.mnoc_ab_bw;
+
+						con_axi_vote->axi_path[l]
+						.mnoc_ib_bw +=
+						axi_vote->axi_path[i]
+						.mnoc_ib_bw;
+						break;
+					}
+				}
+
+				/* If not found, add a new entry */
+				if (!cons_entry_found) {
+					axi_path->path_data_type = k;
+					axi_path->transac_type = transac_type;
+					axi_path->camnoc_bw =
+					axi_vote->axi_path[i].camnoc_bw;
+					axi_path->mnoc_ab_bw =
+					axi_vote->axi_path[i].mnoc_ab_bw;
+					axi_path->mnoc_ib_bw =
+					axi_vote->axi_path[i].mnoc_ib_bw;
+					con_axi_vote->num_paths++;
+				}
+				break;
+			}
+		}
+
+		if (!path_found) {
+			CAM_ERR(CAM_CPAS,
+				"Client [%s][%d] Consolidated path not found for path=%d, transac=%d",
+				cpas_client->data.identifier,
+				cpas_client->data.cell_index,
+				path_data_type, transac_type);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_axi_vote(
+	struct cam_hw_info *cpas_hw,
+	struct cam_cpas_client *cpas_client,
+	struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_axi_vote *con_axi_vote = NULL;
+	struct cam_cpas_axi_port *axi_port = NULL;
+	struct cam_cpas_tree_node *curr_tree_node = NULL;
+	struct cam_cpas_tree_node *par_tree_node = NULL;
+	uint32_t transac_type;
+	uint32_t path_data_type;
+	bool axi_port_updated[CAM_CPAS_MAX_AXI_PORTS] = {false};
+	uint64_t mnoc_ab_bw = 0, mnoc_ib_bw = 0,
+		curr_camnoc_old = 0, curr_mnoc_ab_old = 0, curr_mnoc_ib_old = 0,
+		par_camnoc_old = 0, par_mnoc_ab_old = 0, par_mnoc_ib_old = 0;
+	int rc = 0, i = 0;
+
+	mutex_lock(&cpas_core->tree_lock);
+	if (!cpas_client->tree_node_valid) {
+		/*
+		 * This is by assuming apply_client_axi_vote is called
+		 * for these clients from only cpas_start, cpas_stop.
+		 * not called from hw_update_axi_vote
+		 */
+		for (i = 0; i < cpas_core->num_axi_ports; i++) {
+			if (axi_vote->axi_path[0].mnoc_ab_bw) {
+				/* start case */
+				cpas_core->axi_port[i].additional_bw +=
+					CAM_CPAS_DEFAULT_AXI_BW;
+			} else {
+				/* stop case */
+				cpas_core->axi_port[i].additional_bw -=
+					CAM_CPAS_DEFAULT_AXI_BW;
+			}
+			axi_port_updated[i] = true;
+		}
+		goto vote_start_clients;
+	}
+
+	rc = cam_cpas_axi_consolidate_path_votes(cpas_client, axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_PERF, "Failed in bw consolidation, Client [%s][%d]",
+			cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		goto unlock_tree;
+	}
+
+	con_axi_vote = &cpas_client->axi_vote;
+
+	cam_cpas_dump_axi_vote_info(cpas_client, "Consolidated Vote",
+		con_axi_vote);
+
+	/* Traverse through node tree and update bw vote values */
+	for (i = 0; i < con_axi_vote->num_paths; i++) {
+		path_data_type =
+		con_axi_vote->axi_path[i].path_data_type;
+		transac_type =
+		con_axi_vote->axi_path[i].transac_type;
+		curr_tree_node = cpas_client->tree_node[path_data_type]
+			[transac_type];
+
+		if (con_axi_vote->axi_path[i].mnoc_ab_bw == 0)
+			con_axi_vote->axi_path[i].mnoc_ab_bw =
+				con_axi_vote->axi_path[i].camnoc_bw;
+
+		if (con_axi_vote->axi_path[i].camnoc_bw == 0)
+			con_axi_vote->axi_path[i].camnoc_bw =
+				con_axi_vote->axi_path[i].mnoc_ab_bw;
+
+		if ((curr_tree_node->camnoc_bw ==
+			con_axi_vote->axi_path[i].camnoc_bw) &&
+			(curr_tree_node->mnoc_ab_bw ==
+			con_axi_vote->axi_path[i].mnoc_ab_bw) &&
+			(curr_tree_node->mnoc_ib_bw ==
+			con_axi_vote->axi_path[i].mnoc_ib_bw))
+			continue;
+
+		curr_camnoc_old = curr_tree_node->camnoc_bw;
+		curr_mnoc_ab_old = curr_tree_node->mnoc_ab_bw;
+		curr_mnoc_ib_old = curr_tree_node->mnoc_ib_bw;
+		curr_tree_node->camnoc_bw =
+			con_axi_vote->axi_path[i].camnoc_bw;
+		curr_tree_node->mnoc_ab_bw =
+			con_axi_vote->axi_path[i].mnoc_ab_bw;
+		curr_tree_node->mnoc_ib_bw =
+			con_axi_vote->axi_path[i].mnoc_ib_bw;
+
+		while (curr_tree_node->parent_node) {
+			par_tree_node = curr_tree_node->parent_node;
+			par_camnoc_old = par_tree_node->camnoc_bw;
+			par_mnoc_ab_old = par_tree_node->mnoc_ab_bw;
+			par_mnoc_ib_old = par_tree_node->mnoc_ib_bw;
+			par_tree_node->mnoc_ab_bw -= curr_mnoc_ab_old;
+			par_tree_node->mnoc_ab_bw += curr_tree_node->mnoc_ab_bw;
+			par_tree_node->mnoc_ib_bw -= curr_mnoc_ib_old;
+			par_tree_node->mnoc_ib_bw += curr_tree_node->mnoc_ib_bw;
+
+			if (par_tree_node->merge_type ==
+				CAM_CPAS_TRAFFIC_MERGE_SUM) {
+				par_tree_node->camnoc_bw -=
+					curr_camnoc_old;
+				par_tree_node->camnoc_bw +=
+					curr_tree_node->camnoc_bw;
+			} else if (par_tree_node->merge_type ==
+				CAM_CPAS_TRAFFIC_MERGE_SUM_INTERLEAVE) {
+				par_tree_node->camnoc_bw -=
+					(curr_camnoc_old / 2);
+				par_tree_node->camnoc_bw +=
+					(curr_tree_node->camnoc_bw / 2);
+			} else {
+				CAM_ERR(CAM_CPAS, "Invalid Merge type");
+				rc = -EINVAL;
+				goto unlock_tree;
+			}
+
+			if (!par_tree_node->parent_node) {
+				if ((par_tree_node->axi_port_idx < 0) ||
+					(par_tree_node->axi_port_idx >=
+					CAM_CPAS_MAX_AXI_PORTS)) {
+					CAM_ERR(CAM_CPAS,
+					"AXI port index invalid");
+					rc = -EINVAL;
+					goto unlock_tree;
+				}
+
+				cpas_core->axi_port
+				[par_tree_node->axi_port_idx].ab_bw =
+				par_tree_node->mnoc_ab_bw;
+				cpas_core->axi_port
+				[par_tree_node->axi_port_idx].ib_bw =
+				par_tree_node->mnoc_ib_bw;
+				axi_port_updated[par_tree_node->axi_port_idx] =
+					true;
+			}
+
+			curr_tree_node = par_tree_node;
+			curr_camnoc_old = par_camnoc_old;
+			curr_mnoc_ab_old = par_mnoc_ab_old;
+			curr_mnoc_ib_old = par_mnoc_ib_old;
+		}
+	}
+
+	if (!par_tree_node) {
+		CAM_DBG(CAM_CPAS, "No change in BW for all paths");
+		rc = 0;
+		goto unlock_tree;
+	}
+
+vote_start_clients:
+	for (i = 0; i < cpas_core->num_axi_ports; i++) {
+		if (axi_port_updated[i])
+			axi_port = &cpas_core->axi_port[i];
+		else
+			continue;
+
+		CAM_DBG(CAM_PERF, "Port[%s] : ab=%lld ib=%lld additional=%lld",
+			axi_port->axi_port_name, axi_port->ab_bw,
+			axi_port->ib_bw, axi_port->additional_bw);
+
+		if (axi_port->ab_bw)
+			mnoc_ab_bw = axi_port->ab_bw;
+		else
+			mnoc_ab_bw = axi_port->additional_bw;
+
+		if (cpas_core->axi_port[i].ib_bw_voting_needed)
+			mnoc_ib_bw = axi_port->ib_bw;
+		else
+			mnoc_ib_bw = 0;
+
+		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->bus_client,
+			mnoc_ab_bw, mnoc_ib_bw, false);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
+				mnoc_ab_bw, mnoc_ib_bw, rc);
+			goto unlock_tree;
+		}
+	}
+
+	rc = cam_cpas_util_set_camnoc_axi_clk_rate(cpas_hw);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "Failed in setting axi clk rate rc=%d", rc);
+
+unlock_tree:
+	mutex_unlock(&cpas_core->tree_lock);
+	return rc;
+}
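
The walk above never recomputes a subtree sum: each parent drops the
child's old contribution and adds the new one, and for camnoc the merge
folds in either the full child value (SUM) or half of it
(SUM_INTERLEAVE, where traffic splits across two parents). A toy trace
for one SUM parent whose child vote moves from 100 to 250 units:

	uint64_t parent_camnoc = 400;	/* currently includes the child's 100 */
	uint64_t child_old = 100, child_new = 250;

	parent_camnoc -= child_old;	/* 300: stale contribution removed */
	parent_camnoc += child_new;	/* 550: fresh vote folded in */

	/* SUM_INTERLEAVE would use child_old / 2 and child_new / 2 instead */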
+
+static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_axi_vote *client_axi_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	struct cam_axi_vote axi_vote = {0};
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!client_axi_vote) {
+		CAM_ERR(CAM_CPAS, "Invalid arg, client_handle=%d",
+			client_handle);
+		return -EINVAL;
+	}
+
+	memcpy(&axi_vote, client_axi_vote, sizeof(struct cam_axi_vote));
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
+		"Incoming Vote", &axi_vote);
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	rc = cam_cpas_util_translate_client_paths(&axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CPAS,
+			"Unable to translate per path votes rc: %d", rc);
+		goto unlock_client;
+	}
+
+	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
+		"Translated Vote", &axi_vote);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+		cpas_core->cpas_client[client_indx], &axi_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
+	struct device *dev, unsigned long freq, enum cam_vote_level *req_level)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct dev_pm_opp *opp;
+	unsigned int corner;
+	enum cam_vote_level level = CAM_SVS_VOTE;
+	unsigned long corner_freq = freq;
+	int i;
+
+	if (!dev || !req_level) {
+		CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level);
+		return -EINVAL;
+	}
+
+	opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
+	if (IS_ERR(opp)) {
+		CAM_DBG(CAM_CPAS, "OPP ceil not available for freq=%ld, %pK",
+			corner_freq, opp);
+		*req_level = CAM_TURBO_VOTE;
+		return 0;
+	}
+
+	corner = dev_pm_opp_get_voltage(opp);
+
+	for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
+		if (corner == soc_private->vdd_ahb[i].vdd_corner)
+			level = soc_private->vdd_ahb[i].ahb_level;
+
+	CAM_DBG(CAM_CPAS,
+		"From OPP table : freq=[%ld][%ld], corner=%d, level=%d",
+		freq, corner_freq, corner, level);
+
+	*req_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote,
+	enum cam_vote_level *applied_level)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
+	enum cam_vote_level required_level;
+	enum cam_vote_level highest_level;
+	int i, rc = 0;
+
+	if (!ahb_bus_client->valid) {
+		CAM_ERR(CAM_CPAS, "AHB Bus client not valid");
+		return -EINVAL;
+	}
+
+	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
+		rc = cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev,
+			ahb_vote->vote.freq, &required_level);
+		if (rc)
+			return rc;
+	} else {
+		required_level = ahb_vote->vote.level;
+	}
+
+	if (cpas_client->ahb_level == required_level)
+		return 0;
+
+	mutex_lock(&ahb_bus_client->lock);
+	cpas_client->ahb_level = required_level;
+
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s] required level[%d], curr_level[%d]",
+		ahb_bus_client->client_id, ahb_bus_client->name,
+		required_level, ahb_bus_client->curr_vote_level);
+
+	if (required_level == ahb_bus_client->curr_vote_level)
+		goto unlock_bus_client;
+
+	highest_level = required_level;
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (cpas_core->cpas_client[i] && (highest_level <
+			cpas_core->cpas_client[i]->ahb_level))
+			highest_level = cpas_core->cpas_client[i]->ahb_level;
+	}
+
+	CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level);
+
+	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
+		highest_level);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d",
+			highest_level, rc);
+		goto unlock_bus_client;
+	}
+
+	rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
+	if (rc) {
+		CAM_ERR(CAM_CPAS,
+			"Failed in scaling clock rate level %d for AHB",
+			highest_level);
+		goto unlock_bus_client;
+	}
+
+	if (applied_level)
+		*applied_level = highest_level;
+
+unlock_bus_client:
+	mutex_unlock(&ahb_bus_client->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_ahb_vote *client_ahb_vote)
+{
+	struct cam_ahb_vote ahb_vote;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!client_ahb_vote) {
+		CAM_ERR(CAM_CPAS, "Invalid input arg");
+		return -EINVAL;
+	}
+
+	ahb_vote = *client_ahb_vote;
+
+	if (ahb_vote.vote.level == 0) {
+		CAM_DBG(CAM_CPAS, "0 ahb vote from client %d",
+			client_handle);
+		ahb_vote.type = CAM_VOTE_ABSOLUTE;
+		ahb_vote.vote.level = CAM_SVS_VOTE;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CAM_DBG(CAM_PERF,
+		"client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, ahb_vote.type,
+		ahb_vote.vote.level, ahb_vote.vote.freq,
+		cpas_core->cpas_client[client_indx]->ahb_level);
+
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
+		cpas_core->cpas_client[client_indx], &ahb_vote, NULL);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_util_create_vote_all_paths(
+	struct cam_cpas_client *cpas_client,
+	struct cam_axi_vote *axi_vote)
+{
+	int i, j;
+	uint64_t camnoc_bw, mnoc_ab_bw, mnoc_ib_bw;
+	struct cam_axi_per_path_bw_vote *axi_path;
+
+	if (!cpas_client || !axi_vote)
+		return -EINVAL;
+
+	camnoc_bw = axi_vote->axi_path[0].camnoc_bw;
+	mnoc_ab_bw = axi_vote->axi_path[0].mnoc_ab_bw;
+	mnoc_ib_bw = axi_vote->axi_path[0].mnoc_ib_bw;
+
+	axi_vote->num_paths = 0;
+
+	for (i = 0; i < CAM_CPAS_TRANSACTION_MAX; i++) {
+		for (j = 0; j < CAM_CPAS_PATH_DATA_MAX; j++) {
+			if (cpas_client->tree_node[j][i]) {
+				axi_path =
+				&axi_vote->axi_path[axi_vote->num_paths];
+
+				axi_path->path_data_type = j;
+				axi_path->transac_type = i;
+				axi_path->camnoc_bw = camnoc_bw;
+				axi_path->mnoc_ab_bw = mnoc_ab_bw;
+				axi_path->mnoc_ib_bw = mnoc_ib_bw;
+
+				axi_vote->num_paths++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cpas_hw_start(void *hw_priv, void *start_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_start *cmd_hw_start;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote axi_vote = {0};
+	enum cam_vote_level applied_level = CAM_SVS_VOTE;
+	int rc, i = 0;
+	struct cam_cpas_private_soc *soc_private = NULL;
+	bool invalid_start = true;
+
+	if (!hw_priv || !start_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, start_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
+		CAM_ERR(CAM_CPAS, "CMD_START size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle);
+	ahb_vote = cmd_hw_start->ahb_vote;
+
+	if (!ahb_vote || !cmd_hw_start->axi_vote)
+		return -EINVAL;
+
+	if (!ahb_vote->vote.level) {
+		CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d]",
+			ahb_vote->vote.level);
+		return -EINVAL;
+	}
+
+	memcpy(&axi_vote, cmd_hw_start->axi_vote, sizeof(struct cam_axi_vote));
+	for (i = 0; i < axi_vote.num_paths; i++) {
+		if ((axi_vote.axi_path[i].camnoc_bw != 0) ||
+			(axi_vote.axi_path[i].mnoc_ab_bw != 0) ||
+			(axi_vote.axi_path[i].mnoc_ib_bw != 0)) {
+			invalid_start = false;
+			break;
+		}
+	}
+
+	if (invalid_start) {
+		CAM_ERR(CAM_CPAS, "Zero start vote");
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d] is not registered",
+			client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto done;
+	}
+
+	CAM_DBG(CAM_CPAS,
+		"AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index,
+		ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level);
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
+		ahb_vote, &applied_level);
+	if (rc)
+		goto done;
+
+	cam_cpas_dump_axi_vote_info(cpas_client, "CPAS Start Vote",
+		&axi_vote);
+
+	/*
+	 * If client has indicated start bw to be applied on all paths
+	 * of client, apply that otherwise apply whatever the client supplies
+	 * for specific paths
+	 */
+	if (axi_vote.axi_path[0].path_data_type ==
+		CAM_CPAS_API_PATH_DATA_STD_START) {
+		rc = cam_cpas_util_create_vote_all_paths(cpas_client,
+			&axi_vote);
+	} else {
+		rc = cam_cpas_util_translate_client_paths(&axi_vote);
+	}
+
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Unable to create or translate paths rc: %d",
+			rc);
+		goto done;
+	}
+
+	cam_cpas_dump_axi_vote_info(cpas_client, "CPAS Start Translated Vote",
+		&axi_vote);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+		cpas_client, &axi_vote);
+	if (rc)
+		goto done;
+
+	if (cpas_core->streamon_clients == 0) {
+		atomic_set(&cpas_core->irq_count, 1);
+		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
+			applied_level);
+		if (rc) {
+			atomic_set(&cpas_core->irq_count, 0);
+			CAM_ERR(CAM_CPAS, "enable_resource failed, rc=%d", rc);
+			goto done;
+		}
+
+		if (cpas_core->internal_ops.power_on) {
+			rc = cpas_core->internal_ops.power_on(cpas_hw);
+			if (rc) {
+				atomic_set(&cpas_core->irq_count, 0);
+				cam_cpas_soc_disable_resources(
+					&cpas_hw->soc_info, true, true);
+				CAM_ERR(CAM_CPAS,
+					"failed in power_on settings rc=%d",
+					rc);
+				goto done;
+			}
+		}
+		CAM_DBG(CAM_CPAS, "irq_count=%d\n",
+			atomic_read(&cpas_core->irq_count));
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	}
+
+	cpas_client->started = true;
+	cpas_core->streamon_clients++;
+
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+/* wait_event condition: true once all in-flight IRQ handlers have drained */
+static int _check_irq_count(struct cam_cpas *cpas_core)
+{
+	return (atomic_read(&cpas_core->irq_count) > 0) ? 0 : 1;
+}
+
+static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_stop *cmd_hw_stop;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote = {0};
+	struct cam_cpas_private_soc *soc_private = NULL;
+	int rc = 0;
+	long result;
+
+	if (!hw_priv || !stop_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, stop_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
+		CAM_ERR(CAM_CPAS, "CMD_STOP size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client->started = false;
+	cpas_core->streamon_clients--;
+
+	if (cpas_core->streamon_clients == 0) {
+		if (cpas_core->internal_ops.power_off) {
+			rc = cpas_core->internal_ops.power_off(cpas_hw);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"failed in power_off settings rc=%d",
+					rc);
+				/* Do not return error, passthrough */
+			}
+		}
+
+		rc = cam_cpas_soc_disable_irq(&cpas_hw->soc_info);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "disable_irq failed, rc=%d", rc);
+			goto done;
+		}
+
+		/* Wait for any IRQs still being handled */
+		atomic_dec(&cpas_core->irq_count);
+		result = wait_event_timeout(cpas_core->irq_count_wq,
+			_check_irq_count(cpas_core), HZ);
+		if (result == 0) {
+			CAM_ERR(CAM_CPAS, "Wait failed: irq_count=%d",
+				atomic_read(&cpas_core->irq_count));
+		}
+
+		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info,
+			true, false);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "disable_resource failed, rc=%d", rc);
+			goto done;
+		}
+		CAM_DBG(CAM_CPAS, "Disabled all the resources: irq_count=%d\n",
+			atomic_read(&cpas_core->irq_count));
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
+		&ahb_vote, NULL);
+	if (rc)
+		goto done;
+
+	rc = cam_cpas_util_create_vote_all_paths(cpas_client, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Unable to create per path votes rc: %d", rc);
+		goto done;
+	}
+
+	cam_cpas_dump_axi_vote_info(cpas_client, "CPAS Stop Vote", &axi_vote);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+		cpas_client, &axi_vote);
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
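
Stream-on parks irq_count at 1 and stream-off removes that baseline,
then sleeps until in-flight interrupt work drains the counter to zero;
the increment on ISR entry and the decrement-plus-wake_up on completion
are assumed to live in the SoC/irq code, not in this file. The
stop-side half of the protocol in isolation:

	/* Drop the stream-on baseline, then wait for handlers to drain */
	atomic_dec(&cpas_core->irq_count);
	if (!wait_event_timeout(cpas_core->irq_count_wq,
		atomic_read(&cpas_core->irq_count) <= 0, HZ))
		CAM_ERR(CAM_CPAS, "IRQ drain timed out: irq_count=%d",
			atomic_read(&cpas_core->irq_count));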
+
+static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	int rc = 0;
+
+	if (!hw_priv || !init_hw_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, init_hw_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		CAM_ERR(CAM_CPAS, "INIT HW size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_core->internal_ops.init_hw_version) {
+		rc = cpas_core->internal_ops.init_hw_version(cpas_hw,
+			(struct cam_cpas_hw_caps *)init_hw_args);
+	}
+
+	return rc;
+}
+
+static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+	char client_name[CAM_HW_IDENTIFIER_LENGTH + 3];
+	int32_t client_indx = -1;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d",
+		register_params->identifier, register_params->cell_index);
+
+	if (soc_private->client_id_based)
+		snprintf(client_name, sizeof(client_name), "%s%d",
+			register_params->identifier,
+			register_params->cell_index);
+	else
+		snprintf(client_name, sizeof(client_name), "%s",
+			register_params->identifier);
+
+	mutex_lock(&cpas_hw->hw_mutex);
+
+	rc = cam_common_util_get_string_index(soc_private->client_name,
+		soc_private->num_clients, client_name, &client_indx);
+
+	/* Validate client_indx before using it to index client_mutex */
+	if (rc || !CAM_CPAS_CLIENT_VALID(client_indx)) {
+		CAM_ERR(CAM_CPAS, "Invalid client %s %d : %d %d",
+			register_params->identifier,
+			register_params->cell_index,
+			client_indx, rc);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client %s %d already registered : %pK",
+			register_params->identifier,
+			register_params->cell_index,
+			cpas_core->cpas_client[client_indx]);
+		mutex_unlock(&cpas_core->client_mutex[client_indx]);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	register_params->client_handle =
+		CAM_CPAS_GET_CLIENT_HANDLE(client_indx);
+	memcpy(&cpas_core->cpas_client[client_indx]->data, register_params,
+		sizeof(struct cam_cpas_register_params));
+	cpas_core->registered_clients++;
+	cpas_core->cpas_client[client_indx]->registered = true;
+
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx,
+		cpas_core->cpas_client[client_indx]->data.identifier,
+		cpas_core->cpas_client[client_indx]->data.cell_index,
+		cpas_core->registered_clients);
+
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+
+	return 0;
+}
+
+static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] not registered",
+			client_indx,
+			cpas_core->cpas_client[client_indx]->data.identifier,
+			cpas_core->cpas_client[client_indx]->data.cell_index);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped",
+			client_indx,
+			cpas_core->cpas_client[client_indx]->data.identifier,
+			cpas_core->cpas_client[client_indx]->data.cell_index);
+
+		rc = -EPERM;
+		goto done;
+	}
+
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx,
+		cpas_core->cpas_client[client_indx]->data.identifier,
+		cpas_core->cpas_client[client_indx]->data.cell_index,
+		cpas_core->registered_clients);
+
+	cpas_core->cpas_client[client_indx]->registered = false;
+	cpas_core->registered_clients--;
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_get_hw_info(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_cpas_hw_caps *hw_caps;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	hw_caps = (struct cam_cpas_hw_caps *)get_hw_cap_args;
+
+	*hw_caps = cpas_core->hw_caps;
+
+	return 0;
+}
+
+static int cam_cpas_hw_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d",
+			hw_priv, cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_CPAS_HW_CMD_REGISTER_CLIENT: {
+		struct cam_cpas_register_params *register_params;
+
+		if (sizeof(struct cam_cpas_register_params) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		register_params = (struct cam_cpas_register_params *)cmd_args;
+		rc = cam_cpas_hw_register_client(hw_priv, register_params);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: {
+		uint32_t *client_handle;
+
+		if (sizeof(uint32_t) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		client_handle = (uint32_t *)cmd_args;
+		rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_WRITE: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_write;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_write =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle,
+			reg_write->reg_base, reg_write->offset, reg_write->mb,
+			reg_write->value);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_READ: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_read;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_read =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_read(hw_priv,
+			reg_read->client_handle, reg_read->reg_base,
+			reg_read->offset, reg_read->mb, &reg_read->value);
+
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AHB_VOTE: {
+		struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args;
+		rc = cam_cpas_hw_update_ahb_vote(hw_priv,
+			cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AXI_VOTE: {
+		struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args;
+		rc = cam_cpas_hw_update_axi_vote(hw_priv,
+			cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
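
Every CPAS entry point funnels through this single process_cmd hook on
the HW interface, and each command insists that arg_size matches its
struct exactly. A sketch of a register read issued through the
interface, where cpas_hw_intf is the handle returned by
cam_cpas_hw_probe() and CAM_CPAS_REG_CAMNOC is assumed to be a valid
cam_cpas_reg_base value:

static int demo_reg_read(struct cam_hw_intf *cpas_hw_intf,
	uint32_t client_handle, uint32_t offset, uint32_t *value)
{
	struct cam_cpas_hw_cmd_reg_read_write reg_read = {
		.client_handle = client_handle,
		.reg_base = CAM_CPAS_REG_CAMNOC,
		.offset = offset,
		.mb = true,	/* read with a memory barrier */
	};
	int rc;

	/* arg_size must match the struct exactly or the cmd is rejected */
	rc = cpas_hw_intf->hw_ops.process_cmd(cpas_hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_REG_READ, &reg_read, sizeof(reg_read));
	if (!rc)
		*value = reg_read.value;

	return rc;
}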
+
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++)
+		mutex_init(&cpas_core->client_mutex[i]);
+
+	return 0;
+}
+
+int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
+		if (cpas_core->cpas_client[i] &&
+			cpas_core->cpas_client[i]->registered) {
+			cam_cpas_hw_unregister_client(cpas_hw, i);
+		}
+		kfree(cpas_core->cpas_client[i]);
+		cpas_core->cpas_client[i] = NULL;
+		mutex_destroy(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+	struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "camss_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CAMSSTOP;
+		rc = cam_camsstop_get_internal_ops(internal_ops);
+	} else if (strnstr(compat_str, "cpas_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CPASTOP;
+		rc = cam_cpastop_get_internal_ops(internal_ops);
+	} else {
+		CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat_str);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = kzalloc(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		kfree(cpas_hw);
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->soc_info.dev = &pdev->dev;
+	cpas_hw->soc_info.dev_name = pdev->name;
+	cpas_hw->open_count = 0;
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps = cam_cpas_hw_get_hw_info;
+	cpas_hw_intf->hw_ops.init = cam_cpas_hw_init;
+	cpas_hw_intf->hw_ops.deinit = NULL;
+	cpas_hw_intf->hw_ops.reset = NULL;
+	cpas_hw_intf->hw_ops.reserve = NULL;
+	cpas_hw_intf->hw_ops.release = NULL;
+	cpas_hw_intf->hw_ops.start = cam_cpas_hw_start;
+	cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop;
+	cpas_hw_intf->hw_ops.read = NULL;
+	cpas_hw_intf->hw_ops.write = NULL;
+	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+
+	cpas_core->work_queue = alloc_workqueue("cam-cpas",
+		WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS);
+	if (!cpas_core->work_queue) {
+		rc = -ENOMEM;
+		goto release_mem;
+	}
+
+	internal_ops = &cpas_core->internal_ops;
+	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
+	if (rc)
+		goto release_workq;
+
+	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
+		internal_ops->handle_irq, cpas_hw);
+	if (rc)
+		goto release_workq;
+
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cpas_core->num_clients = soc_private->num_clients;
+	atomic_set(&cpas_core->irq_count, 0);
+	init_waitqueue_head(&cpas_core->irq_count_wq);
+
+	if (internal_ops->setup_regbase) {
+		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
+			cpas_core->regbase_index, CAM_CPAS_REG_MAX);
+		if (rc)
+			goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_client_setup(cpas_hw);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info,
+		cpas_hw->soc_info.pdev->dev.of_node,
+		&cpas_core->ahb_bus_client);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc);
+		goto client_cleanup;
+	}
+
+	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in axi setup, rc=%d", rc);
+		goto ahb_cleanup;
+	}
+
+	/* Need to vote first before enabling clocks */
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true);
+	if (rc)
+		goto axi_cleanup;
+
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc);
+		goto remove_default_vote;
+	}
+
+	if (internal_ops->get_hw_info) {
+		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc);
+			goto disable_soc_res;
+		}
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_info");
+		rc = -EINVAL;
+		goto disable_soc_res;
+	}
+
+	rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv,
+		&cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps));
+	if (rc)
+		goto disable_soc_res;
+
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
+		goto remove_default_vote;
+	}
+
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+	if (rc)
+		goto axi_cleanup;
+
+	*hw_intf = cpas_hw_intf;
+	return 0;
+
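+	/*
+	 * Error unwind: each label below reverses one setup step from the
+	 * sequence above, falling through in reverse (LIFO) order.
+	 */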
+disable_soc_res:
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
+remove_default_vote:
+	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+deinit_platform_res:
+	cam_cpas_node_tree_cleanup(cpas_core, cpas_hw->soc_info.soc_private);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+	CAM_ERR(CAM_CPAS, "failed in hw probe");
+	return rc;
+}
+
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		CAM_ERR(CAM_CPAS, "cpas interface not initialized");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
+		return -EINVAL;
+	}
+
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_node_tree_cleanup(cpas_core, cpas_hw->soc_info.soc_private);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+
+	return 0;
+}

+ 212 - 0
drivers/cam_cpas/cam_cpas_hw.h

@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_HW_H_
+#define _CAM_CPAS_HW_H_
+
+#include <dt-bindings/msm/msm-camera.h>
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_common_util.h"
+
+#define CAM_CPAS_INFLIGHT_WORKS              5
+#define CAM_CPAS_MAX_CLIENTS                 40
+#define CAM_CPAS_MAX_AXI_PORTS               6
+#define CAM_CPAS_MAX_TREE_LEVELS             4
+#define CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT   32
+#define CAM_CPAS_PATH_DATA_MAX               38
+#define CAM_CPAS_TRANSACTION_MAX             2
+
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL)
+
+#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
+#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
+
+#define CAM_CPAS_CLIENT_VALID(indx) \
+	(((indx) >= 0) && ((indx) < CAM_CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)        \
+	((CAM_CPAS_CLIENT_VALID(indx)) && \
+	(cpas_core->cpas_client[indx]->registered))
+#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx)          \
+	((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \
+	(cpas_core->cpas_client[indx]->started))
+
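+/*
+ * Usage sketch (illustrative only, not part of this change): a client
+ * handle is currently a plain index into cpas_client[], so a typical
+ * guarded lookup looks like:
+ *
+ *	int indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+ *
+ *	if (!CAM_CPAS_CLIENT_VALID(indx))
+ *		return -EINVAL;
+ *	mutex_lock(&cpas_core->client_mutex[indx]);
+ */
+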
+/**
+ * enum cam_cpas_access_type - Enum for Register access type
+ */
+enum cam_cpas_access_type {
+	CAM_REG_TYPE_READ,
+	CAM_REG_TYPE_WRITE,
+	CAM_REG_TYPE_READ_WRITE,
+};
+
+/**
+ * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
+ *
+ * @get_hw_info: Function pointer for getting hw info
+ * @init_hw_version: Function pointer for hw init based on version
+ * @handle_irq: Function pointer for irq handling
+ * @setup_regbase: Function pointer for setting up regbase indices
+ * @power_on: Function pointer for hw core specific power on settings
+ * @power_off: Function pointer for hw core specific power off settings
+ *
+ */
+struct cam_cpas_internal_ops {
+	int (*get_hw_info)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	int (*init_hw_version)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	irqreturn_t (*handle_irq)(int irq_num, void *data);
+	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
+		int32_t regbase_index[], int32_t num_reg_map);
+	int (*power_on)(struct cam_hw_info *cpas_hw);
+	int (*power_off)(struct cam_hw_info *cpas_hw);
+};
+
+/**
+ * struct cam_cpas_reg : CPAS register info
+ *
+ * @enable: Whether this reg info needs to be enabled
+ * @access_type: Register access type
+ * @masked_value: Whether this register write/read is based on mask, shift
+ * @offset: Register offset
+ * @mask: Mask for this register value
+ * @shift: Shift for this register value
+ * @value: Register value
+ *
+ */
+struct cam_cpas_reg {
+	bool enable;
+	enum cam_cpas_access_type access_type;
+	bool masked_value;
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t shift;
+	uint32_t value;
+};
+
+/**
+ * struct cam_cpas_client : CPAS Client structure info
+ *
+ * @data: Client register params
+ * @registered: Whether client has registered with cpas
+ * @started: Whether client has streamed on
+ * @tree_node_valid: Indicates whether the client has at least one valid tree node
+ * @ahb_level: Determined/Applied ahb level for the client
+ * @axi_vote: Determined/Applied axi vote for the client
+ * @axi_port: Client's parent axi port
+ * @tree_node: All granular path voting nodes for the client
+ *
+ */
+struct cam_cpas_client {
+	struct cam_cpas_register_params data;
+	bool registered;
+	bool started;
+	bool tree_node_valid;
+	enum cam_vote_level ahb_level;
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas_axi_port *axi_port;
+	struct cam_cpas_tree_node *tree_node[CAM_CPAS_PATH_DATA_MAX]
+		[CAM_CPAS_TRANSACTION_MAX];
+};
+
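+/*
+ * Illustrative lookup (variable names are made up): a client's voting node
+ * is indexed by the folded path data type and the transaction type, e.g.
+ *
+ *	struct cam_cpas_tree_node *node =
+ *		client->tree_node[path_data_type][transac_type];
+ *
+ * with a NULL entry meaning the client has no vote on that path.
+ */
+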
+/**
+ * struct cam_cpas_bus_client : Bus client information
+ *
+ * @src: Bus master/src id
+ * @dst: Bus slave/dst id
+ * @pdata: Bus pdata information
+ * @client_id: Bus client id
+ * @num_usecases: Number of use cases for this client
+ * @num_paths: Number of paths for this client
+ * @curr_vote_level: current voted index
+ * @dyn_vote: Whether dynamic voting enabled
+ * @lock: Mutex lock used while voting on this client
+ * @valid: Whether bus client is valid
+ * @name: Name of the bus client
+ *
+ */
+struct cam_cpas_bus_client {
+	int src;
+	int dst;
+	struct msm_bus_scale_pdata *pdata;
+	uint32_t client_id;
+	int num_usecases;
+	int num_paths;
+	unsigned int curr_vote_level;
+	bool dyn_vote;
+	struct mutex lock;
+	bool valid;
+	const char *name;
+};
+
+/**
+ * struct cam_cpas_axi_port : AXI port information
+ *
+ * @axi_port_name: Name of this AXI port
+ * @bus_client: bus client info for this port
+ * @ib_bw_voting_needed: if this port can update ib bw dynamically
+ * @axi_port_node: Node representing AXI Port info in device tree
+ * @ab_bw: AB bw value for this port
+ * @ib_bw: IB bw value for this port
+ * @additional_bw: Additional bandwidth to cover non-hw cpas clients
+ */
+struct cam_cpas_axi_port {
+	const char *axi_port_name;
+	struct cam_cpas_bus_client bus_client;
+	bool ib_bw_voting_needed;
+	struct device_node *axi_port_node;
+	uint64_t ab_bw;
+	uint64_t ib_bw;
+	uint64_t additional_bw;
+};
+
+/**
+ * struct cam_cpas : CPAS core data structure info
+ *
+ * @hw_caps: CPAS hw capabilities
+ * @cpas_client: Array of pointers to CPAS clients info
+ * @client_mutex: Mutex for accessing client info
+ * @tree_lock: Mutex lock for accessing CPAS node tree
+ * @num_clients: Total number of clients that CPAS supports
+ * @num_axi_ports: Total number of axi ports found in device tree
+ * @registered_clients: Number of Clients registered currently
+ * @streamon_clients: Number of Clients that are in start state currently
+ * @regbase_index: Register base indices for CPAS register base IDs
+ * @ahb_bus_client: AHB Bus client info
+ * @axi_port: AXI port info for a specific axi index
+ * @internal_ops: CPAS HW internal ops
+ * @work_queue: Work queue handle
+ * @irq_count: Number of CPAS irqs currently being handled
+ * @irq_count_wq: Wait queue to wait on for in-flight irq handling to finish
+ *
+ */
+struct cam_cpas {
+	struct cam_cpas_hw_caps hw_caps;
+	struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS];
+	struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS];
+	struct mutex tree_lock;
+	uint32_t num_clients;
+	uint32_t num_axi_ports;
+	uint32_t registered_clients;
+	uint32_t streamon_clients;
+	int32_t regbase_index[CAM_CPAS_REG_MAX];
+	struct cam_cpas_bus_client ahb_bus_client;
+	struct cam_cpas_axi_port axi_port[CAM_CPAS_MAX_AXI_PORTS];
+	struct cam_cpas_internal_ops internal_ops;
+	struct workqueue_struct *work_queue;
+	atomic_t irq_count;
+	wait_queue_head_t irq_count_wq;
+};
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+
+int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw);
+
+#endif /* _CAM_CPAS_HW_H_ */

+ 128 - 0
drivers/cam_cpas/cam_cpas_hw_intf.h

@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_HW_INTF_H_
+#define _CAM_CPAS_HW_INTF_H_
+
+#include <linux/platform_device.h>
+
+#include "cam_cpas_api.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_debug_util.h"
+
+/* Number of times to retry while polling */
+#define CAM_CPAS_POLL_RETRY_CNT 5
+/* Minimum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MIN_USECS 200
+/* Maximum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MAX_USECS 250
+
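+/*
+ * Intended polling pattern (a minimal sketch under assumed names for base,
+ * offset and mask; not code from this driver):
+ *
+ *	for (i = 0; i < CAM_CPAS_POLL_RETRY_CNT; i++) {
+ *		if (cam_io_r_mb(base + offset) & mask)
+ *			break;
+ *		usleep_range(CAM_CPAS_POLL_MIN_USECS,
+ *			CAM_CPAS_POLL_MAX_USECS);
+ *	}
+ */
+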
+/**
+ * enum cam_cpas_hw_type - Enum for CPAS HW type
+ */
+enum cam_cpas_hw_type {
+	CAM_HW_CPASTOP,
+	CAM_HW_CAMSSTOP,
+};
+
+/**
+ * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
+ */
+enum cam_cpas_hw_cmd_process {
+	CAM_CPAS_HW_CMD_REGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_REG_WRITE,
+	CAM_CPAS_HW_CMD_REG_READ,
+	CAM_CPAS_HW_CMD_AHB_VOTE,
+	CAM_CPAS_HW_CMD_AXI_VOTE,
+	CAM_CPAS_HW_CMD_INVALID,
+};
+
+/**
+ * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
+ *
+ * @client_handle: Client handle
+ * @reg_base: Register base type
+ * @offset: Register offset
+ * @value: Register value
+ * @mb: Whether to do operation with memory barrier
+ *
+ */
+struct cam_cpas_hw_cmd_reg_read_write {
+	uint32_t client_handle;
+	enum cam_cpas_reg_base reg_base;
+	uint32_t offset;
+	uint32_t value;
+	bool mb;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB voting info
+ *
+ */
+struct cam_cpas_hw_cmd_ahb_vote {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
+ *
+ * @client_handle: Client handle
+ * @axi_vote: axi bandwidth vote
+ *
+ */
+struct cam_cpas_hw_cmd_axi_vote {
+	uint32_t client_handle;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB vote info to apply at start
+ * @axi_vote: AXI vote info to apply at start
+ *
+ */
+struct cam_cpas_hw_cmd_start {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_stop {
+	uint32_t client_handle;
+};
+
+/**
+ * struct cam_cpas_hw_caps : CPAS HW capabilities
+ *
+ * @camera_family: Camera family type
+ * @camera_version: Camera version
+ * @cpas_version: CPAS version
+ * @camera_capability: Camera hw capabilities
+ *
+ */
+struct cam_cpas_hw_caps {
+	uint32_t camera_family;
+	struct cam_hw_version camera_version;
+	struct cam_hw_version cpas_version;
+	uint32_t camera_capability;
+};
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf);
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
+
+#endif /* _CAM_CPAS_HW_INTF_H_ */

+ 725 - 0
drivers/cam_cpas/cam_cpas_intf.c

@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cpas.h>
+#include <cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CAM_CPAS_DEV_NAME    "cam-cpas"
+#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
+
+/**
+ * struct cam_cpas_intf : CPAS interface
+ *
+ * @pdev: Platform device
+ * @subdev: Subdev info
+ * @hw_intf: CPAS HW interface
+ * @hw_caps: CPAS HW capabilities
+ * @intf_lock: CPAS interface mutex
+ * @open_cnt: CPAS subdev open count
+ * @probe_done: Whether CPAS probe completed
+ *
+ */
+struct cam_cpas_intf {
+	struct platform_device *pdev;
+	struct cam_subdev subdev;
+	struct cam_hw_intf *hw_intf;
+	struct cam_cpas_hw_caps hw_caps;
+	struct mutex intf_lock;
+	uint32_t open_cnt;
+	bool probe_done;
+};
+
+static struct cam_cpas_intf *g_cpas_intf;
+
+const char *cam_cpas_axi_util_path_type_to_string(
+	uint32_t path_data_type)
+{
+	switch (path_data_type) {
+	/* IFE Paths */
+	case CAM_AXI_PATH_DATA_IFE_LINEAR:
+		return "IFE_LINEAR";
+	case CAM_AXI_PATH_DATA_IFE_VID:
+		return "IFE_VID";
+	case CAM_AXI_PATH_DATA_IFE_DISP:
+		return "IFE_DISP";
+	case CAM_AXI_PATH_DATA_IFE_STATS:
+		return "IFE_STATS";
+	case CAM_AXI_PATH_DATA_IFE_RDI0:
+		return "IFE_RDI0";
+	case CAM_AXI_PATH_DATA_IFE_RDI1:
+		return "IFE_RDI1";
+	case CAM_AXI_PATH_DATA_IFE_RDI2:
+		return "IFE_RDI2";
+	case CAM_AXI_PATH_DATA_IFE_RDI3:
+		return "IFE_RDI3";
+	case CAM_AXI_PATH_DATA_IFE_PDAF:
+		return "IFE_PDAF";
+	case CAM_AXI_PATH_DATA_IFE_PIXEL_RAW:
+		return "IFE_PIXEL_RAW";
+
+	/* IPE Paths */
+	case CAM_AXI_PATH_DATA_IPE_RD_IN:
+		return "IPE_RD_IN";
+	case CAM_AXI_PATH_DATA_IPE_RD_REF:
+		return "IPE_RD_REF";
+	case CAM_AXI_PATH_DATA_IPE_WR_VID:
+		return "IPE_WR_VID";
+	case CAM_AXI_PATH_DATA_IPE_WR_DISP:
+		return "IPE_WR_DISP";
+	case CAM_AXI_PATH_DATA_IPE_WR_REF:
+		return "IPE_WR_REF";
+
+	/* Common Paths */
+	case CAM_AXI_PATH_DATA_ALL:
+		return "DATA_ALL";
+	default:
+		return "IFE_PATH_INVALID";
+	}
+}
+EXPORT_SYMBOL(cam_cpas_axi_util_path_type_to_string);
+
+const char *cam_cpas_axi_util_trans_type_to_string(
+	uint32_t transac_type)
+{
+	switch (transac_type) {
+	case CAM_AXI_TRANSACTION_READ:
+		return "TRANSAC_READ";
+	case CAM_AXI_TRANSACTION_WRITE:
+		return "TRANSAC_WRITE";
+	default:
+		return "TRANSAC_INVALID";
+	}
+}
+EXPORT_SYMBOL(cam_cpas_axi_util_trans_type_to_string);
+
+int cam_cpas_get_cpas_hw_version(uint32_t *hw_version)
+{
+	struct cam_hw_info *cpas_hw = NULL;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!hw_version) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK", hw_version);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv;
+
+	*hw_version = cpas_hw->soc_info.hw_version;
+
+	if (*hw_version == CAM_CPAS_TITAN_NONE) {
+		CAM_DBG(CAM_CPAS, "Didn't find a valid HW Version %d",
+			*hw_version);
+	}
+
+	return 0;
+}
+
+int cam_cpas_get_hw_info(uint32_t *camera_family,
+	struct cam_hw_version *camera_version,
+	struct cam_hw_version *cpas_version,
+	uint32_t *cam_caps)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!camera_family || !camera_version || !cpas_version || !cam_caps) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK",
+			camera_family, camera_version, cpas_version, cam_caps);
+		return -EINVAL;
+	}
+
+	*camera_family = g_cpas_intf->hw_caps.camera_family;
+	*camera_version = g_cpas_intf->hw_caps.camera_version;
+	*cpas_version = g_cpas_intf->hw_caps.cpas_version;
+	*cam_caps = g_cpas_intf->hw_caps.camera_capability;
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_cpas_get_hw_info);
+
+int cam_cpas_reg_write(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
+
+		cmd_reg_write.client_handle = client_handle;
+		cmd_reg_write.reg_base = reg_base;
+		cmd_reg_write.offset = offset;
+		cmd_reg_write.value = value;
+		cmd_reg_write.mb = mb;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_write);
+
+int cam_cpas_reg_read(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t *value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!value) {
+		CAM_ERR(CAM_CPAS, "Invalid arg value");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
+
+		cmd_reg_read.client_handle = client_handle;
+		cmd_reg_read.reg_base = reg_base;
+		cmd_reg_read.offset = offset;
+		cmd_reg_read.mb = mb;
+		cmd_reg_read.value = 0;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+			return rc;
+		}
+
+		*value = cmd_reg_read.value;
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_read);
+
+int cam_cpas_update_axi_vote(uint32_t client_handle,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!axi_vote) {
+		CAM_ERR(CAM_CPAS, "NULL axi vote");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
+
+		cmd_axi_vote.client_handle = client_handle;
+		cmd_axi_vote.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
+			sizeof(struct cam_cpas_hw_cmd_axi_vote));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_axi_vote);
+
+int cam_cpas_update_ahb_vote(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
+
+		cmd_ahb_vote.client_handle = client_handle;
+		cmd_ahb_vote.ahb_vote = ahb_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
+			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
+
+int cam_cpas_stop(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.stop) {
+		struct cam_cpas_hw_cmd_stop cmd_hw_stop;
+
+		cmd_hw_stop.client_handle = client_handle;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.stop(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
+			sizeof(struct cam_cpas_hw_cmd_stop));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid stop ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_stop);
+
+int cam_cpas_start(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!axi_vote) {
+		CAM_ERR(CAM_CPAS, "NULL axi vote");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.start) {
+		struct cam_cpas_hw_cmd_start cmd_hw_start;
+
+		cmd_hw_start.client_handle = client_handle;
+		cmd_hw_start.ahb_vote = ahb_vote;
+		cmd_hw_start.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.start(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
+			sizeof(struct cam_cpas_hw_cmd_start));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid start ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_start);
+
+int cam_cpas_unregister_client(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+			&client_handle, sizeof(uint32_t));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_unregister_client);
+
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
+			sizeof(struct cam_cpas_register_params));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_register_client);
+
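+/*
+ * Typical client lifecycle built from the exported calls above (a hedged
+ * sketch; the handle is assumed to come back through register_params, and
+ * error handling is elided):
+ *
+ *	cam_cpas_register_client(&params);
+ *	cam_cpas_start(handle, &ahb_vote, &axi_vote);
+ *	cam_cpas_update_axi_vote(handle, &axi_vote);
+ *	cam_cpas_stop(handle);
+ *	cam_cpas_unregister_client(handle);
+ */
+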
+int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
+	struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd) {
+		CAM_ERR(CAM_CPAS, "Invalid input cmd");
+		return -EINVAL;
+	}
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_cpas_query_cap query;
+
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
+			sizeof(query))) {
+			CAM_ERR(CAM_CPAS, "Failed in copy from user");
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_cpas_get_hw_info(&query.camera_family,
+			&query.camera_version, &query.cpas_version,
+			&query.reserved);
+		if (rc)
+			break;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
+			sizeof(query))) {
+			CAM_ERR(CAM_CPAS, "Failed in copy to user");
+			rc = -EFAULT;
+		}
+
+		break;
+	}
+	case CAM_SD_SHUTDOWN:
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt++;
+	CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt--;
+	CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CPAS,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
+	.ioctl = cam_cpas_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops cpas_subdev_ops = {
+	.core = &cpas_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
+	.open = cam_cpas_subdev_open,
+	.close = cam_cpas_subdev_close,
+};
+
+static int cam_cpas_subdev_register(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_subdev *subdev;
+
+	if (!g_cpas_intf)
+		return -EINVAL;
+
+	subdev = &g_cpas_intf->subdev;
+
+	subdev->name = CAM_CPAS_DEV_NAME;
+	subdev->pdev = pdev;
+	subdev->ops = &cpas_subdev_ops;
+	subdev->internal_ops = &cpas_subdev_intern_ops;
+	subdev->token = g_cpas_intf;
+	subdev->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;
+
+	rc = cam_register_subdev(subdev);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
+			CAM_CPAS_DEV_NAME);
+		return rc;
+	}
+
+	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
+	return rc;
+}
+
+static int cam_cpas_dev_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_hw_caps *hw_caps;
+	struct cam_hw_intf *hw_intf;
+	int rc;
+
+	if (g_cpas_intf) {
+		CAM_ERR(CAM_CPAS, "cpas dev proble already done");
+		return -EALREADY;
+	}
+
+	g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
+	if (!g_cpas_intf)
+		return -ENOMEM;
+
+	mutex_init(&g_cpas_intf->intf_lock);
+	g_cpas_intf->pdev = pdev;
+
+	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
+	if (rc || (g_cpas_intf->hw_intf == NULL)) {
+		CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
+		goto error_destroy_mem;
+	}
+
+	hw_intf = g_cpas_intf->hw_intf;
+	hw_caps = &g_cpas_intf->hw_caps;
+	if (hw_intf->hw_ops.get_hw_caps) {
+		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			hw_caps, sizeof(struct cam_cpas_hw_caps));
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
+			goto error_hw_remove;
+		}
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
+		rc = -EINVAL;
+		goto error_hw_remove;
+	}
+
+	rc = cam_cpas_subdev_register(pdev);
+	if (rc)
+		goto error_hw_remove;
+
+	g_cpas_intf->probe_done = true;
+	CAM_DBG(CAM_CPAS,
+		"CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return rc;
+
+error_hw_remove:
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+error_destroy_mem:
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+	CAM_ERR(CAM_CPAS, "CPAS probe failed");
+	return rc;
+}
+
+static int cam_cpas_dev_remove(struct platform_device *dev)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_cpas_intf->intf_lock);
+	g_cpas_intf->probe_done = false;
+	cam_unregister_subdev(&g_cpas_intf->subdev);
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+	mutex_unlock(&g_cpas_intf->intf_lock);
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id cam_cpas_dt_match[] = {
+	{.compatible = "qcom,cam-cpas"},
+	{}
+};
+
+static struct platform_driver cam_cpas_driver = {
+	.probe = cam_cpas_dev_probe,
+	.remove = cam_cpas_dev_remove,
+	.driver = {
+		.name = CAM_CPAS_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cpas_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_cpas_dev_init_module(void)
+{
+	return platform_driver_register(&cam_cpas_driver);
+}
+
+static void __exit cam_cpas_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_cpas_driver);
+}
+
+module_init(cam_cpas_dev_init_module);
+module_exit(cam_cpas_dev_exit_module);
+MODULE_DESCRIPTION("MSM CPAS driver");
+MODULE_LICENSE("GPL v2");

+ 614 - 0
drivers/cam_cpas/cam_cpas_soc.c

@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+static uint cpas_dump;
+module_param(cpas_dump, uint, 0644);
+
+void cam_cpas_dump_axi_vote_info(
+	const struct cam_cpas_client *cpas_client,
+	const char *identifier,
+	struct cam_axi_vote *axi_vote)
+{
+	int i;
+
+	if (!cpas_dump)
+		return;
+
+	if (!axi_vote || (axi_vote->num_paths >
+		CAM_CPAS_MAX_PATHS_PER_CLIENT)) {
+		CAM_ERR(CAM_PERF, "Invalid num_paths %d",
+			axi_vote ? axi_vote->num_paths : -1);
+		return;
+	}
+
+	for (i = 0; i < axi_vote->num_paths; i++) {
+		CAM_INFO(CAM_PERF,
+		"Client [%s][%d] : [%s], Path=[%d] [%d], camnoc[%llu], mnoc_ab[%llu], mnoc_ib[%llu]",
+		cpas_client->data.identifier, cpas_client->data.cell_index,
+		identifier,
+		axi_vote->axi_path[i].path_data_type,
+		axi_vote->axi_path[i].transac_type,
+		axi_vote->axi_path[i].camnoc_bw,
+		axi_vote->axi_path[i].mnoc_ab_bw,
+		axi_vote->axi_path[i].mnoc_ib_bw);
+	}
+}
+
+void cam_cpas_util_debug_parse_data(
+	struct cam_cpas_private_soc *soc_private)
+{
+	int i, j;
+	struct cam_cpas_tree_node *curr_node = NULL;
+
+	if (!cpas_dump)
+		return;
+
+	for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) {
+		if (!soc_private->tree_node[i])
+			break;
+
+		curr_node = soc_private->tree_node[i];
+		CAM_INFO(CAM_CPAS,
+			"NODE cell_idx: %d, level: %d, name: %s, axi_port_idx: %d, merge_type: %d, parent_name: %s",
+			curr_node->cell_idx, curr_node->level_idx,
+			curr_node->node_name, curr_node->axi_port_idx,
+			curr_node->merge_type, curr_node->parent_node ?
+			curr_node->parent_node->node_name : "no parent");
+
+		if (curr_node->level_idx)
+			continue;
+
+		CAM_INFO(CAM_CPAS, "path_type: %d, transac_type: %s",
+			curr_node->path_data_type,
+			cam_cpas_axi_util_trans_type_to_string(
+			curr_node->path_trans_type));
+
+		for (j = 0; j < CAM_CPAS_PATH_DATA_MAX; j++) {
+			CAM_INFO(CAM_CPAS, "Constituent path: %d",
+				curr_node->constituent_paths[j] ? j : -1);
+		}
+	}
+
+	CAM_INFO(CAM_CPAS, "NUMBER OF NODES PARSED: %d", i);
+}
+
+int cam_cpas_node_tree_cleanup(struct cam_cpas *cpas_core,
+	struct cam_cpas_private_soc *soc_private)
+{
+	int i = 0;
+
+	for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) {
+		if (soc_private->tree_node[i]) {
+			of_node_put(soc_private->tree_node[i]->tree_dev_node);
+			kfree(soc_private->tree_node[i]);
+			soc_private->tree_node[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < CAM_CPAS_MAX_TREE_LEVELS; i++) {
+		of_node_put(soc_private->level_node[i]);
+		soc_private->level_node[i] = NULL;
+	}
+
+	of_node_put(soc_private->camera_bus_node);
+	soc_private->camera_bus_node = NULL;
+	mutex_destroy(&cpas_core->tree_lock);
+
+	return 0;
+}
+
+static int cam_cpas_util_path_type_to_idx(uint32_t *path_data_type)
+{
+	if (*path_data_type >= CAM_CPAS_PATH_DATA_CONSO_OFFSET)
+		*path_data_type = CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT +
+			(*path_data_type % CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT);
+	else
+		*path_data_type %= CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT;
+
+	if (*path_data_type >= CAM_CPAS_PATH_DATA_MAX) {
+		CAM_ERR(CAM_CPAS, "index Invalid: %d", path_data_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
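+/*
+ * Worked example for the folding above, assuming (for illustration) that
+ * CAM_CPAS_PATH_DATA_CONSO_OFFSET is a multiple of
+ * CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT (32): a consolidated type of
+ * CONSO_OFFSET + 3 folds to 32 + 3 = 35, which stays below
+ * CAM_CPAS_PATH_DATA_MAX (38), while granular types reduce modulo 32.
+ */
+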
+static int cam_cpas_parse_node_tree(struct cam_cpas *cpas_core,
+	struct device_node *of_node, struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *camera_bus_node;
+	struct device_node *level_node;
+	struct device_node *curr_node;
+	struct device_node *parent_node;
+	struct device_node *mnoc_node;
+	int mnoc_idx = 0;
+	uint32_t path_idx;
+	bool camnoc_max_needed = false;
+	struct cam_cpas_tree_node *curr_node_ptr = NULL;
+	struct cam_cpas_client *curr_client = NULL;
+	const char *client_name = NULL;
+	uint32_t client_idx = 0, cell_idx = 0, level_idx = 0;
+	int rc = 0, count = 0, i;
+
+	camera_bus_node = of_find_node_by_name(of_node, "camera-bus-nodes");
+	if (!camera_bus_node) {
+		CAM_ERR(CAM_CPAS, "Camera Bus node not found in cpas DT node");
+		return -EINVAL;
+	}
+
+	soc_private->camera_bus_node = camera_bus_node;
+
+	for_each_available_child_of_node(camera_bus_node, level_node) {
+		rc = of_property_read_u32(level_node, "level-index",
+			&level_idx);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Error raeding level idx rc: %d", rc);
+			return rc;
+		}
+		if (level_idx >= CAM_CPAS_MAX_TREE_LEVELS) {
+			CAM_ERR(CAM_CPAS, "Invalid level idx: %d", level_idx);
+			return -EINVAL;
+		}
+
+		soc_private->level_node[level_idx] = level_node;
+		camnoc_max_needed = of_property_read_bool(level_node,
+			"camnoc-max-needed");
+
+		for_each_available_child_of_node(level_node, curr_node) {
+			curr_node_ptr =
+				kzalloc(sizeof(struct cam_cpas_tree_node),
+				GFP_KERNEL);
+			if (!curr_node_ptr)
+				return -ENOMEM;
+
+			curr_node_ptr->tree_dev_node = curr_node;
+			rc = of_property_read_u32(curr_node, "cell-index",
+				&curr_node_ptr->cell_idx);
+			if (rc) {
+				CAM_ERR(CAM_CPAS, "Node index not found");
+				kfree(curr_node_ptr);
+				return rc;
+			}
+
+			if (curr_node_ptr->cell_idx >=
+				CAM_CPAS_MAX_TREE_NODES) {
+				CAM_ERR(CAM_CPAS, "Invalid cell idx: %d",
+					curr_node_ptr->cell_idx);
+				kfree(curr_node_ptr);
+				return -EINVAL;
+			}
+
+			soc_private->tree_node[curr_node_ptr->cell_idx] =
+				curr_node_ptr;
+			curr_node_ptr->level_idx = level_idx;
+
+			rc = of_property_read_string(curr_node, "node-name",
+				&curr_node_ptr->node_name);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"failed to read node-name rc=%d",
+					rc);
+				return rc;
+			}
+
+			curr_node_ptr->camnoc_max_needed = camnoc_max_needed;
+			rc = of_property_read_u32(curr_node, "bus-width-factor",
+				&curr_node_ptr->bus_width_factor);
+			if (rc)
+				curr_node_ptr->bus_width_factor = 1;
+
+			rc = of_property_read_u32(curr_node,
+				"traffic-merge-type",
+				&curr_node_ptr->merge_type);
+
+			curr_node_ptr->axi_port_idx = -1;
+			mnoc_node = of_find_node_by_name(curr_node,
+				"qcom,axi-port-mnoc");
+			if (mnoc_node) {
+				if (mnoc_idx >= CAM_CPAS_MAX_AXI_PORTS) {
+					CAM_ERR(CAM_CPAS,
+						"Invalid mnoc idx: %d",
+						mnoc_idx);
+					return -EINVAL;
+				}
+
+				cpas_core->axi_port[mnoc_idx].axi_port_node
+					= mnoc_node;
+				rc =  of_property_read_string(
+					curr_node, "qcom,axi-port-name",
+					&cpas_core->axi_port[mnoc_idx]
+					.axi_port_name);
+				if (rc) {
+					CAM_ERR(CAM_CPAS,
+					"failed to read mnoc-port-name rc=%d",
+						rc);
+					return rc;
+				}
+				cpas_core->axi_port
+					[mnoc_idx].ib_bw_voting_needed
+				= of_property_read_bool(curr_node,
+					"ib-bw-voting-needed");
+				curr_node_ptr->axi_port_idx = mnoc_idx;
+				mnoc_idx++;
+				cpas_core->num_axi_ports++;
+			}
+
+			rc = of_property_read_string(curr_node,
+				"client-name", &client_name);
+			if (!rc) {
+				rc = of_property_read_u32(curr_node,
+				"traffic-data", &curr_node_ptr->path_data_type);
+				if (rc) {
+					CAM_ERR(CAM_CPAS,
+						"Path Data type not found");
+					return rc;
+				}
+
+				rc = cam_cpas_util_path_type_to_idx(
+					&curr_node_ptr->path_data_type);
+				if (rc)
+					return rc;
+
+				rc = of_property_read_u32(curr_node,
+					"traffic-transaction-type",
+					&curr_node_ptr->path_trans_type);
+				if (rc) {
+					CAM_ERR(CAM_CPAS,
+						"Path Transac type not found");
+					return rc;
+				}
+
+				if (curr_node_ptr->path_trans_type >=
+					CAM_CPAS_TRANSACTION_MAX) {
+					CAM_ERR(CAM_CPAS,
+						"Invalid transac type: %d",
+						curr_node_ptr->path_trans_type);
+					return -EINVAL;
+				}
+
+				count = of_property_count_u32_elems(curr_node,
+					"constituent-paths");
+				for (i = 0; i < count; i++) {
+					rc = of_property_read_u32_index(
+						curr_node, "constituent-paths",
+						i, &path_idx);
+					if (rc) {
+						CAM_ERR(CAM_CPAS,
+						"No constituent path at %d", i);
+						return rc;
+					}
+
+					rc = cam_cpas_util_path_type_to_idx(
+						&path_idx);
+					if (rc)
+						return rc;
+
+					curr_node_ptr->constituent_paths
+						[path_idx] = true;
+				}
+
+				rc = cam_common_util_get_string_index(
+					soc_private->client_name,
+					soc_private->num_clients,
+					client_name, &client_idx);
+				if (rc) {
+					CAM_ERR(CAM_CPAS,
+					"client name not found in list: %s",
+					client_name);
+					return rc;
+				}
+
+				if (client_idx >= CAM_CPAS_MAX_CLIENTS)
+					return -EINVAL;
+
+				curr_client =
+					cpas_core->cpas_client[client_idx];
+				curr_client->tree_node_valid = true;
+				curr_client->tree_node
+					[curr_node_ptr->path_data_type]
+					[curr_node_ptr->path_trans_type] =
+					curr_node_ptr;
+				CAM_DBG(CAM_CPAS,
+					"CLIENT NODE ADDED: %d %d %s",
+					curr_node_ptr->path_data_type,
+					curr_node_ptr->path_trans_type,
+					client_name);
+			}
+
+			parent_node = of_parse_phandle(curr_node,
+				"parent-node", 0);
+			if (parent_node) {
+				of_property_read_u32(parent_node, "cell-index",
+					&cell_idx);
+				curr_node_ptr->parent_node =
+					soc_private->tree_node[cell_idx];
+			} else {
+				CAM_DBG(CAM_CPAS,
+					"no parent node at this level");
+			}
+		}
+	}
+	mutex_init(&cpas_core->tree_lock);
+	cam_cpas_util_debug_parse_data(soc_private);
+
+	return 0;
+}
+
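+/*
+ * Illustrative DT fragment for the parser above; node and property names
+ * match what is read here, all indices and values are made up:
+ *
+ *	camera-bus-nodes {
+ *		level1-nodes {
+ *			level-index = <1>;
+ *			camnoc-max-needed;
+ *			rt_node: camnoc-rt-node {
+ *				cell-index = <1>;
+ *				node-name = "camnoc-rt";
+ *			};
+ *		};
+ *		level0-nodes {
+ *			level-index = <0>;
+ *			ife-vid-node {
+ *				cell-index = <2>;
+ *				node-name = "ife-vid";
+ *				client-name = "ife0";
+ *				traffic-data = <CAM_AXI_PATH_DATA_IFE_VID>;
+ *				traffic-transaction-type =
+ *					<CAM_AXI_TRANSACTION_WRITE>;
+ *				parent-node = <&rt_node>;
+ *			};
+ *		};
+ *	};
+ */
+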
+int cam_cpas_get_custom_dt_info(struct cam_hw_info *cpas_hw,
+	struct platform_device *pdev, struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *of_node;
+	int count = 0, i = 0, rc = 0;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+
+	if (!soc_private || !pdev) {
+		CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
+			soc_private, pdev);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_string(of_node, "arch-compat",
+		&soc_private->arch_compat);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
+			pdev->name);
+		return rc;
+	}
+
+	soc_private->camnoc_axi_min_ib_bw = 0;
+	rc = of_property_read_u64(of_node,
+		"camnoc-axi-min-ib-bw",
+		&soc_private->camnoc_axi_min_ib_bw);
+	if (rc == -EOVERFLOW) {
+		soc_private->camnoc_axi_min_ib_bw = 0;
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-min-ib-bw",
+			(u32 *)&soc_private->camnoc_axi_min_ib_bw);
+	}
+
+	if (rc) {
+		CAM_DBG(CAM_CPAS,
+			"failed to read camnoc-axi-min-ib-bw rc:%d", rc);
+		soc_private->camnoc_axi_min_ib_bw =
+			CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+	}
+
+	CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
+		soc_private->camnoc_axi_min_ib_bw);
+
+	soc_private->client_id_based = of_property_read_bool(of_node,
+		"client-id-based");
+
+	count = of_property_count_strings(of_node, "client-names");
+	if (count <= 0) {
+		CAM_ERR(CAM_CPAS, "no client-names found");
+		return -EINVAL;
+	} else if (count > CAM_CPAS_MAX_CLIENTS) {
+		CAM_ERR(CAM_CPAS, "Number of clients %d greater than max %d",
+			count, CAM_CPAS_MAX_CLIENTS);
+		return -EINVAL;
+	}
+
+	soc_private->num_clients = count;
+	CAM_DBG(CAM_CPAS,
+		"arch-compat=%s, client_id_based = %d, num_clients=%d",
+		soc_private->arch_compat, soc_private->client_id_based,
+		soc_private->num_clients);
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-names", i, &soc_private->client_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
+			return -EINVAL;
+		}
+
+		cpas_core->cpas_client[i] =
+			kzalloc(sizeof(struct cam_cpas_client), GFP_KERNEL);
+		if (!cpas_core->cpas_client[i]) {
+			rc = -ENOMEM;
+			goto cleanup_clients;
+		}
+
+		CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
+			soc_private->client_name[i]);
+	}
+
+	soc_private->control_camnoc_axi_clk = of_property_read_bool(of_node,
+		"control-camnoc-axi-clk");
+
+	if (soc_private->control_camnoc_axi_clk) {
+		rc = of_property_read_u32(of_node, "camnoc-bus-width",
+			&soc_private->camnoc_bus_width);
+		if (rc || (soc_private->camnoc_bus_width == 0)) {
+			CAM_ERR(CAM_CPAS, "Bus width not found rc=%d, %d",
+				rc, soc_private->camnoc_bus_width);
+			goto cleanup_clients;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-clk-bw-margin-perc",
+			&soc_private->camnoc_axi_clk_bw_margin);
+
+		if (rc) {
+			/* this is not fatal, overwrite rc */
+			rc = 0;
+			soc_private->camnoc_axi_clk_bw_margin = 0;
+		}
+	}
+
+	CAM_DBG(CAM_CPAS,
+		"control_camnoc_axi_clk=%d, width=%d, margin=%d",
+		soc_private->control_camnoc_axi_clk,
+		soc_private->camnoc_bus_width,
+		soc_private->camnoc_axi_clk_bw_margin);
+
+	count = of_property_count_u32_elems(of_node, "vdd-corners");
+	if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
+		(of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
+		count)) {
+		const char *ahb_string;
+
+		for (i = 0; i < count; i++) {
+			rc = of_property_read_u32_index(of_node, "vdd-corners",
+				i, &soc_private->vdd_ahb[i].vdd_corner);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"vdd-corners failed at index=%d", i);
+				rc = -ENODEV;
+				goto cleanup_clients;
+			}
+
+			rc = of_property_read_string_index(of_node,
+				"vdd-corner-ahb-mapping", i, &ahb_string);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"no ahb-mapping at index=%d", i);
+				rc = -ENODEV;
+				goto cleanup_clients;
+			}
+
+			rc = cam_soc_util_get_level_from_string(ahb_string,
+				&soc_private->vdd_ahb[i].ahb_level);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"invalid ahb-string at index=%d", i);
+				rc = -EINVAL;
+				goto cleanup_clients;
+			}
+
+			CAM_DBG(CAM_CPAS,
+				"Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
+				soc_private->vdd_ahb[i].vdd_corner,
+				ahb_string, soc_private->vdd_ahb[i].ahb_level);
+		}
+
+		soc_private->num_vdd_ahb_mapping = count;
+	}
+
+	rc = cam_cpas_parse_node_tree(cpas_core, of_node, soc_private);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Node tree parsing failed rc: %d", rc);
+		goto cleanup_tree;
+	}
+
+	return 0;
+
+cleanup_tree:
+	cam_cpas_node_tree_cleanup(cpas_core, soc_private);
+cleanup_clients:
+	cam_cpas_util_client_cleanup(cpas_hw);
+	return rc;
+}
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, struct cam_hw_info *cpas_hw)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	if (soc_info->irq_line && !irq_handler) {
+		CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		cpas_hw);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
+			rc);
+		return rc;
+	}
+
+	soc_info->soc_private = kzalloc(sizeof(struct cam_cpas_private_soc),
+		GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+
+	rc = cam_cpas_get_custom_dt_info(cpas_hw, soc_info->pdev,
+		soc_info->soc_private);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
+
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		default_level, true);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info,
+		disable_clocks, disable_irq);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_irq_disable(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc);
+
+	return rc;
+}

+ 123 - 0
drivers/cam_cpas/cam_cpas_soc.h

@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_SOC_H_
+#define _CAM_CPAS_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_cpas_hw.h"
+
+#define CAM_REGULATOR_LEVEL_MAX 16
+#define CAM_CPAS_MAX_TREE_NODES 50
+
+/**
+ * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
+ *
+ * @vdd_corner : Voltage corner value
+ * @ahb_level : AHB vote level corresponds to this vdd_corner
+ *
+ */
+struct cam_cpas_vdd_ahb_mapping {
+	unsigned int vdd_corner;
+	enum cam_vote_level ahb_level;
+};
+
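+/*
+ * Populated from two parallel DT properties parsed in
+ * cam_cpas_get_custom_dt_info(); a made-up example (corner values and
+ * level strings are illustrative):
+ *
+ *	vdd-corners = <1 2 3>;
+ *	vdd-corner-ahb-mapping = "suspend", "svs", "turbo";
+ */
+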
+/**
+ * struct cam_cpas_tree_node : Generic CPAS tree node for BW voting
+ *
+ * @cell_idx: Index to identify node from device tree and its parent
+ * @level_idx: Index to identify at what level the node is present
+ * @axi_port_idx: Index to identify which axi port to vote the consolidated bw
+ * @node_name: Name of the node as given in device tree
+ * @path_data_type: Traffic type info from device tree (ife-vid, ife-disp etc)
+ * @path_trans_type: Transaction type info from device tree (rd, wr)
+ * @merge_type: Traffic merge type (calculation info) from device tree
+ * @bus_width_factor: Factor for accounting bus width in CAMNOC bw calculation
+ * @camnoc_bw: CAMNOC bw value at current node
+ * @mnoc_ab_bw: MNOC AB bw value at current node
+ * @mnoc_ib_bw: MNOC IB bw value at current node
+ * @ddr_ab_bw: DDR AB bw value at current node
+ * @ddr_ib_bw: DDR IB bw value at current node
+ * @camnoc_max_needed: If node is needed for CAMNOC BW calculation then true
+ * @constituent_paths: Constituent paths presence info from device tree
+ *     Ex: For CAM_CPAS_PATH_DATA_IFE_UBWC_STATS, index corresponding to
+ *     CAM_CPAS_PATH_DATA_IFE_VID, CAM_CPAS_PATH_DATA_IFE_DISP and
+ *     CAM_CPAS_PATH_DATA_IFE_STATS
+ * @tree_dev_node: Device node from devicetree for current tree node
+ * @parent_node: Pointer to node one or more level above the current level
+ *     (starting from end node of cpas client)
+ *
+ */
+struct cam_cpas_tree_node {
+	uint32_t cell_idx;
+	uint32_t level_idx;
+	int axi_port_idx;
+	const char *node_name;
+	uint32_t path_data_type;
+	uint32_t path_trans_type;
+	uint32_t merge_type;
+	uint32_t bus_width_factor;
+	uint64_t camnoc_bw;
+	uint64_t mnoc_ab_bw;
+	uint64_t mnoc_ib_bw;
+	uint64_t ddr_ab_bw;
+	uint64_t ddr_ib_bw;
+	bool camnoc_max_needed;
+	bool constituent_paths[CAM_CPAS_PATH_DATA_MAX];
+	struct device_node *tree_dev_node;
+	struct cam_cpas_tree_node *parent_node;
+};
+
+/**
+ * struct cam_cpas_private_soc : CPAS private DT info
+ *
+ * @arch_compat: ARCH compatible string
+ * @client_id_based: Whether clients are id based
+ * @num_clients: Number of clients supported
+ * @client_name: Client names
+ * @tree_node: Array of pointers to all tree nodes required to calculate
+ *      axi bw, arranged with help of cell index in device tree
+ * @camera_bus_node: Device tree node from cpas node
+ * @level_node: Device tree node for each level in camera_bus_node
+ * @num_vdd_ahb_mapping : Number of vdd to ahb level mapping supported
+ * @vdd_ahb : AHB level mapping info for the supported vdd levels
+ * @control_camnoc_axi_clk : Whether CPAS driver need to set camnoc axi clk freq
+ * @camnoc_bus_width : CAMNOC Bus width
+ * @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
+ *      camnoc axi clock
+ * @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
+ *
+ */
+struct cam_cpas_private_soc {
+	const char *arch_compat;
+	bool client_id_based;
+	uint32_t num_clients;
+	const char *client_name[CAM_CPAS_MAX_CLIENTS];
+	struct cam_cpas_tree_node *tree_node[CAM_CPAS_MAX_TREE_NODES];
+	struct device_node *camera_bus_node;
+	struct device_node *level_node[CAM_CPAS_MAX_TREE_LEVELS];
+	uint32_t num_vdd_ahb_mapping;
+	struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
+	bool control_camnoc_axi_clk;
+	uint32_t camnoc_bus_width;
+	uint32_t camnoc_axi_clk_bw_margin;
+	uint64_t camnoc_axi_min_ib_bw;
+};
+
+void cam_cpas_util_debug_parse_data(struct cam_cpas_private_soc *soc_private);
+void cam_cpas_dump_axi_vote_info(
+	const struct cam_cpas_client *cpas_client,
+	const char *identifier,
+	struct cam_axi_vote *axi_vote);
+int cam_cpas_node_tree_cleanup(struct cam_cpas *cpas_core,
+	struct cam_cpas_private_soc *soc_private);
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, struct cam_hw_info *cpas_hw);
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_CPAS_SOC_H_ */

+ 9 - 0
drivers/cam_cpas/camss_top/Makefile

@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o

+ 82 - 0
drivers/cam_cpas/camss_top/cam_camsstop_hw.c

@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->camera_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->camera_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
+
+	return 0;
+}
+
+int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camss", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMSS] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
+	internal_ops->init_hw_version = NULL;
+	internal_ops->handle_irq = NULL;
+	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
+	internal_ops->power_on = NULL;
+	internal_ops->power_off = NULL;
+
+	return 0;
+}

+ 9 - 0
drivers/cam_cpas/cpas_top/Makefile

@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o

+ 635 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.c

@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpastop_hw.h"
+#include "cam_io_util.h"
+#include "cam_cpas_soc.h"
+#include "cpastop100.h"
+#include "cpastop_v150_100.h"
+#include "cpastop_v170_110.h"
+#include "cpastop_v175_100.h"
+#include "cpastop_v175_101.h"
+#include "cpastop_v175_120.h"
+#include "cpastop_v175_130.h"
+#include "cpastop_v480_100.h"
+
+struct cam_camnoc_info *camnoc_info;
+
+#define CAMNOC_SLAVE_MAX_ERR_CODE 7
+static const char * const camnoc_slave_err_code[] = {
+	"Target Error",              /* err code 0 */
+	"Address decode error",      /* err code 1 */
+	"Unsupported request",       /* err code 2 */
+	"Disconnected target",       /* err code 3 */
+	"Security violation",        /* err code 4 */
+	"Hidden security violation", /* err code 5 */
+	"Timeout Error",             /* err code 6 */
+	"Unknown Error",             /* unknown err code */
+};
+
+static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+	hw_caps->camera_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+	hw_caps->camera_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
+	hw_caps->cpas_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->cpas_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->cpas_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
+	hw_caps->camera_capability = reg_value;
+
+	CAM_DBG(CAM_CPAS, "Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	soc_info->hw_version = CAM_CPAS_TITAN_NONE;
+
+	if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V100;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 1) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V110;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 2) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V120;
+	} else if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 5)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V100;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 1))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V101;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 2) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V120;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 3) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V130;
+	} else if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 5) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_150_V100;
+	} else if ((hw_caps->camera_version.major == 4) &&
+		(hw_caps->camera_version.minor == 8) &&
+		(hw_caps->camera_version.incr == 0)) {
+		soc_info->hw_version = CAM_CPAS_TITAN_480_V100;
+	}
+
+	CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_info->hw_version);
+
+	return 0;
+}
+
+static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index = 0;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_cpas_top", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camnoc", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_camnoc_irq_slave_err_data *slave_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int err_code_index = 0;
+
+	if (!camnoc_info->err_logger) {
+		CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
+		return -EINVAL;
+	}
+
+	slave_err->mainctrl.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->mainctrl);
+
+	slave_err->errvld.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errvld);
+
+	slave_err->errlog0_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_low);
+
+	slave_err->errlog0_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_high);
+
+	slave_err->errlog1_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_low);
+
+	slave_err->errlog1_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_high);
+
+	slave_err->errlog2_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_low);
+
+	slave_err->errlog2_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_high);
+
+	slave_err->errlog3_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_low);
+
+	slave_err->errlog3_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_high);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
+		camnoc_info->err_logger->mainctrl,
+		slave_err->mainctrl.value,
+		camnoc_info->err_logger->errvld,
+		slave_err->errvld.value,
+		slave_err->mainctrl.stall_en,
+		slave_err->mainctrl.fault_en,
+		slave_err->errvld.err_vld);
+
+	err_code_index = slave_err->errlog0_low.err_code;
+	if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
+		err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
+		camnoc_info->err_logger->errlog0_low,
+		slave_err->errlog0_low.value,
+		camnoc_info->err_logger->errlog0_high,
+		slave_err->errlog0_high.value,
+		slave_err->errlog0_low.loginfo_vld,
+		slave_err->errlog0_low.word_error,
+		slave_err->errlog0_low.non_secure,
+		slave_err->errlog0_low.device,
+		slave_err->errlog0_low.opc,
+		slave_err->errlog0_low.err_code,
+		camnoc_slave_err_code[err_code_index],
+		slave_err->errlog0_low.sizef,
+		slave_err->errlog0_low.addr_space,
+		slave_err->errlog0_high.len1);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog1_low[0x%x 0x%x]  errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x]  errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x]  errlog3_high[0x%x 0x%x]",
+		camnoc_info->err_logger->errlog1_low,
+		slave_err->errlog1_low.value,
+		camnoc_info->err_logger->errlog1_high,
+		slave_err->errlog1_high.value,
+		camnoc_info->err_logger->errlog2_low,
+		slave_err->errlog2_low.value,
+		camnoc_info->err_logger->errlog2_high,
+		slave_err->errlog2_high.value,
+		camnoc_info->err_logger->errlog3_low,
+		slave_err->errlog3_low.value,
+		camnoc_info->err_logger->errlog3_high,
+		slave_err->errlog3_high.value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_enc_data *enc_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	enc_err->encerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc enc err [%d]: offset[0x%x] value[0x%x]",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		enc_err->encerr_status.value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_dec_data *dec_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	dec_err->decerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		dec_err->decerr_status.value,
+		dec_err->decerr_status.thr_err,
+		dec_err->decerr_status.fcl_err,
+		dec_err->decerr_status.len_md_err,
+		dec_err->decerr_status.format_err);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
+	struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
+{
+	CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");
+
+	return 0;
+}
+
+static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
+{
+	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
+	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
+	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;
+
+	return 0;
+}
+
+static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_clear);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_clear);
+	}
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_enable);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_enable);
+	}
+
+	return 0;
+}
+
+static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
+	struct cam_cpas_irq_data *irq_data)
+{
+	int i;
+	struct cam_cpas_client *cpas_client;
+	bool error_handled = false;
+
+	CAM_DBG(CAM_CPAS,
+		"Notify CB : num_clients=%d, registered=%d, started=%d",
+		cpas_core->num_clients, cpas_core->registered_clients,
+		cpas_core->streamon_clients);
+
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
+			cpas_client = cpas_core->cpas_client[i];
+			if (cpas_client->data.cam_cpas_client_cb) {
+				CAM_DBG(CAM_CPAS,
+					"Calling client CB %d : %d",
+					i, irq_data->irq_type);
+				error_handled =
+					cpas_client->data.cam_cpas_client_cb(
+					cpas_client->data.client_handle,
+					cpas_client->data.userdata,
+					irq_data);
+				if (error_handled)
+					break;
+			}
+		}
+	}
+}
+
+static void cam_cpastop_work(struct work_struct *work)
+{
+	struct cam_cpas_work_payload *payload;
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_hw_soc_info *soc_info;
+	int i;
+	enum cam_camnoc_hw_irq_type irq_type;
+	struct cam_cpas_irq_data irq_data;
+
+	payload = container_of(work, struct cam_cpas_work_payload, work);
+	if (!payload) {
+		CAM_ERR(CAM_CPAS, "NULL payload");
+		return;
+	}
+
+	cpas_hw = payload->hw;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_info = &cpas_hw->soc_info;
+
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		kfree(payload);
+		return;
+	}
+
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
+			(camnoc_info->irq_err[i].enable)) {
+			irq_type = camnoc_info->irq_err[i].irq_type;
+			CAM_ERR_RATE_LIMIT(CAM_CPAS,
+				"Error occurred, type=%d", irq_type);
+			memset(&irq_data, 0x0, sizeof(irq_data));
+			irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;
+
+			switch (irq_type) {
+			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
+				cam_cpastop_handle_errlogger(
+					cpas_core, soc_info,
+					&irq_data.u.slave_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+				cam_cpastop_handle_ubwc_enc_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.enc_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+				cam_cpastop_handle_ubwc_dec_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.dec_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
+				cam_cpastop_handle_ahb_timeout_err(
+					cpas_hw, &irq_data.u.ahb_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
+				CAM_DBG(CAM_CPAS, "TEST IRQ");
+				break;
+			default:
+				CAM_ERR(CAM_CPAS, "Invalid IRQ type");
+				break;
+			}
+
+			cam_cpastop_notify_clients(cpas_core, &irq_data);
+
+			payload->irq_status &=
+				~camnoc_info->irq_err[i].sbm_port;
+		}
+	}
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+	CAM_DBG(CAM_CPAS, "irq_count=%d\n", atomic_read(&cpas_core->irq_count));
+
+	if (payload->irq_status)
+		CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
+			payload->irq_status);
+
+	kfree(payload);
+}
+
+static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	struct cam_cpas_work_payload *payload;
+
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		return IRQ_HANDLED;
+	}
+
+	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
+	if (!payload)
+		goto done;
+
+	payload->irq_status = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);
+
+	payload->hw = cpas_hw;
+	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
+
+	if (TEST_IRQ_ENABLE)
+		cam_cpastop_disable_test_irq(cpas_hw);
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	queue_work(cpas_core->work_queue, &payload->work);
+done:
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+
+	return IRQ_HANDLED;
+}
+
+static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].flag_out_set0_low);
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int rc = 0;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list =
+		camnoc_info->errata_wa_list;
+
+	if (!errata_wa_list)
+		return 0;
+
+	if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
+		struct cam_cpas_hw_errata_wa *errata_wa =
+			&errata_wa_list->camnoc_flush_slave_pending_trans;
+
+		rc = cam_io_poll_value_wmask(
+			soc_info->reg_map[camnoc_index].mem_base +
+			errata_wa->data.reg_info.offset,
+			errata_wa->data.reg_info.value,
+			errata_wa->data.reg_info.mask,
+			CAM_CPAS_POLL_RETRY_CNT,
+			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
+		if (rc) {
+			CAM_DBG(CAM_CPAS,
+				"camnoc flush slave pending trans failed");
+			/* Do not return error, passthrough */
+			rc = 0;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+
+	CAM_DBG(CAM_CPAS,
+		"hw_version=0x%x Camera Version %d.%d.%d, cpas version %d.%d.%d",
+		soc_info->hw_version,
+		hw_caps->camera_version.major,
+		hw_caps->camera_version.minor,
+		hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major,
+		hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr);
+
+	switch (soc_info->hw_version) {
+	case CAM_CPAS_TITAN_170_V100:
+		camnoc_info = &cam170_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_170_V110:
+		camnoc_info = &cam170_cpas110_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V100:
+		camnoc_info = &cam175_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V101:
+		camnoc_info = &cam175_cpas101_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V120:
+		camnoc_info = &cam175_cpas120_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V130:
+		camnoc_info = &cam175_cpas130_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_150_V100:
+		camnoc_info = &cam150_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_480_V100:
+		camnoc_info = &cam480_cpas100_camnoc_info;
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
+			hw_caps->camera_version.major,
+			hw_caps->camera_version.minor,
+			hw_caps->camera_version.incr);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
+	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
+	internal_ops->handle_irq = cam_cpastop_handle_irq;
+	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
+	internal_ops->power_on = cam_cpastop_poweron;
+	internal_ops->power_off = cam_cpastop_poweroff;
+
+	return 0;
+}
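
cam_cpastop_handle_irq() above runs in hard-IRQ context, so it only snapshots the SBM status into a GFP_ATOMIC payload, re-arms the IRQs, and defers all decoding to cam_cpastop_work() on a workqueue; the irq_count atomic keeps that bottom half from racing with power-off. A stripped-down sketch of the same top-half/bottom-half pattern (the foo_* names and the status read are hypothetical, not part of this driver):

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_payload {
	u32 status;                /* snapshot taken in hard-IRQ context */
	struct work_struct work;
};

static u32 foo_read_status(void *data)
{
	return 0;                  /* stands in for a readl_relaxed() */
}

static void foo_work(struct work_struct *work)
{
	struct foo_payload *p =
		container_of(work, struct foo_payload, work);

	/* decode p->status and notify clients in process context */
	kfree(p);
}

static irqreturn_t foo_isr(int irq, void *data)
{
	struct foo_payload *p = kzalloc(sizeof(*p), GFP_ATOMIC);

	if (!p)
		return IRQ_HANDLED;

	p->status = foo_read_status(data);
	INIT_WORK(&p->work, foo_work);
	queue_work(system_wq, &p->work); /* real driver uses its own queue */

	return IRQ_HANDLED;
}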

+ 268 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.h

@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPASTOP_HW_H_
+#define _CAM_CPASTOP_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw.h"
+
+/**
+ * enum cam_camnoc_hw_irq_type - Enum for camnoc error types
+ *
+ * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                                 1 QHB port) has an error logger. The error
+ *                                 observed at any slave port is logged into
+ *                                 the error logger register and an IRQ is
+ *                                 triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE0 or IFE2
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE1 or IFE3
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR   : Triggered if any UBWC error
+ *                                               is detected in IFE0 write path
+ * @CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR: Triggered if any UBWC error
+ *                                               is detected in IFE1 write path
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS
+ *                                               UBWC decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP
+ *                                               slave times out after 4000
+ *                                               AHB cycles
+ * @CAM_CAMNOC_HW_IRQ_RESERVED1                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_RESERVED2                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST              : To test the IRQ logic
+ */
+enum cam_camnoc_hw_irq_type {
+	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
+		CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
+		CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+	CAM_CAMNOC_HW_IRQ_RESERVED1,
+	CAM_CAMNOC_HW_IRQ_RESERVED2,
+	CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+};
+
+/**
+ * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
+ *         settings like QoS, LUT mappings need to be configured for
+ *         each of these ports.
+ *
+ * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
+ * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
+ * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IFE_LINEAR: Indicates linear data from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE_UBWC_STATS: Indicates UBWC+stats from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE_RDI_WR: Indicates RDI write data from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE_RDI_RD: Indicates RDI read data from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE0123_RDI_WRITE: RDI write only for all IFEx
+ * @CAM_CAMNOC_IFE0_NRDI_WRITE: IFE0 non-RDI write
+ * @CAM_CAMNOC_IFE01_RDI_READ: IFE0/1 RDI READ
+ * @CAM_CAMNOC_IFE1_NRDI_WRITE: IFE1 non-RDI write
+ * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE_VID_DISP_WRITE: Indicates IPE's VID/DISP Write HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE0_RD: Indicates IPE's Read0 HW connection to camnoc
+ * @CAM_CAMNOC_IPE1_BPS_RD: Indicates IPE's Read1 + BPS Read HW connection
+ *         to camnoc
+ * @CAM_CAMNOC_IPE_BPS_WR: Indicates IPE+BPS Write HW connection to camnoc
+ * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
+ * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
+ * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
+ */
+enum cam_camnoc_port_type {
+	CAM_CAMNOC_CDM,
+	CAM_CAMNOC_IFE02,
+	CAM_CAMNOC_IFE13,
+	CAM_CAMNOC_IFE_LINEAR,
+	CAM_CAMNOC_IFE_UBWC_STATS,
+	CAM_CAMNOC_IFE_RDI_WR,
+	CAM_CAMNOC_IFE_RDI_RD,
+	CAM_CAMNOC_IFE0123_RDI_WRITE,
+	CAM_CAMNOC_IFE0_NRDI_WRITE,
+	CAM_CAMNOC_IFE01_RDI_READ,
+	CAM_CAMNOC_IFE1_NRDI_WRITE,
+	CAM_CAMNOC_IPE_BPS_LRME_READ,
+	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+	CAM_CAMNOC_IPE_VID_DISP_WRITE,
+	CAM_CAMNOC_IPE0_RD,
+	CAM_CAMNOC_IPE1_BPS_RD,
+	CAM_CAMNOC_IPE_BPS_WR,
+	CAM_CAMNOC_JPEG,
+	CAM_CAMNOC_FD,
+	CAM_CAMNOC_ICP,
+};
+
+/**
+ * struct cam_camnoc_specific : CPAS camnoc specific settings
+ *
+ * @port_type: Port type
+ * @enable: Whether to enable settings for this connection
+ * @priority_lut_low: Priority Low LUT mapping for this connection
+ * @priority_lut_high: Priority High LUT mapping for this connection
+ * @urgency: Urgency (QoS) settings for this connection
+ * @danger_lut: Danger LUT mapping for this connection
+ * @safe_lut: Safe LUT mapping for this connection
+ * @ubwc_ctl: UBWC control settings for this connection
+ * @flag_out_set0_low: Flag out set0 low settings for this connection
+ *
+ */
+struct cam_camnoc_specific {
+	enum cam_camnoc_port_type port_type;
+	bool enable;
+	struct cam_cpas_reg priority_lut_low;
+	struct cam_cpas_reg priority_lut_high;
+	struct cam_cpas_reg urgency;
+	struct cam_cpas_reg danger_lut;
+	struct cam_cpas_reg safe_lut;
+	struct cam_cpas_reg ubwc_ctl;
+	struct cam_cpas_reg flag_out_set0_low;
+};
+
+/**
+ * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs
+ *
+ * @sbm_enable: SBM settings for IRQ enable
+ * @sbm_status: SBM settings for IRQ status
+ * @sbm_clear: SBM settings for IRQ clear
+ *
+ */
+struct cam_camnoc_irq_sbm {
+	struct cam_cpas_reg sbm_enable;
+	struct cam_cpas_reg sbm_status;
+	struct cam_cpas_reg sbm_clear;
+};
+
+/**
+ * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
+ *
+ * @irq_type: Type of IRQ
+ * @enable: Whether to enable error settings for this IRQ
+ * @sbm_port: Corresponding SBM port for this IRQ
+ * @err_enable: Error enable settings for this IRQ
+ * @err_status: Error status settings for this IRQ
+ * @err_clear: Error clear settings for this IRQ
+ *
+ */
+struct cam_camnoc_irq_err {
+	enum cam_camnoc_hw_irq_type irq_type;
+	bool enable;
+	uint32_t sbm_port;
+	struct cam_cpas_reg err_enable;
+	struct cam_cpas_reg err_status;
+	struct cam_cpas_reg err_clear;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
+ *
+ * @enable: Whether to enable this errata workaround
+ * @data: HW Errata workaround data
+ *
+ */
+struct cam_cpas_hw_errata_wa {
+	bool enable;
+	union {
+		struct cam_cpas_reg reg_info;
+	} data;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
+ *
+ * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
+ *         camnoc slave pending transactions before turning off CPAS_TOP gdsc
+ *
+ */
+struct cam_cpas_hw_errata_wa_list {
+	struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
+};
+
+/**
+ * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets
+ *
+ * @mainctrl: Register offset for mainctrl
+ * @errvld: Register offset for errvld
+ * @errlog0_low: Register offset for errlog0_low
+ * @errlog0_high: Register offset for errlog0_high
+ * @errlog1_low: Register offset for errlog1_low
+ * @errlog1_high: Register offset for errlog1_high
+ * @errlog2_low: Register offset for errlog2_low
+ * @errlog2_high: Register offset for errlog2_high
+ * @errlog3_low: Register offset for errlog3_low
+ * @errlog3_high: Register offset for errlog3_high
+ *
+ */
+struct cam_camnoc_err_logger_info {
+	uint32_t mainctrl;
+	uint32_t errvld;
+	uint32_t errlog0_low;
+	uint32_t errlog0_high;
+	uint32_t errlog1_low;
+	uint32_t errlog1_high;
+	uint32_t errlog2_low;
+	uint32_t errlog2_high;
+	uint32_t errlog3_low;
+	uint32_t errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_info : Overall CAMNOC settings info
+ *
+ * @specific: Pointer to the array of CAMNOC port-specific settings
+ * @specific_size: Array size of the port-specific settings
+ * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
+ * @irq_err: Pointer to CAMNOC IRQ Error settings
+ * @irq_err_size: Array size of IRQ Error settings
+ * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
+ * @errata_wa_list: HW Errata workaround info
+ *
+ */
+struct cam_camnoc_info {
+	struct cam_camnoc_specific *specific;
+	int specific_size;
+	struct cam_camnoc_irq_sbm *irq_sbm;
+	struct cam_camnoc_irq_err *irq_err;
+	int irq_err_size;
+	struct cam_camnoc_err_logger_info *err_logger;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list;
+};
+
+/**
+ * struct cam_cpas_work_payload : Struct for cpas work payload data
+ *
+ * @hw: Pointer to HW info
+ * @irq_status: IRQ status value
+ * @irq_data: IRQ data
+ * @work: Work handle
+ *
+ */
+struct cam_cpas_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+#endif /* _CAM_CPASTOP_HW_H_ */
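
Several cam_cpas_reg entries in the per-SoC tables below set masked_value together with mask and shift. The helper that applies them, cam_cpas_util_reg_update(), lives in cam_cpas_hw.c and is not part of this hunk; under that caveat, the masked case presumably reduces to a read-modify-write along these lines:

#include <linux/io.h>

/*
 * Sketch, not the actual helper: apply a masked cam_cpas_reg entry
 * so that only the selected bit field of the register changes.
 */
static void apply_masked_reg(void __iomem *base,
	const struct cam_cpas_reg *reg)
{
	u32 val = readl_relaxed(base + reg->offset);

	val &= ~reg->mask;                             /* clear the field */
	val |= (reg->value << reg->shift) & reg->mask; /* set new value   */
	writel_relaxed(val, base + reg->offset);
}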

+ 531 - 0
drivers/cam_cpas/cpas_top/cpastop100.h

@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP100_H_
+#define _CPASTOP100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x3,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x3,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = false,
+	}
+};
+
+static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = true,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+struct cam_camnoc_info cam170_cpas100_camnoc_info = {
+	.specific = &cam_cpas100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
+		sizeof(cam_cpas100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas100_irq_sbm,
+	.irq_err = &cam_cpas100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas100_irq_err) /
+		sizeof(cam_cpas100_irq_err[0]),
+	.err_logger = &cam170_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam170_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP100_H_ */
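
The *_size initializers at the end of the table above open-code the division that the kernel's ARRAY_SIZE() macro from <linux/kernel.h> performs; the two spellings are equivalent:

#include <linux/kernel.h>

/*
 * The same cam170 table, with the open-coded sizeof division
 * replaced by ARRAY_SIZE(); the _alt name is illustrative only.
 */
struct cam_camnoc_info cam170_cpas100_camnoc_info_alt = {
	.specific = cam_cpas100_camnoc_specific,
	.specific_size = ARRAY_SIZE(cam_cpas100_camnoc_specific),
	.irq_sbm = &cam_cpas100_irq_sbm,
	.irq_err = cam_cpas100_irq_err,
	.irq_err_size = ARRAY_SIZE(cam_cpas100_irq_err),
	.err_logger = &cam170_cpas100_err_logger_offsets,
	.errata_wa_list = &cam170_cpas100_errata_wa_list,
};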

+ 530 - 0
drivers/cam_cpas/cpas_top/cpastop_v150_100.h

@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V150_100_H_
+#define _CPASTOP_V150_100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v150_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v150_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v150_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 0x5,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam150_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam150_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam150_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v150_100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas_v150_100_camnoc_specific) /
+		sizeof(cam_cpas_v150_100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas_v150_100_irq_sbm,
+	.irq_err = &cam_cpas_v150_100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas_v150_100_irq_err) /
+		sizeof(cam_cpas_v150_100_irq_err[0]),
+	.err_logger = &cam150_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam150_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V150_100_H_ */
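The `.specific_size` and `.irq_err_size` fields in the info structs above rely on the standard `sizeof(array) / sizeof(array[0])` element-count idiom (what the kernel wraps as `ARRAY_SIZE()`), so each table can grow without the bookkeeping going stale. A minimal, self-contained sketch of how a consumer might walk such a table is below; the `reg_info` struct and the `camnoc_read()`/`camnoc_write()` accessors are illustrative stand-ins, not the driver's actual types or API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Local stand-in mirroring the initializers above; illustrative only. */
    struct reg_info {
    	bool enable;
    	bool masked_value;
    	uint32_t offset;
    	uint32_t mask;
    	uint32_t shift;
    	uint32_t value;
    };

    extern uint32_t camnoc_read(uint32_t offset);            /* assumed MMIO read */
    extern void camnoc_write(uint32_t offset, uint32_t val); /* assumed MMIO write */

    /*
     * Apply one table entry: disabled entries are skipped outright, and
     * masked_value selects a read-modify-write of just the field instead
     * of a whole-register store.
     */
    static void apply_reg(const struct reg_info *r)
    {
    	uint32_t cur;

    	if (!r->enable)
    		return;

    	if (r->masked_value) {
    		cur = camnoc_read(r->offset);
    		cur = (cur & ~r->mask) | ((r->value << r->shift) & r->mask);
    		camnoc_write(r->offset, cur);
    	} else {
    		camnoc_write(r->offset, r->value);
    	}
    }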

+ 538 - 0
drivers/cam_cpas/cpas_top/cpastop_v170_110.h

@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V170_110_H_
+#define _CPASTOP_V170_110_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas110_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas110_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas110_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam170_cpas110_camnoc_info = {
+	.specific = &cam_cpas110_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas110_camnoc_specific) /
+		sizeof(cam_cpas110_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas110_irq_sbm,
+	.irq_err = &cam_cpas110_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas110_irq_err) /
+		sizeof(cam_cpas110_irq_err[0]),
+	.err_logger = &cam170_cpas110_err_logger_offsets,
+	.errata_wa_list = &cam170_cpas110_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V170_110_H_ */
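Throughout these headers the `sbm_enable.value` expression is just an OR of the per-port fault-enable bits: ports 0 through 5 sum to 0x3F, and the optional test port 8 adds 0x100 (with `sbm_clear.value` widening from 0x2 to 0x6 in the same case). Each `cam_camnoc_irq_err` entry's `sbm_port` is one of those same bits, so a handler can AND it directly against SBM_FAULTINSTATUS0_LOW to identify the error source. A tiny self-contained check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	const uint32_t ports0_5 = 0x1 | 0x2 | 0x4 | 0x8 | 0x10 | 0x20;

    	assert(ports0_5 == 0x3F);            /* TEST_IRQ_ENABLE == 0 */
    	assert((ports0_5 | 0x100) == 0x13F); /* with the port-8 test IRQ */
    	return 0;
    }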

+ 558 - 0
drivers/cam_cpas/cpas_top/cpastop_v175_100.h

@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V175_100_H_
+#define _CPASTOP_V175_100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v175_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v175_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam175_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v175_100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas_v175_100_camnoc_specific) /
+		sizeof(cam_cpas_v175_100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas_v175_100_irq_sbm,
+	.irq_err = &cam_cpas_v175_100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas_v175_100_irq_err) /
+		sizeof(cam_cpas_v175_100_irq_err[0]),
+	.err_logger = &cam175_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam175_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_100_H_ */
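For the `.masked_value = 1` urgency entries, the concrete field update is easy to verify by hand: with mask 0x70 and shift 4, a value of 3 lands in bits [6:4] as 0x30 while everything outside the mask is preserved. A short self-contained check of that read-modify-write (the starting register contents here are arbitrary, chosen only to show preservation):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	const uint32_t mask = 0x70, shift = 4, value = 3;
    	uint32_t reg = 0x8F; /* arbitrary prior contents */

    	reg = (reg & ~mask) | ((value << shift) & mask);
    	assert(reg == 0xBF); /* bits [6:4] now 0x3; bit 7 and [3:0] kept */
    	return 0;
    }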

+ 558 - 0
drivers/cam_cpas/cpas_top/cpastop_v175_101.h

@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V175_101_H_
+#define _CPASTOP_V175_101_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_101_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v175_101_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v175_101_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas101_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas101_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam175_cpas101_camnoc_info = {
+	.specific = &cam_cpas_v175_101_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas_v175_101_camnoc_specific) /
+		sizeof(cam_cpas_v175_101_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas_v175_101_irq_sbm,
+	.irq_err = &cam_cpas_v175_101_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas_v175_101_irq_err) /
+		sizeof(cam_cpas_v175_101_irq_err[0]),
+	.err_logger = &cam175_cpas101_err_logger_offsets,
+	.errata_wa_list = &cam175_cpas101_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_101_H_ */
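The `camnoc_flush_slave_pending_trans` errata entry (disabled in all of these headers) describes a sense register rather than a write: bits 17-19 of SidebandManager_SenseIn0_Low (mask 0xE0000) are expected to read 0 once pending slave transactions have drained. A hedged sketch of the poll loop such an entry implies; the accessor and the retry bound are assumptions, not the driver's actual implementation:

    #include <stdbool.h>
    #include <stdint.h>

    extern uint32_t camnoc_read(uint32_t offset); /* assumed MMIO read */

    /* Poll until the masked bits read the expected value (0), with a bound. */
    static bool camnoc_wait_slave_idle(void)
    {
    	const uint32_t offset = 0x2100;  /* SidebandManager_SenseIn0_Low */
    	const uint32_t mask   = 0xE0000; /* bits 17, 18, 19 */
    	int retries = 100;               /* assumed bound */

    	while (retries--) {
    		if ((camnoc_read(offset) & mask) == 0)
    			return true; /* all pending transactions drained */
    	}
    	return false;
    }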

+ 760 - 0
drivers/cam_cpas/cpas_top/cpastop_v175_120.h

@@ -0,0 +1,760 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V175_120_H_
+#define _CPASTOP_V175_120_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_120_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2240, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2248, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2280, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v175_120_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x4F08, /* ERRORLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x4F10, /* ERRORLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x4F18, /* ERRORLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x3BA0, /* SPECIFIC_IFE0_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x3B90, /* SPECIFIC_IFE0_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x3B98, /* SPECIFIC_IFE0_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x55A0, /* SPECIFIC_IFE1_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x5590, /* SPECIFIC_IFE1_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x5598, /* SPECIFIC_IFE1_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2F20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2F10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2F18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2BA0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2B90,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2B98, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v175_120_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4230, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4234, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			/* cdm_main_SpecificToNttpTr_Urgency_Low */
+			.offset = 0x4238,
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4240, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4248, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE0123_RDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IFE0123_PRIORITYLUT_LOW */
+			.offset = 0x3630,
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IFE0123_PRIORITYLUT_HIGH */
+			.offset = 0x3634,
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3638, /* SPECIFIC_IFE0123_URGENCY_LOW */
+			/* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3640, /* SPECIFIC_IFE0123_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3648, /* SPECIFIC_IFE0123_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set the ubwc config register;
+			 * the power-on default values already provide the
+			 * required settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE0_NRDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3A30, /* SPECIFIC_IFE0_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3A34, /* SPECIFIC_IFE0_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3A38, /* SPECIFIC_IFE0_URGENCY_LOW */
+			/* SPECIFIC_IFE0_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE0_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3A40, /* SPECIFIC_IFE0_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3A48, /* SPECIFIC_IFE0_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3B88, /* SPECIFIC_IFE0_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		/* IFE0/1 RDI READ PATH */
+		.port_type = CAM_CAMNOC_IFE01_RDI_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3230, /* SPECIFIC_IFE1_PRIORITYLUT_LOW */
+			.value = 0x44443333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3234, /* SPECIFIC_IFE1_PRIORITYLUT_HIGH */
+			.value = 0x66665555,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3238, /* SPECIFIC_IFE1_URGENCY_LOW */
+			/* SPECIFIC_IFE1_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IFE1_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3240, /* SPECIFIC_IFE1_DANGERLUT_LOW */
+			.value = 0x00000000,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3248, /* SPECIFIC_IFE1_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE1_NRDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5430, /* SPECIFIC_IFE1_WR_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IFE1_WR_PRIORITYLUT_HIGH */
+			.offset = 0x5434,
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x5438, /* SPECIFIC_IFE1_WR_URGENCY_LOW */
+			/* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x5440, /* SPECIFIC_IFE1_WR_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x5448, /* SPECIFIC_IFE1_WR_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5588, /* SPECIFIC_IFE1_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2E38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 0x3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2F08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A30, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A34, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2A38, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 0x3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A40, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A48, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2B88, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 0,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_VID_DISP_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_LOW */
+			.offset = 0x5E30,
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_HIGH */
+			.offset = 0x5E34,
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			/* SPECIFIC_IPE_VID_DISP_URGENCY_LOW */
+			.offset = 0x5E38,
+			/* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_DANGERLUT_LOW */
+			.offset = 0x5E40,
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_SAFELUT_LOW */
+			.offset = 0x5E48,
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5F88, /* SPECIFIC_IPE_VID_DISP_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2630, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2634, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2638, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2640, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2648, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E30, /* SPECIFIC_FD_PRIORITYLUT_LOW */
+			.value = 0x44444444,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E34, /* SPECIFIC_FD_PRIORITYLUT_HIGH */
+			.value = 0x44444444,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E38, /* SPECIFIC_FD_URGENCY_LOW */
+			.value = 0x44,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E40, /* SPECIFIC_FD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E48, /* SPECIFIC_FD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		/*SidebandManager_main_SidebandManager_FlagOutSet0_Low*/
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2288,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas120_err_logger_offsets = {
+	.mainctrl     =  0x4F08, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x4F10, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x4F20, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x4F24, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x4F28, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x4F2C, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x4F30, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x4F34, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x4F38, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x4F3C, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas120_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2300, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam175_cpas120_camnoc_info = {
+	.specific = &cam_cpas_v175_120_camnoc_specific[0],
+	.specific_size =  ARRAY_SIZE(cam_cpas_v175_120_camnoc_specific),
+	.irq_sbm = &cam_cpas_v175_120_irq_sbm,
+	.irq_err = &cam_cpas_v175_120_irq_err[0],
+	.irq_err_size = ARRAY_SIZE(cam_cpas_v175_120_irq_err),
+	.err_logger = &cam175_cpas120_err_logger_offsets,
+	.errata_wa_list = &cam175_cpas120_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_120_H_ */

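Note: entries in the `specific` tables above are applied generically by the CPAS hardware layer. The sketch below illustrates the intended semantics of the masked_value/mask/shift/value fields — a read-modify-write when masked_value is set, a plain 32-bit store otherwise. It is illustrative only, not code from this commit; the function name and the raw MMIO access stand in for the driver's own register helpers.

#include <stdint.h>

/* Apply one register setting from a cam_camnoc_specific entry.
 * camnoc_base is the ioremapped CAMNOC register base. */
static void camnoc_apply_reg(volatile uint32_t *camnoc_base,
			     uint32_t offset, int masked_value,
			     uint32_t mask, uint32_t shift, uint32_t value)
{
	volatile uint32_t *reg = camnoc_base + offset / sizeof(uint32_t);

	if (masked_value) {
		uint32_t cur = *reg;		/* read current register */

		cur &= ~mask;			/* clear the target field */
		cur |= (value << shift) & mask;	/* insert the new field value */
		*reg = cur;			/* write back */
	} else {
		*reg = value;			/* full-register write (LUTs) */
	}
}

For example, the CDM urgency entry above (offset 0x4238, mask 0x7, shift 0x0, value 0x2) would translate to camnoc_apply_reg(base, 0x4238, 1, 0x7, 0x0, 0x2) and touch only the low read-urgency field, while the priority/danger/safe LUTs are written whole.
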
+ 760 - 0
drivers/cam_cpas/cpas_top/cpastop_v175_130.h

@@ -0,0 +1,760 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V175_130_H_
+#define _CPASTOP_V175_130_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_130_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2240, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2248, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2280, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v175_130_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x4F08, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x4F10, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x4F18, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x3BA0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x3B90, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x3B98, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x55A0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x5590, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x5598, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2F20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2F10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2F18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2BA0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2B90,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2B98, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v175_130_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4230, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4234, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			/* cdm_main_SpecificToNttpTr_Urgency_Low */
+			.offset = 0x4238,
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4240, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4248, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE0123_RDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IFE0123_PRIORITYLUT_LOW */
+			.offset = 0x3630,
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IFE0123_PRIORITYLUT_HIGH */
+			.offset = 0x3634,
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3638, /* SPECIFIC_IFE0123_URGENCY_LOW */
+			/* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3640, /* SPECIFIC_IFE0123_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3648, /* SPECIFIC_IFE0123_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE0_NRDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3A30, /* SPECIFIC_IFE0_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3A34, /* SPECIFIC_IFE0_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3A38, /* SPECIFIC_IFE0_URGENCY_LOW */
+			/* SPECIFIC_IFE0_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE0_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3A40, /* SPECIFIC_IFE0_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3A48, /* SPECIFIC_IFE0_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3B88, /* SPECIFIC_IFE0_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		/* IFE0/1 RDI READ PATH */
+		.port_type = CAM_CAMNOC_IFE01_RDI_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3230, /* SPECIFIC_IFE1_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3234, /* SPECIFIC_IFE1_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3238, /* SPECIFIC_IFE1_URGENCY_LOW */
+			/* SPECIFIC_IFE1_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IFE1_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3240, /* SPECIFIC_IFE1_DANGERLUT_LOW */
+			.value = 0x00000000,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3248, /* SPECIFIC_IFE1_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE1_NRDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5430, /* SPECIFIC_IFE1_WR_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IFE1_WR_PRIORITYLUT_HIGH */
+			.offset = 0x5434,
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x5438, /* SPECIFIC_IFE1_WR_URGENCY_LOW */
+			/* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x5440, /* SPECIFIC_IFE1_WR_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x5448, /* SPECIFIC_IFE1_WR_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5588, /* SPECIFIC_IFE1_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2E38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 0x3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2F08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A30, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A34, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2A38, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 0x3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A40, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A48, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2B88, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 0,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_VID_DISP_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_LOW */
+			.offset = 0x5E30,
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_HIGH */
+			.offset = 0x5E34,
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			/* SPECIFIC_IPE_VID_DISP_URGENCY_LOW */
+			.offset = 0x5E38,
+			/* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_DANGERLUT_LOW */
+			.offset = 0x5E40,
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* SPECIFIC_IPE_VID_DISP_SAFELUT_LOW */
+			.offset = 0x5E48,
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5F88, /* SPECIFIC_IPE_VID_DISP_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2630, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2634, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2638, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2640, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2648, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E30, /* SPECIFIC_FD_PRIORITYLUT_LOW */
+			.value = 0x44444444,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E34, /* SPECIFIC_FD_PRIORITYLUT_HIGH */
+			.value = 0x44444444,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E38, /* SPECIFIC_FD_URGENCY_LOW */
+			.value = 0x44,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E40, /* SPECIFIC_FD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3E48, /* SPECIFIC_FD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		/*SidebandManager_main_SidebandManager_FlagOutSet0_Low*/
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2288,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas130_err_logger_offsets = {
+	.mainctrl     =  0x4F08, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x4F10, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x4F20, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x4F24, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x4F28, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x4F2C, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x4F30, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x4F34, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x4F38, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x4F3C, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas130_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2300, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam175_cpas130_camnoc_info = {
+	.specific = &cam_cpas_v175_130_camnoc_specific[0],
+	.specific_size =  ARRAY_SIZE(cam_cpas_v175_130_camnoc_specific),
+	.irq_sbm = &cam_cpas_v175_130_irq_sbm,
+	.irq_err = &cam_cpas_v175_130_irq_err[0],
+	.irq_err_size = ARRAY_SIZE(cam_cpas_v175_130_irq_err),
+	.err_logger = &cam175_cpas130_err_logger_offsets,
+	.errata_wa_list = &cam175_cpas130_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_130_H_ */

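Note: the irq_sbm and irq_err tables above pair each fault source with a bit in SBM_FAULTINSTATUS0_LOW (sbm_port) and, where the hardware provides one, a clear register. A minimal dispatch sketch follows; the struct is a reduced stand-in carrying only the fields the sketch needs, and all names are illustrative rather than taken from this commit.

#include <stddef.h>
#include <stdint.h>

/* Reduced stand-in for one cam_camnoc_irq_err entry. */
struct camnoc_err_entry {
	int enable;
	uint32_t sbm_port;	/* bit in SBM_FAULTINSTATUS0_LOW */
	uint32_t clear_offset;	/* 0 when there is no err_clear register */
	uint32_t clear_value;
};

static void camnoc_handle_faults(volatile uint32_t *camnoc_base,
				 uint32_t status_offset,
				 const struct camnoc_err_entry *tbl,
				 size_t count)
{
	/* e.g. status_offset = 0x2248 (SBM_FAULTINSTATUS0_LOW) on v175_130 */
	uint32_t status = camnoc_base[status_offset / sizeof(uint32_t)];
	size_t i;

	for (i = 0; i < count; i++) {
		if (!tbl[i].enable || !(status & tbl[i].sbm_port))
			continue;
		/* report the fault here, then clear it if a clear register
		 * exists, e.g. ERRLOGGER_ERRCLR_LOW (0x4F18) for the
		 * slave-error port on v175_130 */
		if (tbl[i].clear_offset)
			camnoc_base[tbl[i].clear_offset / sizeof(uint32_t)] =
				tbl[i].clear_value;
	}
}
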
+ 743 - 0
drivers/cam_cpas/cpas_top/cpastop_v480_100.h

@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V480_100_H_
+#define _CPASTOP_V480_100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v480_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = false,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v480_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v480_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* CDM_URGENCY_LOW */
+			.mask = 0x7, /* CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x630, /* FD_PRIORITYLUT_LOW */
+			.value = 0x44444444,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x634, /* FD_PRIORITYLUT_HIGH */
+			.value = 0x44444444,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x638, /* FD_URGENCY_LOW */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x640, /* FD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x648, /* FD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_LINEAR,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */
+			/* IFE_LINEAR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_LINEAR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_RDI_RD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */
+			/* IFE_RDI_RD_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_RDI_RD_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_RDI_WR,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* IFE_RDI_WR_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* IFE_RDI_WR_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1438, /* IFE_RDI_WR_URGENCY_LOW */
+			/* IFE_RDI_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_RDI_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1440, /* IFE_RDI_WR_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1448, /* IFE_RDI_WR_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_UBWC_STATS,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1A30, /* IFE_UBWC_STATS_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1A34, /* IFE_UBWC_STATS_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1A38, /* IFE_UBWC_STATS_URGENCY_LOW */
+			/* IFE_UBWC_STATS_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_UBWC_STATS_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1A40, /* IFE_UBWC_STATS_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1A48, /* IFE_UBWC_STATS_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1B88, /* IFE_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE0_RD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */
+			/* IPE0_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* IPE0_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE1_BPS_RD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */
+			/* IPE1_BPS_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* IPE1_BPS_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_WR,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */
+			/* IPE_BPS_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IPE_BPS_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E38, /* JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E40, /* JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E48, /* JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam480_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x7008, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x7010, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x7020, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x7024, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x7028, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x702C, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x7030, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x7034, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x7038, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x703C, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam480_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam480_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v480_100_camnoc_specific[0],
+	.specific_size = ARRAY_SIZE(cam_cpas_v480_100_camnoc_specific),
+	.irq_sbm = &cam_cpas_v480_100_irq_sbm,
+	.irq_err = &cam_cpas_v480_100_irq_err[0],
+	.irq_err_size = ARRAY_SIZE(cam_cpas_v480_100_irq_err),
+	.err_logger = &cam480_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam480_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V480_100_H_ */

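Note: the err_logger offset tables (cam480_cpas100_err_logger_offsets above, and the per-version v175 equivalents) feed the slave-error decode path declared in cam_cpas_api.h below. A sketch of that decode follows, assuming the struct declarations from these headers and using raw MMIO reads in place of the driver's register helpers.

#include <stdint.h>

/* Assumes struct cam_camnoc_err_logger_info and
 * struct cam_camnoc_irq_slave_err_data as declared in the headers in
 * this commit. Each read fills the union's raw .value, after which
 * named bitfields such as errlog0_low.err_code can be inspected. */
static void camnoc_read_slave_err(volatile uint32_t *camnoc_base,
	const struct cam_camnoc_err_logger_info *log,
	struct cam_camnoc_irq_slave_err_data *d)
{
	d->mainctrl.value     = camnoc_base[log->mainctrl / 4];
	d->errvld.value       = camnoc_base[log->errvld / 4];
	d->errlog0_low.value  = camnoc_base[log->errlog0_low / 4];
	d->errlog0_high.value = camnoc_base[log->errlog0_high / 4];
	d->errlog1_low.value  = camnoc_base[log->errlog1_low / 4];
	d->errlog1_high.value = camnoc_base[log->errlog1_high / 4];
	d->errlog2_low.value  = camnoc_base[log->errlog2_low / 4];
	d->errlog2_high.value = camnoc_base[log->errlog2_high / 4];
	d->errlog3_low.value  = camnoc_base[log->errlog3_low / 4];
	d->errlog3_high.value = camnoc_base[log->errlog3_high / 4];
}

A handler would typically check d->errvld.err_vld first, log the decoded fields, then write the ERRCLR register to re-arm the logger.
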
+ 543 - 0
drivers/cam_cpas/include/cam_cpas_api.h

@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_API_H_
+#define _CAM_CPAS_API_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <media/cam_cpas.h>
+#include "cam_soc_util.h"
+
+#define CAM_HW_IDENTIFIER_LENGTH 128
+
+/* Default AXI Bandwidth vote */
+#define CAM_CPAS_DEFAULT_AXI_BW 1024
+
+#define CAM_CPAS_MAX_PATHS_PER_CLIENT 15
+#define CAM_CPAS_API_PATH_DATA_STD_START 512
+
+/**
+ * enum cam_cpas_reg_base - Enum for register base identifier. These
+ *                          are the identifiers used in generic register
+ *                          write/read APIs provided by cpas driver.
+ */
+enum cam_cpas_reg_base {
+	CAM_CPAS_REG_CPASTOP,
+	CAM_CPAS_REG_CAMNOC,
+	CAM_CPAS_REG_CAMSS,
+	CAM_CPAS_REG_MAX
+};
+
+/**
+ * enum cam_cpas_hw_version - Enum for Titan CPAS HW Versions
+ */
+enum cam_cpas_hw_version {
+	CAM_CPAS_TITAN_NONE = 0,
+	CAM_CPAS_TITAN_150_V100 = 0x150100,
+	CAM_CPAS_TITAN_170_V100 = 0x170100,
+	CAM_CPAS_TITAN_170_V110 = 0x170110,
+	CAM_CPAS_TITAN_170_V120 = 0x170120,
+	CAM_CPAS_TITAN_175_V100 = 0x175100,
+	CAM_CPAS_TITAN_175_V101 = 0x175101,
+	CAM_CPAS_TITAN_175_V120 = 0x175120,
+	CAM_CPAS_TITAN_175_V130 = 0x175130,
+	CAM_CPAS_TITAN_480_V100 = 0x480100,
+	CAM_CPAS_TITAN_MAX
+};
+
+/**
+ * enum cam_camnoc_irq_type - Enum for camnoc irq types
+ *
+ * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                              1 QHB port) has an error logger. The error
+ *                              observed at any slave port is logged into
+ *                              the error logger register and an IRQ is
+ *                              triggered
+ * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE0 or IFE2 UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE1 or IFE3 UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR   : Triggered if any error detected
+ *                                            in the IFE0 UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR : Triggered if any error
+ *                                            detected in the IFE1 UBWC
+ *                                            encoder instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC decoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP slave
+ *                                            times out after 4000 AHB cycles
+ */
+enum cam_camnoc_irq_type {
+	CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+};
+
+/**
+ * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
+ *
+ * @mainctrl     : Err logger mainctrl info
+ * @errvld       : Err logger errvld info
+ * @errlog0_low  : Err logger errlog0_low info
+ * @errlog0_high : Err logger errlog0_high info
+ * @errlog1_low  : Err logger errlog1_low info
+ * @errlog1_high : Err logger errlog1_high info
+ * @errlog2_low  : Err logger errlog2_low info
+ * @errlog2_high : Err logger errlog2_high info
+ * @errlog3_low  : Err logger errlog3_low info
+ * @errlog3_high : Err logger errlog3_high info
+ *
+ */
+struct cam_camnoc_irq_slave_err_data {
+	union {
+		struct {
+			uint32_t stall_en : 1; /* bit 0 */
+			uint32_t fault_en : 1; /* bit 1 */
+			uint32_t rsv      : 30; /* bits 2-31 */
+		};
+		uint32_t value;
+	} mainctrl;
+	union {
+		struct {
+			uint32_t err_vld : 1; /* bit 0 */
+			uint32_t rsv     : 31; /* bits 1-31 */
+		};
+		uint32_t value;
+	} errvld;
+	union {
+		struct {
+			uint32_t loginfo_vld : 1; /* bit 0 */
+			uint32_t word_error  : 1; /* bit 1 */
+			uint32_t non_secure  : 1; /* bit 2 */
+			uint32_t device      : 1; /* bit 3 */
+			uint32_t opc         : 3; /* bits 4 - 6 */
+			uint32_t rsv0        : 1; /* bit 7 */
+			uint32_t err_code    : 3; /* bits 8 - 10 */
+			uint32_t sizef       : 3; /* bits 11 - 13 */
+			uint32_t rsv1        : 2; /* bits 14 - 15 */
+			uint32_t addr_space  : 6; /* bits 16 - 21 */
+			uint32_t rsv2        : 10; /* bits 22 - 31 */
+		};
+		uint32_t value;
+	} errlog0_low;
+	union {
+		struct {
+			uint32_t len1 : 10; /* bits 0 - 9 */
+			uint32_t rsv  : 22; /* bits 10 - 31 */
+		};
+		uint32_t value;
+	} errlog0_high;
+	union {
+		struct {
+			uint32_t path : 16; /* bits 0 - 15 */
+			uint32_t rsv  : 16; /* bits 16 - 31 */
+		};
+		uint32_t value;
+	} errlog1_low;
+	union {
+		struct {
+			uint32_t extid : 18; /* bits 0 - 17 */
+			uint32_t rsv   : 14; /* bits 18 - 31 */
+		};
+		uint32_t value;
+	} errlog1_high;
+	union {
+		struct {
+			uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog2_low;
+	union {
+		struct {
+			uint32_t errlog2_msb : 16; /* bits 0 - 15 */
+			uint32_t rsv         : 16; /* bits 16 - 31 */
+		};
+		uint32_t value;
+	} errlog2_high;
+	union {
+		struct {
+			uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog3_low;
+	union {
+		struct {
+			uint32_t errlog3_msb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
+ *
+ * @encerr_status : Encode error status
+ *
+ */
+struct cam_camnoc_irq_ubwc_enc_data {
+	union {
+		struct {
+			uint32_t encerrstatus : 3; /* bits 0 - 2 */
+			uint32_t rsv          : 29; /* bits 3 - 31 */
+		};
+		uint32_t value;
+	} encerr_status;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
+ *
+ * @decerr_status : Decoder error status
+ * @thr_err       : Set to 1 if
+ *                  At least one of the bflc_len fields in the bit stream
+ *                  exceeds
+ *                  its threshold value. This error is possible only for
+ *                  RGBA1010102, TP10, and RGB565 formats
+ * @fcl_err       : Set to 1 if
+ *                  Fast clear with a legal non-RGB format
+ * @len_md_err    : Set to 1 if
+ *                  The calculated burst length does not match burst length
+ *                  specified by the metadata value
+ * @format_err    : Set to 1 if
+ *                  Illegal format
+ *                  1. Bad format: 2, 3, 6
+ *                  2. For 32B MAL, metadata = 6
+ *                  3. For 32B MAL RGB565, metadata != 0, 1, 7
+ *                  4. For 64B MAL RGB565, metadata[3:1] == 1, 2
+ *
+ */
+struct cam_camnoc_irq_ubwc_dec_data {
+	union {
+		struct {
+			uint32_t thr_err    : 1; /* bit 0 */
+			uint32_t fcl_err    : 1; /* bit 1 */
+			uint32_t len_md_err : 1; /* bit 2 */
+			uint32_t format_err : 1; /* bit 3 */
+			uint32_t rsv        : 28; /* bits 4 - 31 */
+		};
+		uint32_t value;
+	} decerr_status;
+};
+
+struct cam_camnoc_irq_ahb_timeout_data {
+	uint32_t data;
+};
+
+/**
+ * struct cam_cpas_irq_data : CAMNOC IRQ data
+ *
+ * @irq_type  : To identify the type of IRQ
+ * @u         : Union of irq err data information
+ * @slave_err : Data for Slave error.
+ *              Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
+ * @enc_err   : Data for UBWC Encode error.
+ *              Valid if type is one of below:
+ *              CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
+ * @dec_err   : Data for UBWC Decode error.
+ *              Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
+ * @ahb_err   : Data for AHB timeout error.
+ *              Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
+ *
+ */
+struct cam_cpas_irq_data {
+	enum cam_camnoc_irq_type irq_type;
+	union {
+		struct cam_camnoc_irq_slave_err_data   slave_err;
+		struct cam_camnoc_irq_ubwc_enc_data    enc_err;
+		struct cam_camnoc_irq_ubwc_dec_data    dec_err;
+		struct cam_camnoc_irq_ahb_timeout_data ahb_err;
+	} u;
+};
+
+/**
+ * struct cam_cpas_register_params : Register params for cpas client
+ *
+ * @identifier        : Input identifier string which is the device label
+ *                      from dt like vfe, ife, jpeg etc
+ * @cell_index        : Input integer identifier pointing to the cell index
+ *                      from dt of the device. This can be used to form a
+ *                      unique string with @identifier like vfe0, ife1,
+ *                      jpeg0, etc
+ * @dev               : device handle
+ * @userdata          : Input private data, passed back as an argument
+ *                      in the callback.
+ * @cam_cpas_client_cb: Input callback pointer for triggering the
+ *                      callbacks from CPAS driver.
+ *                      @client_handle : CPAS client handle
+ *                      @userdata      : User data given at registration
+ *                      @irq_data      : Pointer to CAMNOC IRQ data
+ * @client_handle     : Output unique handle generated for this
+ *                      registration
+ *
+ */
+struct cam_cpas_register_params {
+	char            identifier[CAM_HW_IDENTIFIER_LENGTH];
+	uint32_t        cell_index;
+	struct device  *dev;
+	void           *userdata;
+	bool          (*cam_cpas_client_cb)(
+			uint32_t                  client_handle,
+			void                     *userdata,
+			struct cam_cpas_irq_data *irq_data);
+	uint32_t        client_handle;
+};
+
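/*
 * A minimal sketch, not part of this header, of a client-side CAMNOC
 * IRQ callback matching the cam_cpas_client_cb signature above. The
 * function name, the pr_err logging, and the return-value convention
 * (true = IRQ consumed) are illustrative assumptions.
 */
static bool my_cpas_irq_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data)
{
	switch (irq_data->irq_type) {
	case CAM_CAMNOC_IRQ_SLAVE_ERROR:
		/* Bitfields and the raw word are both reachable via the unions */
		pr_err("slave err: errlog0=0x%x opc=%u err_code=%u\n",
			irq_data->u.slave_err.errlog0_low.value,
			irq_data->u.slave_err.errlog0_low.opc,
			irq_data->u.slave_err.errlog0_low.err_code);
		break;
	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
		pr_err("ubwc decode err: 0x%x\n",
			irq_data->u.dec_err.decerr_status.value);
		break;
	case CAM_CAMNOC_IRQ_AHB_TIMEOUT:
		pr_err("ahb timeout: 0x%x\n", irq_data->u.ahb_err.data);
		break;
	default:
		/* The remaining types are UBWC encode errors */
		pr_err("camnoc irq %d, encerr 0x%x\n", irq_data->irq_type,
			irq_data->u.enc_err.encerr_status.value);
		break;
	}
	return true;
}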
+/**
+ * enum cam_vote_type - Enum for voting type
+ *
+ * @CAM_VOTE_ABSOLUTE : Absolute vote
+ * @CAM_VOTE_DYNAMIC  : Dynamic vote
+ */
+enum cam_vote_type {
+	CAM_VOTE_ABSOLUTE,
+	CAM_VOTE_DYNAMIC,
+};
+
+/**
+ * struct cam_ahb_vote : AHB vote
+ *
+ * @type  : AHB voting type.
+ *          CAM_VOTE_ABSOLUTE : vote based on the value set in 'level'
+ *          CAM_VOTE_DYNAMIC  : vote calculated dynamically from 'freq'
+ *                              and the client's 'dev' handle
+ * @level : AHB vote level
+ * @freq  : AHB vote dynamic frequency
+ *
+ */
+struct cam_ahb_vote {
+	enum cam_vote_type   type;
+	union {
+		enum cam_vote_level  level;
+		unsigned long        freq;
+	} vote;
+};
+
+/**
+ * struct cam_axi_vote : AXI vote
+ *
+ * @num_paths: Number of paths on which BW vote is sent to CPAS
+ * @axi_path: Per path BW vote info
+ *
+ */
+struct cam_axi_vote {
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_CPAS_MAX_PATHS_PER_CLIENT];
+};
+
+/**
+ * cam_cpas_register_client()
+ *
+ * @brief: API to register cpas client
+ *
+ * @register_params: Input params to register as a client to CPAS
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params);
+
+/**
+ * cam_cpas_unregister_client()
+ *
+ * @brief: API to unregister cpas client
+ *
+ * @client_handle: Client handle to be unregistered
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_unregister_client(uint32_t client_handle);
+
+/**
+ * cam_cpas_start()
+ *
+ * @brief: API to start cpas client hw. Clients have to vote for minimal
+ *     bandwidth requirements for AHB, AXI. Use cam_cpas_update_ahb_vote
+ *     to scale bandwidth after start.
+ *
+ * @client_handle: client cpas handle
+ * @ahb_vote     : Pointer to ahb vote info
+ * @axi_vote     : Pointer to axi bandwidth vote info
+ *
+ * If AXI vote is not applicable to a particular client, use the value exposed
+ * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_start(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote,
+	struct cam_axi_vote   *axi_vote);
+
+/**
+ * cam_cpas_stop()
+ *
+ * @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes
+ *     would be removed for this client on this call. Clients should not
+ *     use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
+ *     to remove their bandwidth vote.
+ *
+ * @client_handle: client cpas handle
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_stop(uint32_t client_handle);
+
+/**
+ * cam_cpas_update_ahb_vote()
+ *
+ * @brief: API to update AHB vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop in case a client wants
+ *     to scale to a different vote level. Do not use this function to
+ *     de-vote; removing a client's vote is implicit in cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @ahb_vote      : Pointer to ahb vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_ahb_vote(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote);
+
+/**
+ * cam_cpas_update_axi_vote()
+ *
+ * @brief: API to update AXI vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop in case a client wants
+ *     to scale to a different vote level. Do not use this function to
+ *     de-vote; removing a client's vote is implicit in cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @axi_vote      : Pointer to axi bandwidth vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_axi_vote(
+	uint32_t             client_handle,
+	struct cam_axi_vote *axi_vote);
+
+/**
+ * cam_cpas_reg_write()
+ *
+ * @brief: API to write a register value in CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg write with memory barrier
+ * @value         : Value to be written in register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_write(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                  value);
+
+/**
+ * cam_cpas_reg_read()
+ *
+ * @brief: API to read a register value from CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg read with memory barrier
+ * @value         : Value read from the register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_read(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                 *value);
+
+/**
+ * cam_cpas_get_hw_info()
+ *
+ * @brief: API to get camera hw information
+ *
+ * @camera_family  : Camera family type. One of
+ *                   CAM_FAMILY_CAMERA_SS
+ *                   CAM_FAMILY_CPAS_SS
+ * @camera_version : Camera platform version
+ * @cpas_version   : Camera cpas version
+ * @cam_caps       : Camera capability
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_hw_info(
+	uint32_t                 *camera_family,
+	struct cam_hw_version    *camera_version,
+	struct cam_hw_version    *cpas_version,
+	uint32_t                 *cam_caps);
+
+/**
+ * cam_cpas_get_cpas_hw_version()
+ *
+ * @brief: API to get camera cpas hw version
+ *
+ * @hw_version  : Camera cpas hw version
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_cpas_hw_version(
+	uint32_t                 *hw_version);
+
+/**
+ * cam_cpas_axi_util_path_type_to_string()
+ *
+ * @brief: API to get string for given path type
+ *
+ * @path_data_type  : Path type
+ *
+ * @return string.
+ *
+ */
+const char *cam_cpas_axi_util_path_type_to_string(
+	uint32_t path_data_type);
+
+/**
+ * cam_cpas_axi_util_trans_type_to_string()
+ *
+ * @brief: API to get string for given transaction type
+ *
+ * @path_data_type  : Transaction type
+ *
+ * @return string.
+ *
+ */
+const char *cam_cpas_axi_util_trans_type_to_string(
+	uint32_t path_data_type);
+
+
+#endif /* _CAM_CPAS_API_H_ */

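Taken together, the API above implies a register → start (with initial votes) → update → stop → unregister lifecycle. Below is a minimal sketch of that flow; CAM_SVS_VOTE is assumed to be one of the cam_vote_level values defined earlier in this header, the per-path AXI bandwidth fields are left unfilled because cam_axi_per_path_bw_vote is not shown in this hunk, and every my_-prefixed name is illustrative.

static int my_cpas_client_example(struct device *dev)
{
	struct cam_cpas_register_params params = {0};
	struct cam_ahb_vote ahb_vote = {0};
	struct cam_axi_vote axi_vote = {0};
	uint32_t handle;
	int rc;

	strlcpy(params.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH);
	params.cell_index = 0;
	params.dev = dev;
	params.userdata = NULL;
	params.cam_cpas_client_cb = my_cpas_irq_cb; /* sketch shown earlier */

	rc = cam_cpas_register_client(&params);
	if (rc)
		return rc;
	handle = params.client_handle; /* output of the register call */

	/* Vote minimally at start; scale later via the update APIs */
	ahb_vote.type = CAM_VOTE_ABSOLUTE;
	ahb_vote.vote.level = CAM_SVS_VOTE; /* assumed vote level name */
	axi_vote.num_paths = 1;
	/* axi_vote.axi_path[0] = ...; per-path BW fields not shown here */

	rc = cam_cpas_start(handle, &ahb_vote, &axi_vote);
	if (rc)
		goto unregister;

	/* ... program hw via cam_cpas_reg_write/read, rescale votes ... */
	rc = cam_cpas_update_ahb_vote(handle, &ahb_vote);

	cam_cpas_stop(handle); /* removes AHB/AXI votes implicitly */
unregister:
	cam_cpas_unregister_client(handle);
	return rc;
}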
+ 17 - 0
drivers/cam_fd/Makefile

@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_sync
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cdm
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd/fd_hw_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -I$(srctree)/techpack/camera/drivers/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_dev.o cam_fd_context.o

+ 249 - 0
drivers/cam_fd/cam_fd_context.c

@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_fd_context.h"
+#include "cam_trace.h"
+
+static const char fd_dev_name[] = "fd";
+
+/* Functions in Available state */
+static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Acquire dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/* Functions in Acquired state */
+static int __cam_fd_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+static int __cam_fd_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Start dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACTIVATED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/* Functions in Activated state */
+static int __cam_fd_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_fd_ctx_stop_dev_in_activated(ctx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	rc = __cam_fd_ctx_release_dev_in_acquired(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed to flush device, rc=%d", rc);
+
+	return rc;
+}
+
+static int __cam_fd_ctx_config_dev_in_activated(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc;
+
+	rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in buf done, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_fd_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_fd_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_fd_ctx_release_dev_in_acquired,
+			.config_dev = __cam_fd_ctx_config_dev_in_acquired,
+			.start_dev = __cam_fd_ctx_start_dev_in_acquired,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = { },
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
+			.release_dev = __cam_fd_ctx_release_dev_in_activated,
+			.config_dev = __cam_fd_ctx_config_dev_in_activated,
+			.flush_dev = __cam_fd_ctx_flush_dev_in_activated,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
+	},
+};
+
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc;
+
+	if (!base_ctx || !fd_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Context %pK %pK", base_ctx, fd_ctx);
+		return -EINVAL;
+	}
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	rc = cam_context_init(base_ctx, fd_dev_name, CAM_FD, ctx_id,
+		NULL, hw_intf, fd_ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
+		return rc;
+	}
+
+	fd_ctx->base = base_ctx;
+	base_ctx->ctx_priv = fd_ctx;
+	base_ctx->state_machine = cam_fd_ctx_state_machine;
+
+	return rc;
+}
+
+int cam_fd_context_deinit(struct cam_fd_context *fd_ctx)
+{
+	int rc = 0;
+
+	if (!fd_ctx || !fd_ctx->base) {
+		CAM_ERR(CAM_FD, "Invalid inputs %pK", fd_ctx);
+		return -EINVAL;
+	}
+
+	rc = cam_context_deinit(fd_ctx->base);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in base deinit, rc=%d", rc);
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	return rc;
+}

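The state table above is consumed by the shared cam_context layer, which is not part of this hunk. As a rough sketch of the dispatch pattern it enables, using only the fields already visible here (state, state_machine, and the per-state ioctl_ops):

static int my_ctx_dispatch_acquire(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	/* Pick the ops bound to the context's current state */
	struct cam_ctx_ops *ops = &ctx->state_machine[ctx->state];

	/*
	 * States that leave .acquire_dev unset reject the ioctl; for FD
	 * only the Available state populates it.
	 */
	if (!ops->ioctl_ops.acquire_dev)
		return -EPERM;

	return ops->ioctl_ops.acquire_dev(ctx, cmd);
}

The actual routing in cam_context.c may differ in detail; the key property is that each handler both performs the operation and advances ctx->state, so transitions not listed in the table are rejected by construction.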
+ 30 - 0
drivers/cam_fd/cam_fd_context.h

@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_CONTEXT_H_
+#define _CAM_FD_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+/**
+ * struct cam_fd_context - Face Detection context information
+ *
+ * @base     : Base context pointer for this FD context
+ * @req_base : List of base requests for this FD context
+ */
+struct cam_fd_context {
+	struct cam_context       *base;
+	struct cam_ctx_request    req_base[CAM_CTX_REQ_MAX];
+};
+
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id);
+int cam_fd_context_deinit(struct cam_fd_context *ctx);
+
+#endif /* _CAM_FD_CONTEXT_H_ */

+ 207 - 0
drivers/cam_fd/cam_fd_dev.c

@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_fd_hw_mgr_intf.h"
+
+#define CAM_FD_DEV_NAME "cam-fd"
+
+/**
+ * struct cam_fd_dev - FD device information
+ *
+ * @sd:         Subdev information
+ * @base_ctx:   List of base contexts
+ * @fd_ctx:     List of FD contexts
+ * @lock:       Mutex handle
+ * @open_cnt:   FD subdev open count
+ * @probe_done: Whether FD probe is completed
+ */
+struct cam_fd_dev {
+	struct cam_subdev     sd;
+	struct cam_context    base_ctx[CAM_CTX_MAX];
+	struct cam_fd_context fd_ctx[CAM_CTX_MAX];
+	struct mutex          lock;
+	uint32_t              open_cnt;
+	bool                  probe_done;
+};
+
+static struct cam_fd_dev g_fd_dev;
+
+static int cam_fd_dev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt++;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	return 0;
+}
+
+static int cam_fd_dev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt--;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	if (!node) {
+		CAM_ERR(CAM_FD, "Node ptr is NULL");
+		return -EINVAL;
+	}
+
+	cam_node_shutdown(node);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_fd_subdev_internal_ops = {
+	.open = cam_fd_dev_open,
+	.close = cam_fd_dev_close,
+};
+
+static int cam_fd_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	g_fd_dev.sd.internal_ops = &cam_fd_subdev_internal_ops;
+
+	/* Initialize the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_fd_dev.sd, pdev, CAM_FD_DEV_NAME,
+		CAM_FD_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD cam_subdev_probe failed, rc=%d", rc);
+		return rc;
+	}
+	node = (struct cam_node *) g_fd_dev.sd.token;
+
+	rc = cam_fd_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in initializing FD HW manager, rc=%d",
+			rc);
+		goto unregister_subdev;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_init(&g_fd_dev.fd_ctx[i],
+			&g_fd_dev.base_ctx[i], &node->hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_FD, "FD context init failed i=%d, rc=%d",
+				i, rc);
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_fd_dev.base_ctx, CAM_CTX_MAX,
+		CAM_FD_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD node init failed, rc=%d", rc);
+		goto deinit_ctx;
+	}
+
+	mutex_init(&g_fd_dev.lock);
+	g_fd_dev.probe_done = true;
+
+	CAM_DBG(CAM_FD, "Camera FD probe complete");
+
+	return 0;
+
+deinit_ctx:
+	for (--i; i >= 0; i--) {
+		if (cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]))
+			CAM_ERR(CAM_FD, "FD context %d deinit failed", i);
+	}
+unregister_subdev:
+	if (cam_subdev_remove(&g_fd_dev.sd))
+		CAM_ERR(CAM_FD, "Failed in subdev remove");
+
+	return rc;
+}
+
+static int cam_fd_dev_remove(struct platform_device *pdev)
+{
+	int i, rc;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]);
+		if (rc)
+			CAM_ERR(CAM_FD, "FD context %d deinit failed, rc=%d",
+				i, rc);
+	}
+
+	rc = cam_fd_hw_mgr_deinit(pdev->dev.of_node);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in hw mgr deinit, rc=%d", rc);
+
+	rc = cam_subdev_remove(&g_fd_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister failed, rc=%d", rc);
+
+	mutex_destroy(&g_fd_dev.lock);
+	g_fd_dev.probe_done = false;
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_dt_match[] = {
+	{
+		.compatible = "qcom,cam-fd"
+	},
+	{}
+};
+
+static struct platform_driver cam_fd_driver = {
+	.probe = cam_fd_dev_probe,
+	.remove = cam_fd_dev_remove,
+	.driver = {
+		.name = "cam_fd",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_fd_dev_init_module(void)
+{
+	return platform_driver_register(&cam_fd_driver);
+}
+
+static void __exit cam_fd_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_driver);
+}
+
+module_init(cam_fd_dev_init_module);
+module_exit(cam_fd_dev_exit_module);
+MODULE_DESCRIPTION("MSM FD driver");
+MODULE_LICENSE("GPL v2");

+ 17 - 0
drivers/cam_fd/fd_hw_mgr/Makefile

@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_sync
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cdm
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd/fd_hw_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -I$(srctree)/techpack/camera/drivers/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_mgr.o

+ 1949 - 0
drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c

@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <cam_cpas.h>
+#include <cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_mgr_intf.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_trace.h"
+
+static struct cam_fd_hw_mgr g_fd_hw_mgr;
+
+static int cam_fd_mgr_util_packet_validate(struct cam_packet *packet,
+	size_t remain_len)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_FD, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_FD,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_FD,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet, remain_len)) {
+		CAM_ERR(CAM_FD, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	/* All buffers must come through io config, do not support patching */
+	if (packet->num_patches || !packet->num_io_configs) {
+		CAM_ERR(CAM_FD, "wrong number of cmd/patch info: %u %u",
+			packet->num_cmd_buf, packet->num_patches);
+		return -EINVAL;
+	}
+
+	/* KMD Buf index can never be greater than or equal to num cmd bufs */
+	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) {
+		CAM_ERR(CAM_FD, "Invalid kmd index %d (%d)",
+			packet->kmd_cmd_buf_index, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if ((packet->header.op_code & 0xff) !=
+		CAM_PACKET_OPCODES_FD_FRAME_UPDATE) {
+		CAM_ERR(CAM_FD, "Invalid op_code %u",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		/*
+		 * We can allow 0 length cmd buffer. This can happen in case
+		 * umd gives an empty cmd buffer as kmd buffer
+		 */
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_GENERIC) &&
+			(cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)) {
+			CAM_ERR(CAM_FD, "Invalid meta_data [%d] %u", i,
+				cmd_desc[i].meta_data);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_FD, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_put_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	ctx_ptr = *fd_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*fd_ctx = NULL;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_fd_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		CAM_ERR(CAM_FD, "No more free fd hw mgr ctx");
+		rc = -EPERM;
+	}
+	*fd_ctx = ctx_ptr;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_put_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	req_ptr = *frame_req;
+	if (req_ptr) {
+		list_del_init(&req_ptr->list);
+		list_add_tail(&req_ptr->list, src_list);
+	}
+	*frame_req = NULL;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_fd_mgr_frame_request, list);
+		list_del_init(&req_ptr->list);
+	} else {
+		CAM_DBG(CAM_FD, "Frame req not available");
+		rc = -EPERM;
+	}
+	*frame_req = req_ptr;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_fd_device **hw_device)
+{
+	if (!hw_mgr || !hw_ctx || !hw_device) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			hw_device);
+		return -EINVAL;
+	}
+
+	if ((hw_ctx->device_index < 0) ||
+		(hw_ctx->device_index >= CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid device indx %d", hw_ctx->device_index);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "ctx_index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_release_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_release_args hw_release_args;
+	int rc;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.release) {
+		hw_release_args.hw_ctx = hw_ctx;
+		hw_release_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.release(
+			hw_device->hw_intf->hw_priv, &hw_release_args,
+			sizeof(hw_release_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW release %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid release function");
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_ctxts--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	hw_ctx->device_index = -1;
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx,
+	struct cam_fd_acquire_dev_info *fd_acquire_args)
+{
+	int i, rc;
+	struct cam_fd_hw_reserve_args hw_reserve_args;
+	struct cam_fd_device *hw_device = NULL;
+
+	if (!hw_mgr || !hw_ctx || !fd_acquire_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			fd_acquire_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	/* Check if a device is free which can satisfy the requirements */
+	for (i = 0; i < hw_mgr->num_devices; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		CAM_DBG(CAM_FD,
+			"[%d] : num_ctxts=%d, modes=%d, raw_results=%d",
+			i, hw_device->num_ctxts,
+			hw_device->hw_caps.supported_modes,
+			hw_device->hw_caps.raw_results_available);
+		if ((hw_device->num_ctxts == 0) &&
+			(fd_acquire_args->mode &
+			hw_device->hw_caps.supported_modes) &&
+			(!fd_acquire_args->get_raw_results ||
+			hw_device->hw_caps.raw_results_available)) {
+			CAM_DBG(CAM_FD, "Found dedicated HW Index=%d", i);
+			hw_device->num_ctxts++;
+			break;
+		}
+	}
+
+	/*
+	 * We couldn't find a free HW which meets requirement, now check if
+	 * there is a HW which meets acquire requirements
+	 */
+	if (i == hw_mgr->num_devices) {
+		for (i = 0; i < hw_mgr->num_devices; i++) {
+			hw_device = &hw_mgr->hw_device[i];
+			if ((fd_acquire_args->mode &
+				hw_device->hw_caps.supported_modes) &&
+				(!fd_acquire_args->get_raw_results ||
+				hw_device->hw_caps.raw_results_available)) {
+				hw_device->num_ctxts++;
+				CAM_DBG(CAM_FD, "Found sharing HW Index=%d", i);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if ((i == hw_mgr->num_devices) || !hw_device) {
+		CAM_ERR(CAM_FD, "Couldn't acquire HW %d %d",
+			fd_acquire_args->mode,
+			fd_acquire_args->get_raw_results);
+		return -EBUSY;
+	}
+
+	CAM_DBG(CAM_FD, "Device index %d selected for this acquire", i);
+
+	/* Check if we can reserve this HW */
+	if (hw_device->hw_intf->hw_ops.reserve) {
+		hw_reserve_args.hw_ctx = hw_ctx;
+		hw_reserve_args.mode = fd_acquire_args->mode;
+		rc = hw_device->hw_intf->hw_ops.reserve(
+			hw_device->hw_intf->hw_priv, &hw_reserve_args,
+			sizeof(hw_reserve_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW reserve %d", rc);
+			return rc;
+		}
+		hw_ctx->ctx_hw_private = hw_reserve_args.ctx_hw_private;
+	} else {
+		CAM_ERR(CAM_FD, "Invalid reserve function");
+		return -EPERM;
+	}
+
+	/* Update required info in hw context */
+	hw_ctx->device_index = i;
+
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_pdev_get_hw_intf(struct device_node *of_node,
+	int i, struct cam_hw_intf **device_hw_intf)
+{
+	struct device_node *device_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *hw_intf = NULL;
+	const char *name = NULL;
+	int rc;
+
+	rc = of_property_read_string_index(of_node, "compat-hw-name", i, &name);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Getting dev object name failed %d %d", i, rc);
+		return rc;
+	}
+
+	device_node = of_find_node_by_name(NULL, name);
+	if (!device_node) {
+		CAM_ERR(CAM_FD, "Cannot find node in dtsi %s", name);
+		return -ENODEV;
+	}
+
+	child_pdev = of_find_device_by_node(device_node);
+	if (!child_pdev) {
+		CAM_ERR(CAM_FD, "Failed to find device on bus %s",
+			device_node->name);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	hw_intf = (struct cam_hw_intf *)platform_get_drvdata(child_pdev);
+	if (!hw_intf) {
+		CAM_ERR(CAM_FD, "No driver data for child device");
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	CAM_DBG(CAM_FD, "child type %d index %d child_intf %pK",
+		hw_intf->hw_type, hw_intf->hw_idx, hw_intf);
+
+	if (hw_intf->hw_idx >= CAM_FD_HW_MAX) {
+		CAM_ERR(CAM_FD, "hw_idx invalid %d", hw_intf->hw_idx);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	rc = 0;
+	*device_hw_intf = hw_intf;
+
+put_node:
+	of_node_put(device_node);
+
+	return rc;
+}
+
+static int cam_fd_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_fd_hw_cmd_prestart_args *prestart_args =
+		(struct cam_fd_hw_cmd_prestart_args *)user_data;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_FD, "Invalid blob info %pK %u", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	if (!prestart_args) {
+		CAM_ERR(CAM_FD, "Invalid user data");
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED: {
+		uint32_t *get_raw_results = (uint32_t *)blob_data;
+
+		if (sizeof(uint32_t) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
+				sizeof(uint32_t), blob_size);
+			return -EINVAL;
+		}
+
+		prestart_args->get_raw_results = *get_raw_results;
+		break;
+	}
+	case CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST: {
+		struct cam_fd_soc_clock_bw_request *clk_req =
+			(struct cam_fd_soc_clock_bw_request *)blob_data;
+
+		if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
+				sizeof(struct cam_fd_soc_clock_bw_request),
+				blob_size);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD, "SOC Clk Request clock=%lld, bw=%lld",
+			clk_req->clock_rate, clk_req->bandwidth);
+
+		break;
+	}
+	default:
+		CAM_WARN(CAM_FD, "Unknown blob type %d", blob_type);
+		break;
+	}
+
+	return 0;
+}
+
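/*
 * For reference (illustrative, not part of this file): the
 * CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST case above expects blob_data to
 * hold exactly one cam_fd_soc_clock_bw_request. Only the two fields the
 * handler reads are shown; the values are placeholders.
 */
struct cam_fd_soc_clock_bw_request clk_req = {
	.clock_rate = 400000000, /* Hz, illustrative */
	.bandwidth  = 800000000, /* bytes/sec, illustrative */
};
/*
 * The (blob_type, blob_size) framing around this payload is added by
 * the generic cmd buffer packing and unpacked by
 * cam_packet_util_process_generic_cmd_buffer() before the handler runs.
 */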
+static int cam_fd_mgr_util_parse_generic_cmd_buffer(
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_packet *packet,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc = 0;
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data == CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_fd_packet_generic_blob_handler, prestart_args);
+		if (rc)
+			CAM_ERR(CAM_FD, "Failed in processing blobs %d", rc);
+
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_buf_map_requirement(uint32_t direction,
+	uint32_t resource_type, bool *need_io_map, bool *need_cpu_map)
+{
+	if (!need_io_map || !need_cpu_map) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK", need_io_map,
+			need_cpu_map);
+		return -EINVAL;
+	}
+
+	if (direction == CAM_BUF_INPUT) {
+		switch (resource_type) {
+		case CAM_FD_INPUT_PORT_ID_IMAGE:
+			*need_io_map = true;
+			*need_cpu_map = false;
+			break;
+		default:
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+	} else if (direction == CAM_BUF_OUTPUT) {
+		switch (resource_type) {
+		case CAM_FD_OUTPUT_PORT_ID_RESULTS:
+			*need_io_map = true;
+			*need_cpu_map = true;
+			break;
+		case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS:
+			*need_io_map = true;
+			*need_cpu_map = true;
+			break;
+		case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER:
+			*need_io_map = true;
+			*need_cpu_map = false;
+			break;
+		default:
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+	} else {
+		CAM_WARN(CAM_FD, "Invalid direction %d", direction);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_io_buffer *input_buf,
+	struct cam_fd_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t plane, num_out_buf, num_in_buf;
+	int i, j;
+	struct cam_buf_io_cfg *io_cfg;
+	dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+	bool need_io_map, need_cpu_map;
+
+	/* Get IO Buf information */
+	num_out_buf = 0;
+	num_in_buf  = 0;
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&prepare->packet->payload + prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_FD,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_FD, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		rc = cam_fd_mgr_util_get_buf_map_requirement(
+			io_cfg[i].direction, io_cfg[i].resource_type,
+			&need_io_map, &need_cpu_map);
+		if (rc) {
+			CAM_WARN(CAM_FD, "Invalid io buff [%d] : %d %d %d",
+				i, io_cfg[i].direction,
+				io_cfg[i].resource_type, rc);
+			continue;
+		}
+
+		memset(io_addr, 0x0, sizeof(io_addr));
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			io_addr[plane] = 0x0;
+			cpu_addr[plane] = 0x0;
+
+			if (need_io_map) {
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane],
+					iommu_hdl, &io_addr[plane], &size);
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Invalid io buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return -ENOMEM;
+				}
+
+				io_addr[plane] += io_cfg[i].offsets[plane];
+			}
+
+			if (need_cpu_map) {
+				rc = cam_mem_get_cpu_buf(
+					io_cfg[i].mem_handle[plane],
+					&cpu_addr[plane], &size);
+				if (rc || ((io_addr[plane] & 0xFFFFFFFF)
+					!= io_addr[plane])) {
+					CAM_ERR(CAM_FD,
+						"Invalid cpu buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return rc;
+				}
+				if (io_cfg[i].offsets[plane] >= size) {
+					CAM_ERR(CAM_FD,
+						"Invalid cpu buf %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane);
+					rc = -EINVAL;
+					return rc;
+				}
+				cpu_addr[plane] += io_cfg[i].offsets[plane];
+			}
+
+			CAM_DBG(CAM_FD, "IO Address[%d][%d] : %pK, %pK",
+				io_cfg[i].direction, plane, io_addr[plane],
+				cpu_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+				input_buf[num_in_buf].cpu_addr[j] = cpu_addr[j];
+			}
+			input_buf[num_in_buf].num_buf = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+				output_buf[num_out_buf].cpu_addr[j] =
+					cpu_addr[j];
+			}
+			output_buf[num_out_buf].num_buf = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			rc = -EINVAL;
+			break;
+		}
+	}
+
+	prepare->num_in_map_entries  = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_prepare_hw_update_entries(
+	struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args,
+	struct cam_kmd_buf_info *kmd_buf_info)
+{
+	int i, rc;
+	struct cam_hw_update_entry *hw_entry;
+	uint32_t num_ent;
+	struct cam_fd_hw_mgr_ctx *hw_ctx =
+		(struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
+	struct cam_fd_device *hw_device;
+	uint32_t kmd_buf_max_size, kmd_buf_used_bytes = 0;
+	uint32_t *kmd_buf_addr;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+		kmd_buf_info->used_bytes);
+	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+	prestart_args->cmd_buf_addr = kmd_buf_addr;
+	prestart_args->size  = kmd_buf_max_size;
+	prestart_args->pre_config_buf_size = 0;
+	prestart_args->post_config_buf_size = 0;
+
+	if (hw_device->hw_intf->hw_ops.process_cmd) {
+		rc = hw_device->hw_intf->hw_ops.process_cmd(
+			hw_device->hw_intf->hw_priv, CAM_FD_HW_CMD_PRESTART,
+			prestart_args,
+			sizeof(struct cam_fd_hw_cmd_prestart_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", rc);
+			return rc;
+		}
+	}
+
+	kmd_buf_used_bytes += prestart_args->pre_config_buf_size;
+	kmd_buf_used_bytes += prestart_args->post_config_buf_size;
+
+	/* HW layer is expected to add commands */
+	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+		CAM_ERR(CAM_FD, "Invalid kmd used bytes %d (%d)",
+			kmd_buf_used_bytes, kmd_buf_max_size);
+		return -ENOMEM;
+	}
+
+	hw_entry = prepare->hw_update_entries;
+	num_ent = 0;
+
+	if (prestart_args->pre_config_buf_size) {
+		if ((num_ent + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = kmd_buf_info->handle;
+		hw_entry[num_ent].len  = prestart_args->pre_config_buf_size;
+		hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += prestart_args->pre_config_buf_size;
+		kmd_buf_info->offset += prestart_args->pre_config_buf_size;
+		num_ent++;
+	}
+
+	/*
+	 * set the cmd_desc to point the first command descriptor in the
+	 * packet and update hw entries with CDM command buffers
+	 */
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+		&prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = cmd_desc[i].mem_handle;
+		hw_entry[num_ent].len = cmd_desc[i].length;
+		hw_entry[num_ent].offset = cmd_desc[i].offset;
+		num_ent++;
+	}
+
+	if (prestart_args->post_config_buf_size) {
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = kmd_buf_info->handle;
+		hw_entry[num_ent].len    = prestart_args->post_config_buf_size;
+		hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += prestart_args->post_config_buf_size;
+		kmd_buf_info->offset     += prestart_args->post_config_buf_size;
+
+		num_ent++;
+	}
+
+	prepare->num_hw_update_entries = num_ent;
+
+	CAM_DBG(CAM_FD, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+		prepare->num_hw_update_entries, prepare->num_in_map_entries,
+		prepare->num_out_map_entries);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_mgr *hw_mgr;
+	struct cam_fd_mgr_frame_request *frame_req;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_hw_cmd_start_args start_args;
+	int rc;
+
+	if (!priv) {
+		CAM_ERR(CAM_FD, "Invalid data");
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_fd_hw_mgr *)priv;
+	mutex_lock(&hw_mgr->frame_req_mutex);
+
+	/* Check if we have any frames pending in high priority list */
+	if (!list_empty(&hw_mgr->frame_pending_list_high)) {
+		CAM_DBG(CAM_FD, "Pending frames in high priority list");
+		frame_req = list_first_entry(&hw_mgr->frame_pending_list_high,
+			struct cam_fd_mgr_frame_request, list);
+	} else if (!list_empty(&hw_mgr->frame_pending_list_normal)) {
+		CAM_DBG(CAM_FD, "Pending frames in normal priority list");
+		frame_req = list_first_entry(&hw_mgr->frame_pending_list_normal,
+			struct cam_fd_mgr_frame_request, list);
+	} else {
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		CAM_DBG(CAM_FD, "No pending frames");
+		return 0;
+	}
+
+	CAM_DBG(CAM_FD, "FrameSubmit : Frame[%lld]", frame_req->request_id);
+	hw_ctx = frame_req->hw_ctx;
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_device->lock);
+	if (hw_device->ready_to_process == false) {
+		mutex_unlock(&hw_device->lock);
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		return -EBUSY;
+	}
+
+	trace_cam_submit_to_hw("FD", frame_req->request_id);
+
+	list_del_init(&frame_req->list);
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	if (hw_device->hw_intf->hw_ops.start) {
+		start_args.hw_ctx = hw_ctx;
+		start_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		start_args.hw_req_private = &frame_req->hw_req_private;
+		start_args.hw_update_entries = frame_req->hw_update_entries;
+		start_args.num_hw_update_entries =
+			frame_req->num_hw_update_entries;
+
+		rc = hw_device->hw_intf->hw_ops.start(
+			hw_device->hw_intf->hw_priv, &start_args,
+			sizeof(start_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Start %d", rc);
+			mutex_unlock(&hw_device->lock);
+			goto put_req_into_free_list;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid hw_ops.start");
+		mutex_unlock(&hw_device->lock);
+		rc = -EPERM;
+		goto put_req_into_free_list;
+	}
+
+	hw_device->ready_to_process = false;
+	hw_device->cur_hw_ctx = hw_ctx;
+	hw_device->req_id = frame_req->request_id;
+	mutex_unlock(&hw_device->lock);
+
+	rc = cam_fd_mgr_util_put_frame_req(
+		&hw_mgr->frame_processing_list, &frame_req);
+	if (rc) {
+		CAM_ERR(CAM_FD,
+			"Failed in putting frame req in processing list");
+		goto stop_unlock;
+	}
+
+	return rc;
+
+stop_unlock:
+	if (hw_device->hw_intf->hw_ops.stop) {
+		struct cam_fd_hw_stop_args stop_args;
+
+		stop_args.hw_ctx = hw_ctx;
+		stop_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		stop_args.hw_req_private = &frame_req->hw_req_private;
+		if (hw_device->hw_intf->hw_ops.stop(
+			hw_device->hw_intf->hw_priv, &stop_args,
+			sizeof(stop_args)))
+			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+	}
+put_req_into_free_list:
+	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, &frame_req);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_schedule_frame_worker_task(
+	struct cam_fd_hw_mgr *hw_mgr)
+{
+	int32_t rc = 0;
+	struct crm_workq_task *task;
+	struct cam_fd_mgr_work_data *work_data;
+
+	task = cam_req_mgr_workq_get_task(hw_mgr->work);
+	if (!task) {
+		CAM_ERR(CAM_FD, "no empty task available");
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_fd_mgr_work_data *)task->payload;
+	work_data->type = CAM_FD_WORK_FRAME;
+
+	task->process_cb = cam_fd_mgr_util_submit_frame;
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data)
+{
+	struct cam_fd_device *hw_device = NULL;
+	struct cam_fd_hw_mgr *hw_mgr;
+	struct cam_fd_mgr_work_data *work_data;
+	struct cam_fd_mgr_frame_request *frame_req = NULL;
+	enum cam_fd_hw_irq_type irq_type;
+	bool frame_abort = true;
+	int rc;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_FD, "Invalid data %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_fd_hw_mgr *)priv;
+	work_data = (struct cam_fd_mgr_work_data *)data;
+	irq_type = work_data->irq_type;
+
+	CAM_DBG(CAM_FD, "FD IRQ type=%d", irq_type);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/* HALT would be followed by a RESET, ignore this */
+		CAM_DBG(CAM_FD, "HALT IRQ callback");
+		return 0;
+	}
+
+	/* Get the frame from processing list */
+	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_processing_list,
+		&frame_req);
+	if (rc || !frame_req) {
+		/*
+		 * This can happen if reset is triggered while no frames
+		 * were pending, so not an error, just continue to check if
+		 * there are any pending frames and submit
+		 */
+		CAM_DBG(CAM_FD, "No Frame in processing list, rc=%d", rc);
+		goto submit_next_frame;
+	}
+
+	if (!frame_req->hw_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Frame request %lld",
+			frame_req->request_id);
+		goto put_req_in_free_list;
+	}
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, frame_req->hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		goto put_req_in_free_list;
+	}
+
+	/* Read frame results first */
+	if (irq_type == CAM_FD_IRQ_FRAME_DONE) {
+		struct cam_fd_hw_frame_done_args frame_done_args;
+
+		CAM_DBG(CAM_FD, "FrameDone : Frame[%lld]",
+			frame_req->request_id);
+
+		frame_done_args.hw_ctx = frame_req->hw_ctx;
+		frame_done_args.ctx_hw_private =
+			frame_req->hw_ctx->ctx_hw_private;
+		frame_done_args.request_id = frame_req->request_id;
+		frame_done_args.hw_req_private = &frame_req->hw_req_private;
+
+		if (hw_device->hw_intf->hw_ops.process_cmd) {
+			rc = hw_device->hw_intf->hw_ops.process_cmd(
+				hw_device->hw_intf->hw_priv,
+				CAM_FD_HW_CMD_FRAME_DONE,
+				&frame_done_args, sizeof(frame_done_args));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d",
+					rc);
+				frame_abort = true;
+				goto notify_context;
+			}
+		}
+
+		frame_abort = false;
+	}
+
+	trace_cam_irq_handled("FD", irq_type);
+
+notify_context:
+	/* Do a callback to inform frame done or stop done */
+	if (frame_req->hw_ctx->event_cb) {
+		struct cam_hw_done_event_data buf_data;
+
+		CAM_DBG(CAM_FD, "FrameHALT : Frame[%lld]",
+			frame_req->request_id);
+
+		buf_data.num_handles = frame_req->num_hw_update_entries;
+		buf_data.request_id = frame_req->request_id;
+
+		rc = frame_req->hw_ctx->event_cb(frame_req->hw_ctx->cb_priv,
+			frame_abort, &buf_data);
+		if (rc)
+			CAM_ERR(CAM_FD, "Error in event cb handling %d", rc);
+	}
+
+	/*
+	 * Now we can set hw device is free to process further frames.
+	 * Note - Do not change state to IDLE until we read the frame results,
+	 * Otherwise, other thread may schedule frame processing before
+	 * reading current frame's results. Also, we need to set to IDLE state
+	 * in case some error happens after getting this irq callback
+	 */
+	mutex_lock(&hw_device->lock);
+	hw_device->ready_to_process = true;
+	hw_device->req_id = -1;
+	hw_device->cur_hw_ctx = NULL;
+	CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
+	mutex_unlock(&hw_device->lock);
+
+put_req_in_free_list:
+	rc = cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in putting frame req in free list");
+		/* continue */
+	}
+
+submit_next_frame:
+	/* Check if there are any frames pending for processing and submit */
+	rc = cam_fd_mgr_util_submit_frame(hw_mgr, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error while submit frame, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_irq_cb(void *data, enum cam_fd_hw_irq_type irq_type)
+{
+	struct cam_fd_hw_mgr *hw_mgr = &g_fd_hw_mgr;
+	int rc = 0;
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct cam_fd_mgr_work_data *work_data;
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_slock, flags);
+	task = cam_req_mgr_workq_get_task(hw_mgr->work);
+	if (!task) {
+		CAM_ERR(CAM_FD, "no empty task available");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_fd_mgr_work_data *)task->payload;
+	work_data->type = CAM_FD_WORK_IRQ;
+	work_data->irq_type = irq_type;
+
+	task->process_cb = cam_fd_mgr_workq_irq_cb;
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in enqueue work task, rc=%d", rc);
+
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query = hw_get_caps_args;
+	struct cam_fd_query_cap_cmd query_fd;
+	void __user *caps_handle =
+		u64_to_user_ptr(query->caps_handle);
+
+	if (copy_from_user(&query_fd, caps_handle,
+		sizeof(struct cam_fd_query_cap_cmd))) {
+		CAM_ERR(CAM_FD, "Failed in copy from user, rc=%d", rc);
+		return -EFAULT;
+	}
+
+	query_fd = hw_mgr->fd_caps;
+
+	CAM_DBG(CAM_FD,
+		"IOMMU device(%d, %d), CDM(%d, %d), versions %d.%d, %d.%d",
+		query_fd.device_iommu.secure, query_fd.device_iommu.non_secure,
+		query_fd.cdm_iommu.secure, query_fd.cdm_iommu.non_secure,
+		query_fd.hw_caps.core_version.major,
+		query_fd.hw_caps.core_version.minor,
+		query_fd.hw_caps.wrapper_version.major,
+		query_fd.hw_caps.wrapper_version.minor);
+
+	if (copy_to_user(caps_handle, &query_fd,
+		sizeof(struct cam_fd_query_cap_cmd)))
+		rc = -EFAULT;
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args =
+		(struct cam_hw_acquire_args *)hw_acquire_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_acquire_dev_info fd_acquire_args;
+	int rc;
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_FD, "Invalid acquire args %pK", acquire_args);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&fd_acquire_args,
+		(void __user *)acquire_args->acquire_info,
+		sizeof(struct cam_fd_acquire_dev_info))) {
+		CAM_ERR(CAM_FD, "Copy from user failed");
+		return -EFAULT;
+	}
+
+	CAM_DBG(CAM_FD, "Acquire : mode=%d, get_raw_results=%d, priority=%d",
+		fd_acquire_args.mode, fd_acquire_args.get_raw_results,
+		fd_acquire_args.priority);
+
+	/* get a free fd hw mgr ctx */
+	rc = cam_fd_mgr_util_get_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+	if (rc || !hw_ctx) {
+		CAM_ERR(CAM_FD, "Get hw context failed, rc=%d, hw_ctx=%pK",
+			rc, hw_ctx);
+		return -EINVAL;
+	}
+
+	if (fd_acquire_args.get_raw_results && !hw_mgr->raw_results_available) {
+		CAM_ERR(CAM_FD, "HW cannot support raw results %d (%d)",
+			fd_acquire_args.get_raw_results,
+			hw_mgr->raw_results_available);
+		rc = -EPERM;
+		goto put_ctx;
+	}
+
+	if (!(fd_acquire_args.mode & hw_mgr->supported_modes)) {
+		CAM_ERR(CAM_FD, "HW cannot support requested mode 0x%x (0x%x)",
+			fd_acquire_args.mode, hw_mgr->supported_modes);
+		rc = -EPERM;
+		goto put_ctx;
+	}
+
+	rc = cam_fd_mgr_util_select_device(hw_mgr, hw_ctx, &fd_acquire_args);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in selecting device, rc=%d", rc);
+		goto put_ctx;
+	}
+
+	hw_ctx->ctx_in_use = true;
+	hw_ctx->hw_mgr = hw_mgr;
+	hw_ctx->get_raw_results = fd_acquire_args.get_raw_results;
+	hw_ctx->mode = fd_acquire_args.mode;
+
+	/* Save incoming cam core info into hw ctx */
+	hw_ctx->cb_priv = acquire_args->context_data;
+	hw_ctx->event_cb = acquire_args->event_cb;
+
+	/* Update out args */
+	acquire_args->ctxt_to_hw_map = hw_ctx;
+
+	cam_fd_mgr_util_put_ctx(&hw_mgr->used_ctx_list, &hw_ctx);
+
+	return 0;
+put_ctx:
+	list_del_init(&hw_ctx->list);
+	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+	return rc;
+}
+
+static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_release_args *release_args = hw_release_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	int rc;
+
+	if (!hw_mgr_priv || !hw_release_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK, %pK",
+			hw_mgr_priv, hw_release_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	rc = cam_fd_mgr_util_release_device(hw_mgr, hw_ctx);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);
+
+	hw_ctx->ctx_in_use = false;
+	list_del_init(&hw_ctx->list);
+	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+
+	return 0;
+}
+
+static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_start_args *hw_mgr_start_args =
+		(struct cam_hw_start_args *)mgr_start_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_init_args hw_init_args;
+
+	if (!hw_mgr_priv || !hw_mgr_start_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_mgr_start_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_start_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.init) {
+		hw_init_args.hw_ctx = hw_ctx;
+		hw_init_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.init(
+			hw_device->hw_intf->hw_priv, &hw_init_args,
+			sizeof(hw_init_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Init %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid init function");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush_req(void *hw_mgr_priv,
+	struct cam_hw_flush_args *flush_args)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	uint32_t i = 0;
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
+
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->frame_req_mutex);
+	for (i = 0; i < flush_args->num_req_active; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_active[i];
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_pending_list_high, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+			break;
+		}
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_pending_list_normal, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+			break;
+		}
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_processing_list, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+
+			mutex_lock(&hw_device->lock);
+			if ((hw_device->ready_to_process == true) ||
+				(hw_device->cur_hw_ctx != hw_ctx))
+				goto unlock_dev_flush_req;
+
+			if (hw_device->hw_intf->hw_ops.stop) {
+				hw_stop_args.hw_ctx = hw_ctx;
+				rc = hw_device->hw_intf->hw_ops.stop(
+					hw_device->hw_intf->hw_priv,
+					&hw_stop_args,
+					sizeof(hw_stop_args));
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Failed in HW Stop %d", rc);
+					goto unlock_dev_flush_req;
+				}
+				hw_device->ready_to_process = true;
+			}
+
+unlock_dev_flush_req:
+			mutex_unlock(&hw_device->lock);
+			break;
+		}
+	}
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	for (i = 0; i < flush_args->num_req_pending; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_pending[i];
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush_ctx(void *hw_mgr_priv,
+	struct cam_hw_flush_args *flush_args)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	uint32_t i = 0;
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
+
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->frame_req_mutex);
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_pending_list_high, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_pending_list_normal, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_processing_list, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+		mutex_lock(&hw_device->lock);
+		if ((hw_device->ready_to_process == true) ||
+			(hw_device->cur_hw_ctx != hw_ctx))
+			goto unlock_dev_flush_ctx;
+
+		if (hw_device->hw_intf->hw_ops.stop) {
+			hw_stop_args.hw_ctx = hw_ctx;
+			rc = hw_device->hw_intf->hw_ops.stop(
+				hw_device->hw_intf->hw_priv, &hw_stop_args,
+				sizeof(hw_stop_args));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+				goto unlock_dev_flush_ctx;
+			}
+			hw_device->ready_to_process = true;
+		}
+
+unlock_dev_flush_ctx:
+		mutex_unlock(&hw_device->lock);
+	}
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	for (i = 0; i < flush_args->num_req_pending; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_pending[i];
+		CAM_DBG(CAM_FD, "flush pending req %llu",
+			flush_req->request_id);
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
+	}
+
+	for (i = 0; i < flush_args->num_req_active; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_active[i];
+		CAM_DBG(CAM_FD, "flush active req %llu", flush_req->request_id);
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+	void *hw_flush_args)
+{
+	int rc = 0;
+	struct cam_hw_flush_args *flush_args =
+		(struct cam_hw_flush_args *)hw_flush_args;
+
+	if (!hw_mgr_priv || !hw_flush_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_REQ:
+		rc = cam_fd_mgr_hw_flush_req(hw_mgr_priv, flush_args);
+		break;
+	case CAM_FLUSH_TYPE_ALL:
+		rc = cam_fd_mgr_hw_flush_ctx(hw_mgr_priv, flush_args);
+		break;
+	default:
+		rc = -EINVAL;
+		CAM_ERR(CAM_FD, "Invalid flush type %d",
+			flush_args->flush_type);
+		break;
+	}
+	return rc;
+}
+
+static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_stop_args *hw_mgr_stop_args =
+		(struct cam_hw_stop_args *)mgr_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_deinit_args hw_deinit_args;
+	int rc = 0;
+
+	if (!hw_mgr_priv || !hw_mgr_stop_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_mgr_stop_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_stop_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
+		hw_device->ready_to_process);
+
+	if (hw_device->hw_intf->hw_ops.deinit) {
+		hw_deinit_args.hw_ctx = hw_ctx;
+		hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.deinit(
+			hw_device->hw_intf->hw_priv, &hw_deinit_args,
+			sizeof(hw_deinit_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW DeInit %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) hw_prepare_update_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	int rc;
+	struct cam_fd_hw_cmd_prestart_args prestart_args;
+	struct cam_fd_mgr_frame_request *frame_req;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_FD, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_packet_validate(prepare->packet,
+		prepare->remain_len);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting kmd buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_FD,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	/* We do not expect any patching, but just do it anyway */
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->device_iommu.non_secure, -1);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
+		return rc;
+	}
+
+	memset(&prestart_args, 0x0, sizeof(prestart_args));
+	prestart_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+	prestart_args.hw_ctx = hw_ctx;
+	prestart_args.request_id = prepare->packet->header.request_id;
+
+	rc = cam_fd_mgr_util_parse_generic_cmd_buffer(hw_ctx, prepare->packet,
+		&prestart_args);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in parsing generic cmd buffer %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_prepare_io_buf_info(
+		hw_mgr->device_iommu.non_secure, prepare,
+		prestart_args.input_buf, prestart_args.output_buf,
+		CAM_FD_MAX_IO_BUFFERS);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_prepare_hw_update_entries(hw_mgr, prepare,
+		&prestart_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	/* get a free frame req from free list */
+	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK",
+			rc, hw_ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup frame request info and queue to pending list */
+	frame_req->hw_ctx = hw_ctx;
+	frame_req->request_id = prepare->packet->header.request_id;
+	/* This has to be passed to HW while calling hw_ops->start */
+	frame_req->hw_req_private = prestart_args.hw_req_private;
+
+	/*
+	 * Save the current frame_req into priv,
+	 * this will come as priv while hw_config
+	 */
+	prepare->priv = frame_req;
+
+	CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id);
+
+	return 0;
+error:
+	return rc;
+}
+
+static int cam_fd_mgr_hw_config(void *hw_mgr_priv, void *hw_config_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_config_args *config =
+		(struct cam_hw_config_args *) hw_config_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_mgr_frame_request *frame_req;
+	int rc;
+	int i;
+
+	if (!hw_mgr || !config) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", hw_mgr, config);
+		return -EINVAL;
+	}
+
+	if (!config->num_hw_update_entries) {
+		CAM_ERR(CAM_FD, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)config->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
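+	/* priv carries the frame_req saved in cam_fd_mgr_hw_prepare_update */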
+	frame_req = config->priv;
+
+	trace_cam_apply_req("FD", frame_req->request_id);
+	CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id);
+
+	frame_req->num_hw_update_entries = config->num_hw_update_entries;
+	for (i = 0; i < config->num_hw_update_entries; i++) {
+		frame_req->hw_update_entries[i] = config->hw_update_entries[i];
+		CAM_DBG(CAM_FD, "PreStart HWEntry[%d] : %d %d %d %d %pK",
+			i,
+			frame_req->hw_update_entries[i].handle,
+			frame_req->hw_update_entries[i].offset,
+			frame_req->hw_update_entries[i].len,
+			frame_req->hw_update_entries[i].flags,
+			frame_req->hw_update_entries[i].addr);
+	}
+
+	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+		CAM_DBG(CAM_FD, "Insert frame into prio0 queue");
+		rc = cam_fd_mgr_util_put_frame_req(
+			&hw_mgr->frame_pending_list_high, &frame_req);
+	} else {
+		CAM_DBG(CAM_FD, "Insert frame into prio1 queue");
+		rc = cam_fd_mgr_util_put_frame_req(
+			&hw_mgr->frame_pending_list_normal, &frame_req);
+	}
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in queuing frame req, rc=%d", rc);
+		goto put_free_list;
+	}
+
+	rc = cam_fd_mgr_util_schedule_frame_worker_task(hw_mgr);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Worker task scheduling failed %d", rc);
+		goto remove_and_put_free_list;
+	}
+
+	return 0;
+
+remove_and_put_free_list:
+
+	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+		CAM_DBG(CAM_FD, "Removing frame from prio0 queue");
+		cam_fd_mgr_util_get_frame_req(
+			&hw_mgr->frame_pending_list_high, &frame_req);
+	} else {
+		CAM_DBG(CAM_FD, "Removing frame from prio1 queue");
+		cam_fd_mgr_util_get_frame_req(
+			&hw_mgr->frame_pending_list_normal, &frame_req);
+	}
+put_free_list:
+	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+
+	return rc;
+}
+
+int cam_fd_hw_mgr_deinit(struct device_node *of_node)
+{
+	CAM_DBG(CAM_FD, "HW Mgr Deinit");
+
+	cam_req_mgr_workq_destroy(&g_fd_hw_mgr.work);
+
+	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+
+	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+	return 0;
+}
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr_intf)
+{
+	int count, i, rc = 0;
+	struct cam_hw_intf *hw_intf = NULL;
+	struct cam_fd_hw_mgr_ctx *hw_mgr_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_mgr_frame_request *frame_req;
+
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_FD, "Invalid args of_node %pK hw_mgr_intf %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	memset(&g_fd_hw_mgr, 0x0, sizeof(g_fd_hw_mgr));
+	memset(hw_mgr_intf, 0x0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_fd_hw_mgr.ctx_mutex);
+	mutex_init(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_init(&g_fd_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_fd_hw_mgr.hw_mgr_slock);
+
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if ((count <= 0) || (count > CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid compat names in dev tree %d", count);
+		return -EINVAL;
+	}
+	g_fd_hw_mgr.num_devices = count;
+
+	g_fd_hw_mgr.raw_results_available = false;
+	g_fd_hw_mgr.supported_modes = 0;
+
+	for (i = 0; i < count; i++) {
+		hw_device = &g_fd_hw_mgr.hw_device[i];
+
+		rc = cam_fd_mgr_util_pdev_get_hw_intf(of_node, i, &hw_intf);
+		if (rc) {
+			CAM_ERR(CAM_FD, "hw intf from pdev failed, rc=%d", rc);
+			return rc;
+		}
+
+		mutex_init(&hw_device->lock);
+
+		hw_device->valid = true;
+		hw_device->hw_intf = hw_intf;
+		hw_device->ready_to_process = true;
+
+		if (hw_device->hw_intf->hw_ops.process_cmd) {
+			struct cam_fd_hw_cmd_set_irq_cb irq_cb_args;
+
+			irq_cb_args.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb;
+			irq_cb_args.data = hw_device;
+
+			rc = hw_device->hw_intf->hw_ops.process_cmd(
+				hw_device->hw_intf->hw_priv,
+				CAM_FD_HW_CMD_REGISTER_CALLBACK,
+				&irq_cb_args, sizeof(irq_cb_args));
+			if (rc) {
+				CAM_ERR(CAM_FD,
+					"Failed in REGISTER_CALLBACK %d", rc);
+				return rc;
+			}
+		}
+
+		if (hw_device->hw_intf->hw_ops.get_hw_caps) {
+			rc = hw_device->hw_intf->hw_ops.get_hw_caps(
+				hw_intf->hw_priv, &hw_device->hw_caps,
+				sizeof(hw_device->hw_caps));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in get_hw_caps %d", rc);
+				return rc;
+			}
+
+			g_fd_hw_mgr.raw_results_available |=
+				hw_device->hw_caps.raw_results_available;
+			g_fd_hw_mgr.supported_modes |=
+				hw_device->hw_caps.supported_modes;
+
+			CAM_DBG(CAM_FD,
+				"Device[mode=%d, raw=%d], Mgr[mode=%d, raw=%d]",
+				hw_device->hw_caps.supported_modes,
+				hw_device->hw_caps.raw_results_available,
+				g_fd_hw_mgr.supported_modes,
+				g_fd_hw_mgr.raw_results_available);
+		}
+	}
+
+	INIT_LIST_HEAD(&g_fd_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.used_ctx_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_free_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_high);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_normal);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_processing_list);
+
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+	g_fd_hw_mgr.device_iommu.secure = -1;
+	g_fd_hw_mgr.cdm_iommu.non_secure = -1;
+	g_fd_hw_mgr.cdm_iommu.secure = -1;
+
+	rc = cam_smmu_get_handle("fd",
+		&g_fd_hw_mgr.device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Get iommu handle failed, rc=%d", rc);
+		goto destroy_mutex;
+	}
+
+	rc = cam_cdm_get_iommu_handle("fd", &g_fd_hw_mgr.cdm_iommu);
+	if (rc)
+		CAM_DBG(CAM_FD, "Failed to acquire the CDM iommu handles");
+
+	CAM_DBG(CAM_FD, "iommu handles : device(%d, %d), cdm(%d, %d)",
+		g_fd_hw_mgr.device_iommu.non_secure,
+		g_fd_hw_mgr.device_iommu.secure,
+		g_fd_hw_mgr.cdm_iommu.non_secure,
+		g_fd_hw_mgr.cdm_iommu.secure);
+
+	/* Init hw mgr contexts and add to free list */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		hw_mgr_ctx = &g_fd_hw_mgr.ctx_pool[i];
+
+		memset(hw_mgr_ctx, 0x0, sizeof(*hw_mgr_ctx));
+		INIT_LIST_HEAD(&hw_mgr_ctx->list);
+
+		hw_mgr_ctx->ctx_index = i;
+		hw_mgr_ctx->device_index = -1;
+		hw_mgr_ctx->hw_mgr = &g_fd_hw_mgr;
+
+		list_add_tail(&hw_mgr_ctx->list, &g_fd_hw_mgr.free_ctx_list);
+	}
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		frame_req = &g_fd_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->list);
+
+		list_add_tail(&frame_req->list, &g_fd_hw_mgr.frame_free_list);
+	}
+
+	rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK,
+		&g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	for (i = 0; i < CAM_FD_WORKQ_NUM_TASK; i++)
+		g_fd_hw_mgr.work->task.pool[i].payload =
+			&g_fd_hw_mgr.work_data[i];
+
+	/* Setup hw cap so that we can just return the info when requested */
+	memset(&g_fd_hw_mgr.fd_caps, 0, sizeof(g_fd_hw_mgr.fd_caps));
+	g_fd_hw_mgr.fd_caps.device_iommu = g_fd_hw_mgr.device_iommu;
+	g_fd_hw_mgr.fd_caps.cdm_iommu = g_fd_hw_mgr.cdm_iommu;
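+	/*
+	 * Note: only hw_device[0]'s caps are cached; with CAM_FD_HW_MAX == 1
+	 * there is exactly one FD device anyway.
+	 */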
+	g_fd_hw_mgr.fd_caps.hw_caps = g_fd_hw_mgr.hw_device[0].hw_caps;
+
+	CAM_DBG(CAM_FD,
+		"IOMMU device(%d, %d), CDM(%d, %d) versions core[%d.%d], wrapper[%d.%d]",
+		g_fd_hw_mgr.fd_caps.device_iommu.secure,
+		g_fd_hw_mgr.fd_caps.device_iommu.non_secure,
+		g_fd_hw_mgr.fd_caps.cdm_iommu.secure,
+		g_fd_hw_mgr.fd_caps.cdm_iommu.non_secure,
+		g_fd_hw_mgr.fd_caps.hw_caps.core_version.major,
+		g_fd_hw_mgr.fd_caps.hw_caps.core_version.minor,
+		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.major,
+		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.minor);
+
+	hw_mgr_intf->hw_mgr_priv = &g_fd_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_fd_mgr_hw_get_caps;
+	hw_mgr_intf->hw_acquire = cam_fd_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_fd_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_fd_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_fd_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_fd_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_fd_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+	hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush;
+
+	return rc;
+
+detach_smmu:
+	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+destroy_mutex:
+	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+	return rc;
+}

+ 179 - 0
drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h

@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_MGR_H_
+#define _CAM_FD_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <cam_fd.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_fd_hw_intf.h"
+
+#define CAM_FD_HW_MAX            1
+#define CAM_FD_WORKQ_NUM_TASK    10
+
+struct cam_fd_hw_mgr;
+
+/**
+ * enum cam_fd_mgr_work_type - Type of worker task
+ *
+ * @CAM_FD_WORK_FRAME : Work type indicating frame task
+ * @CAM_FD_WORK_IRQ   : Work type indicating irq task
+ */
+enum cam_fd_mgr_work_type {
+	CAM_FD_WORK_FRAME,
+	CAM_FD_WORK_IRQ,
+};
+
+/**
+ * struct cam_fd_hw_mgr_ctx : FD HW Mgr context
+ *
+ * @list            : List pointer used to maintain this context
+ *                    in free, used list
+ * @ctx_index       : Index of this context
+ * @ctx_in_use      : Whether this context is in use
+ * @event_cb        : Event callback pointer to notify cam core context
+ * @cb_priv         : Event callback private pointer
+ * @hw_mgr          : Pointer to hw manager
+ * @get_raw_results : Whether this context needs raw results
+ * @mode            : Mode in which this context runs
+ * @device_index    : HW Device used by this context
+ * @ctx_hw_private  : HW layer's private context pointer for this context
+ * @priority        : Priority of this context
+ */
+struct cam_fd_hw_mgr_ctx {
+	struct list_head               list;
+	uint32_t                       ctx_index;
+	bool                           ctx_in_use;
+	cam_hw_event_cb_func           event_cb;
+	void                          *cb_priv;
+	struct cam_fd_hw_mgr          *hw_mgr;
+	bool                           get_raw_results;
+	enum cam_fd_hw_mode            mode;
+	int32_t                        device_index;
+	void                          *ctx_hw_private;
+	uint32_t                       priority;
+};
+
+/**
+ * struct cam_fd_device : FD HW Device
+ *
+ * @hw_caps          : This FD device's capabilities
+ * @hw_intf          : FD device's interface information
+ * @ready_to_process : Whether this device is ready to process next frame
+ * @num_ctxts        : Number of contexts currently running on this device
+ * @valid            : Whether this device is valid
+ * @lock             : Lock used for protecting this device's state
+ * @cur_hw_ctx       : Current hw context running on the device
+ * @req_id           : Currently processing request id
+ */
+struct cam_fd_device {
+	struct cam_fd_hw_caps     hw_caps;
+	struct cam_hw_intf       *hw_intf;
+	bool                      ready_to_process;
+	uint32_t                  num_ctxts;
+	bool                      valid;
+	struct mutex              lock;
+	struct cam_fd_hw_mgr_ctx *cur_hw_ctx;
+	int64_t                   req_id;
+};
+
+/**
+ * struct cam_fd_mgr_frame_request : Frame request information maintained
+ *                                   in HW Mgr layer
+ *
+ * @list                  : List pointer used to maintain this request in
+ *                          free, pending, processing request lists
+ * @request_id            : Request ID corresponding to this request
+ * @hw_ctx                : HW context from which this request is coming
+ * @hw_req_private        : HW layer's private information specific to
+ *                          this request
+ * @hw_update_entries     : HW update entries corresponding to this request
+ *                          which needs to be submitted to HW through CDM
+ * @num_hw_update_entries : Number of HW update entries
+ */
+struct cam_fd_mgr_frame_request {
+	struct list_head               list;
+	uint64_t                       request_id;
+	struct cam_fd_hw_mgr_ctx      *hw_ctx;
+	struct cam_fd_hw_req_private   hw_req_private;
+	struct cam_hw_update_entry     hw_update_entries[CAM_FD_MAX_HW_ENTRIES];
+	uint32_t                       num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_mgr_work_data : HW Mgr work data information
+ *
+ * @type     : Type of work
+ * @irq_type : IRQ type when this work is queued because of irq callback
+ */
+struct cam_fd_mgr_work_data {
+	enum cam_fd_mgr_work_type      type;
+	enum cam_fd_hw_irq_type        irq_type;
+};
+
+/**
+ * struct cam_fd_hw_mgr : FD HW Mgr information
+ *
+ * @free_ctx_list             : List of free contexts available for acquire
+ * @used_ctx_list             : List of contexts that are acquired
+ * @frame_free_list           : List of free frame requests available
+ * @frame_pending_list_high   : List of high priority frame requests pending
+ *                              for processing
+ * @frame_pending_list_normal : List of normal priority frame requests pending
+ *                              for processing
+ * @frame_processing_list     : List of frame requests currently being
+ *                              processed. Generally a maximum of one
+ *                              request would be present in this list
+ * @hw_mgr_mutex              : Mutex to protect hw mgr data when accessed
+ *                              from multiple threads
+ * @hw_mgr_slock              : Spin lock to protect hw mgr data when accessed
+ *                              from multiple threads
+ * @ctx_mutex                 : Mutex to protect context list
+ * @frame_req_mutex           : Mutex to protect frame request list
+ * @device_iommu              : Device IOMMU information
+ * @cdm_iommu                 : CDM IOMMU information
+ * @hw_device                 : Underlying HW device information
+ * @num_devices               : Number of HW devices available
+ * @raw_results_available     : Whether raw results are available in this driver
+ * @supported_modes           : Supported modes by this driver
+ * @ctx_pool                  : Pool of contexts
+ * @frame_req                 : Pool of frame requests
+ * @work                      : Worker handle
+ * @work_data                 : Worker data
+ * @fd_caps                   : FD driver capabilities
+ */
+struct cam_fd_hw_mgr {
+	struct list_head                   free_ctx_list;
+	struct list_head                   used_ctx_list;
+	struct list_head                   frame_free_list;
+	struct list_head                   frame_pending_list_high;
+	struct list_head                   frame_pending_list_normal;
+	struct list_head                   frame_processing_list;
+	struct mutex                       hw_mgr_mutex;
+	spinlock_t                         hw_mgr_slock;
+	struct mutex                       ctx_mutex;
+	struct mutex                       frame_req_mutex;
+	struct cam_iommu_handle            device_iommu;
+	struct cam_iommu_handle            cdm_iommu;
+	struct cam_fd_device               hw_device[CAM_FD_HW_MAX];
+	uint32_t                           num_devices;
+	bool                               raw_results_available;
+	uint32_t                           supported_modes;
+	struct cam_fd_hw_mgr_ctx           ctx_pool[CAM_CTX_MAX];
+	struct cam_fd_mgr_frame_request    frame_req[CAM_CTX_REQ_MAX];
+	struct cam_req_mgr_core_workq     *work;
+	struct cam_fd_mgr_work_data        work_data[CAM_FD_WORKQ_NUM_TASK];
+	struct cam_fd_query_cap_cmd        fd_caps;
+};
+
+#endif /* _CAM_FD_HW_MGR_H_ */

+ 18 - 0
drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_MGR_INTF_H_
+#define _CAM_FD_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr_intf);
+int cam_fd_hw_mgr_deinit(struct device_node *of_node);
+
+#endif /* _CAM_FD_HW_MGR_INTF_H_ */

+ 16 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/Makefile

@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_sync
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cdm
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd/fd_hw_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -I$(srctree)/techpack/camera
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_dev.o cam_fd_hw_core.o cam_fd_hw_soc.o

+ 1164 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c

@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_trace.h"
+
+#define CAM_FD_REG_VAL_PAIR_SIZE 256
+
+static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer,
+	uint32_t index, uint32_t reg_offset, uint32_t reg_value)
+{
+	buffer[index++] = reg_offset;
+	buffer[index++] = reg_value;
+
+	CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%08x] Value[0x%08x]",
+		reg_offset, reg_value);
+
+	return index;
+}
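+
+/*
+ * Illustration (hypothetical register offsets): two calls such as
+ *   index = cam_fd_cdm_write_reg_val_pair(buf, 0, 0x10, 0x1);
+ *   index = cam_fd_cdm_write_reg_val_pair(buf, index, 0x14, 0x0);
+ * leave buf = {0x10, 0x1, 0x14, 0x0} with index (num_cmds) = 4; callers
+ * later halve num_cmds to get the pair count passed to
+ * cdm_required_size_reg_random() and cdm_write_regrandom().
+ */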
+
+static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint64_t cookie)
+{
+	trace_cam_cdm_cb("FD", status);
+	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+		handle, userdata, status, cookie);
+}
+
+static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+
+	if (hw_static_info->enable_errata_wa.single_irq_only == false) {
+		/* Enable IRQs here */
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			hw_static_info->irq_mask);
+	}
+
+	/* QoS settings */
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.vbif_req_priority,
+		hw_static_info->qos_priority);
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.vbif_priority_level,
+		hw_static_info->qos_priority_level);
+}
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_caps *hw_caps)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+	uint32_t reg_value;
+
+	if (!hw_static_info) {
+		CAM_ERR(CAM_FD, "Invalid hw info data");
+		return -EINVAL;
+	}
+
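+	/*
+	 * Register layouts implied by the masks below: the core version
+	 * packs major/minor/incr into bits [11:8]/[7:4]/[3:0]; the wrapper
+	 * version uses bits [31:28]/[27:16]/[15:0].
+	 */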
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.version);
+	hw_caps->core_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8);
+	hw_caps->core_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4);
+	hw_caps->core_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0);
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.wrapper_version);
+	hw_caps->wrapper_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->wrapper_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->wrapper_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	hw_caps->raw_results_available =
+		hw_static_info->results.raw_results_available;
+	hw_caps->supported_modes = hw_static_info->supported_modes;
+
+	CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d raw_results:%d",
+		hw_caps->core_version.major, hw_caps->core_version.minor,
+		hw_caps->core_version.incr, hw_caps->wrapper_version.major,
+		hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr,
+		hw_caps->raw_results_available);
+
+	return 0;
+}
+
+static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	long time_left;
+
+	/* Before triggering reset to HW, clear the reset complete */
+	reinit_completion(&fd_core->reset_complete);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x1);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE));
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.sw_reset, 0x1);
+
+	time_left = wait_for_completion_timeout(&fd_core->reset_complete,
+		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+	if (time_left <= 0)
+		CAM_WARN(CAM_FD, "HW reset timeout time_left=%ld", time_left);
+
+	CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
+
+	return 0;
+}
+
+static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	long time_left;
+
+	/* Before triggering halt to HW, clear halt complete */
+	reinit_completion(&fd_core->halt_complete);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE));
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.hw_stop, 0x1);
+
+	time_left = wait_for_completion_timeout(&fd_core->halt_complete,
+		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+	if (time_left <= 0)
+		CAM_WARN(CAM_FD, "HW halt timeout time_left=%ld", time_left);
+
+	CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
+
+	return 0;
+}
+
+static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+	struct cam_fd_ctx_hw_private *ctx_hw_private =
+		prestart_args->ctx_hw_private;
+	uint32_t size, size_required = 0;
+	uint32_t mem_base;
+	uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr;
+	uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE];
+	uint32_t num_cmds = 0;
+	int i;
+	struct cam_fd_hw_io_buffer *io_buf;
+	struct cam_fd_hw_req_private *req_private;
+	uint32_t available_size = prestart_args->size;
+	bool work_buffer_configured = false;
+
+	if (!ctx_hw_private || !cmd_buf_addr) {
+		CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK",
+			ctx_hw_private, cmd_buf_addr);
+		return -EINVAL;
+	}
+
+	if (prestart_args->get_raw_results &&
+		!hw_static_info->results.raw_results_available) {
+		CAM_ERR(CAM_FD, "Raw results not supported %d %d",
+			prestart_args->get_raw_results,
+			hw_static_info->results.raw_results_available);
+		return -EINVAL;
+	}
+
+	req_private = &prestart_args->hw_req_private;
+	req_private->ctx_hw_private = prestart_args->ctx_hw_private;
+	req_private->request_id = prestart_args->request_id;
+	req_private->get_raw_results = prestart_args->get_raw_results;
+	req_private->fd_results = NULL;
+	req_private->raw_results = NULL;
+
+	/* Start preparing CDM register values that KMD has to insert */
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+		hw_static_info->core_regs.control, 0x1);
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+		hw_static_info->core_regs.control, 0x0);
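+	/*
+	 * The control writes above presumably toggle the core (set then
+	 * clear) to bring it to a known state before the buffer addresses
+	 * are programmed; the start trigger (0x2) is inserted separately as
+	 * a post-config command further below.
+	 */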
+
+	for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+		io_buf = &prestart_args->input_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_FD_INPUT_PORT_ID_IMAGE: {
+			if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+				CAM_ERR(CAM_FD,
+					"Invalid reg_val pair size %d, %d",
+					num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+				return -EINVAL;
+			}
+
+			num_cmds = cam_fd_cdm_write_reg_val_pair(
+				reg_val_pair, num_cmds,
+				hw_static_info->core_regs.image_addr,
+				io_buf->io_addr[0]);
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Invalid resource type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+		io_buf = &prestart_args->output_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+			return -EINVAL;
+		}
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
+			uint32_t face_results_offset;
+
+			size_required = hw_static_info->results.max_faces *
+				hw_static_info->results.per_face_entries * 4;
+
+			if (io_buf->io_cfg->planes[0].plane_stride <
+				size_required) {
+				CAM_ERR(CAM_FD, "Invalid results size %d %d",
+					io_buf->io_cfg->planes[0].plane_stride,
+					size_required);
+				return -EINVAL;
+			}
+
+			req_private->fd_results =
+				(struct cam_fd_results *)io_buf->cpu_addr[0];
+
+			face_results_offset =
+				(uint8_t *)&req_private->fd_results->faces[0] -
+				(uint8_t *)req_private->fd_results;
+
+			if (hw_static_info->ro_mode_supported) {
+				if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
+					CAM_ERR(CAM_FD,
+						"Invalid reg_val size %d, %d",
+						num_cmds,
+						CAM_FD_REG_VAL_PAIR_SIZE);
+					return -EINVAL;
+				}
+				/*
+				 * Face data actually starts 16 bytes later
+				 * in the io buffer. Check cam_fd_results.
+				 */
+				num_cmds = cam_fd_cdm_write_reg_val_pair(
+					reg_val_pair, num_cmds,
+					hw_static_info->core_regs.result_addr,
+					io_buf->io_addr[0] +
+					face_results_offset);
+				num_cmds = cam_fd_cdm_write_reg_val_pair(
+					reg_val_pair, num_cmds,
+					hw_static_info->core_regs.ro_mode,
+					0x1);
+
+				req_private->ro_mode_enabled = true;
+			} else {
+				req_private->ro_mode_enabled = false;
+			}
+			break;
+		}
+		case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: {
+			size_required =
+				hw_static_info->results.raw_results_entries *
+				sizeof(uint32_t);
+
+			if (io_buf->io_cfg->planes[0].plane_stride <
+				size_required) {
+				CAM_ERR(CAM_FD, "Invalid results size %d %d",
+					io_buf->io_cfg->planes[0].plane_stride,
+					size_required);
+				return -EINVAL;
+			}
+
+			req_private->raw_results =
+				(uint32_t *)io_buf->cpu_addr[0];
+			break;
+		}
+		case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: {
+			if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+				CAM_ERR(CAM_FD,
+					"Invalid reg_val pair size %d, %d",
+					num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+				return -EINVAL;
+			}
+
+			num_cmds = cam_fd_cdm_write_reg_val_pair(
+				reg_val_pair, num_cmds,
+				hw_static_info->core_regs.work_addr,
+				io_buf->io_addr[0]);
+
+			work_buffer_configured = true;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Invalid resource type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	if (!req_private->fd_results || !work_buffer_configured) {
+		CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d",
+			req_private->fd_results, work_buffer_configured);
+		return -EINVAL;
+	}
+
+	/* First insert CHANGE_BASE command */
+	size = ctx_hw_private->cdm_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d",
+			prestart_args->size, size * 4);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info,
+		((struct cam_fd_soc_private *)soc_info->soc_private)->
+		regbase_index[CAM_FD_REG_CORE]);
+
+	ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
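+	/*
+	 * cmd_buf_addr is a uint32_t pointer, so advancing it by 'size'
+	 * steps over 'size' dwords, matching the (size * 4) bytes taken
+	 * off available_size.
+	 */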
+
+	size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+		num_cmds/2);
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "Insufficient size:%d, expected size:%d",
+			available_size, size * 4);
+		return -ENOMEM;
+	}
+	ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	/* Update pre_config_buf_size in bytes */
+	prestart_args->pre_config_buf_size =
+		prestart_args->size - available_size;
+
+	/* Insert start trigger command into CDM as post config commands. */
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, 0,
+		hw_static_info->core_regs.control, 0x2);
+	size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+		num_cmds/2);
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "Insufficient size:%d, expected size:%d",
+			available_size, size * 4);
+		return -ENOMEM;
+	}
+	ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	prestart_args->post_config_buf_size = size * 4;
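+	/*
+	 * Resulting KMD command buffer layout (sketch):
+	 *   pre-config  : [CHANGE_BASE][REG_RANDOM: control toggle + addrs]
+	 *   post-config : [REG_RANDOM: control start trigger (0x2)]
+	 */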
+
+	CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
+		prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
+		cmd_buf_addr, prestart_args->post_config_buf_size);
+
+	for (i = 0; i < (prestart_args->pre_config_buf_size +
+		prestart_args->post_config_buf_size) / 4; i++)
+		CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i,
+			&prestart_args->cmd_buf_addr[i],
+			prestart_args->cmd_buf_addr[i]);
+
+	return 0;
+}
+
+static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_frame_done_args *frame_done_args)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_fd_hw_req_private *req_private;
+	uint32_t base, face_cnt;
+	uint32_t *buffer;
+	unsigned long flags;
+	int i;
+
+	mutex_lock(&fd_hw->hw_mutex);
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) ||
+		(fd_core->results_valid == false) ||
+		!fd_core->hw_req_private) {
+		CAM_ERR(CAM_FD,
+			"Invalid state for results state=%d, results=%d %pK",
+			fd_core->core_state, fd_core->results_valid,
+			fd_core->hw_req_private);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		mutex_unlock(&fd_hw->hw_mutex);
+		return -EINVAL;
+	}
+	fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
+	req_private = fd_core->hw_req_private;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	/*
+	 * Copy the register value as is into the output buffers.
+	 * Whether we copy the output data by reading registers or program
+	 * the output buffer directly to HW must be transparent to UMD.
+	 * In case HW supports writing the face count value directly into
+	 * DDR memory in the future, these values should match.
+	 */
+	req_private->fd_results->face_count =
+		cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.result_cnt);
+
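+	/*
+	 * The 0x3F mask below assumes only the low 6 bits of the result
+	 * count register hold the face count; face_cnt is then clamped
+	 * to max_faces.
+	 */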
+	face_cnt = req_private->fd_results->face_count & 0x3F;
+
+	if (face_cnt > hw_static_info->results.max_faces) {
+		CAM_WARN(CAM_FD, "Face count greater than max %d %d",
+			face_cnt, hw_static_info->results.max_faces);
+		face_cnt = hw_static_info->results.max_faces;
+	}
+
+	CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
+		req_private->request_id, face_cnt);
+
+	/*
+	 * We need to read the face data information from registers only
+	 * if one of below is true
+	 * 1. RO mode is not set. i.e FD HW doesn't write face data into
+	 *    DDR memory
+	 * 2. On the current chipset, results written into DDR memory by FD HW
+	 *    are not guaranteed to be correct
+	 */
+	if (!req_private->ro_mode_enabled ||
+		hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
+		buffer = (uint32_t *)&req_private->fd_results->faces[0];
+		base = hw_static_info->core_regs.results_reg_base;
+
+		/*
+		 * Write register values as is into the face data buffer. It is
+		 * the UMD driver's responsibility to interpret the data and
+		 * extract face properties from the output buffer. If the
+		 * output buffer were programmed directly to HW, KMD would have
+		 * no control over extracting the face properties and UMD would
+		 * have to extract them anyway. So we follow the same approach
+		 * and keep this transparent to UMD.
+		 */
+		for (i = 0;
+			i < (face_cnt *
+			hw_static_info->results.per_face_entries); i++) {
+			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+				CAM_FD_REG_CORE, base + (i * 0x4));
+			CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer);
+			buffer++;
+		}
+	}
+
+	if (req_private->get_raw_results &&
+		req_private->raw_results &&
+		hw_static_info->results.raw_results_available) {
+		buffer = req_private->raw_results;
+		base = hw_static_info->core_regs.raw_results_reg_base;
+
+		for (i = 0;
+			i < hw_static_info->results.raw_results_entries;
+			i++) {
+			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+				CAM_FD_REG_CORE, base + (i * 0x4));
+			CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer);
+			buffer++;
+		}
+	}
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->hw_req_private = NULL;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+	mutex_unlock(&fd_hw->hw_mutex);
+
+	return 0;
+}
+
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
+	struct cam_fd_core *fd_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_fd_hw_static_info *hw_static_info;
+	uint32_t reg_value;
+	enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE;
+	uint32_t num_irqs = 0;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	fd_core = (struct cam_fd_core *) fd_hw->core_info;
+	soc_info = &fd_hw->soc_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_status);
+
+	CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_clear,
+		reg_value);
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) {
+		complete_all(&fd_core->halt_complete);
+		irq_type = CAM_FD_IRQ_HALT_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) {
+		complete_all(&fd_core->reset_complete);
+		irq_type = CAM_FD_IRQ_RESET_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) {
+		complete_all(&fd_core->processing_complete);
+		irq_type = CAM_FD_IRQ_FRAME_DONE;
+		num_irqs++;
+	}
+
+	/*
+	 * We should never get an IRQ callback with zero or more than one
+	 * mask set. Validate first to make sure nothing is going wrong.
+	 */
+	if (num_irqs != 1) {
+		CAM_ERR(CAM_FD,
+			"Invalid number of IRQs, value=0x%x, num_irqs=%d",
+			reg_value, num_irqs);
+		return IRQ_NONE;
+	}
+
+	trace_cam_irq_activated("FD", irq_type);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/*
+		 * Do not send HALT IRQ callback to Hw Mgr,
+		 * a reset would always follow
+		 */
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&fd_core->spin_lock);
+	/* Do not change state to IDLE on HALT IRQ. Reset must follow halt */
+	if ((irq_type == CAM_FD_IRQ_RESET_DONE) ||
+		(irq_type == CAM_FD_IRQ_FRAME_DONE)) {
+		fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+		if (irq_type == CAM_FD_IRQ_FRAME_DONE)
+			fd_core->results_valid = true;
+
+		CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d",
+			irq_type, fd_core->core_state);
+	}
+	spin_unlock(&fd_core->spin_lock);
+
+	if (fd_core->irq_cb.cam_fd_hw_mgr_cb)
+		fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data,
+			irq_type);
+
+	return IRQ_HANDLED;
+}
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_caps *fd_hw_caps =
+		(struct cam_fd_hw_caps *)get_hw_cap_args;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	*fd_hw_caps = fd_core->hw_caps;
+
+	CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d",
+		fd_hw_caps->core_version.major,
+		fd_hw_caps->core_version.minor,
+		fd_hw_caps->wrapper_version.major,
+		fd_hw_caps->wrapper_version.minor,
+		fd_hw_caps->supported_modes,
+		fd_hw_caps->raw_results_available);
+
+	return 0;
+}
+
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_init_args *init_args =
+		(struct cam_fd_hw_init_args *)init_hw_args;
+	int rc = 0;
+	unsigned long flags;
+
+	if (!fd_hw || !init_args) {
+		CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_init_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	mutex_lock(&fd_hw->hw_mutex);
+	CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count);
+
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		goto cdm_streamon;
+	}
+
+	rc = cam_fd_soc_enable_resources(&fd_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc);
+		goto unlock_return;
+	}
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	rc = cam_fd_hw_reset(hw_priv, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
+		goto disable_soc;
+	}
+
+	cam_fd_hw_util_enable_power_on_settings(fd_hw);
+
+cdm_streamon:
+	fd_hw->open_count++;
+	CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
+
+	if (init_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			init_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+			fd_hw->open_count--;
+			if (!fd_hw->open_count)
+				goto disable_soc;
+		}
+	}
+
+	mutex_unlock(&fd_hw->hw_mutex);
+
+	return rc;
+
+disable_soc:
+	if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Error in disable soc resources");
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+unlock_return:
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = hw_priv;
+	struct cam_fd_core *fd_core = NULL;
+	struct cam_fd_hw_deinit_args *deinit_args =
+		(struct cam_fd_hw_deinit_args *)deinit_hw_args;
+	int rc = 0;
+	unsigned long flags;
+
+	if (!fd_hw || !deinit_hw_args) {
+		CAM_ERR(CAM_FD, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_deinit_args));
+		return -EINVAL;
+	}
+
+	mutex_lock(&fd_hw->hw_mutex);
+	if (fd_hw->open_count == 0) {
+		mutex_unlock(&fd_hw->hw_mutex);
+		CAM_ERR(CAM_FD, "Error: Unbalanced deinit");
+		return -EFAULT;
+	}
+
+	fd_hw->open_count--;
+	CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
+
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		goto positive_ref_cnt;
+	}
+
+	rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	/* With the ref_cnt correct, this should never happen */
+	WARN_ON(!fd_core);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+positive_ref_cnt:
+	if (deinit_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			deinit_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+		}
+	}
+
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_hw_soc_info *soc_info;
+	unsigned long flags;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+	soc_info = &fd_hw->soc_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x1);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x0);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_fd_hw_cmd_start_args *start_args =
+		(struct cam_fd_hw_cmd_start_args *)hw_start_args;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	unsigned long flags;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv,
+			start_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_cmd_start_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) {
+		CAM_ERR(CAM_FD, "Cannot start in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	/*
+	 * We are about to start FD HW processing, save the request
+	 * private data which is being processed by HW. Once the frame
+	 * processing is finished, process_cmd(FRAME_DONE) should be called
+	 * with same hw_req_private as input.
+	 */
+	fd_core->hw_req_private = start_args->hw_req_private;
+	fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING;
+	fd_core->results_valid = false;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	ctx_hw_private = start_args->ctx_hw_private;
+
+	/* Before starting HW process, clear processing complete */
+	reinit_completion(&fd_core->processing_complete);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE));
+	}
+
+	if (start_args->num_hw_update_entries > 0) {
+		struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd;
+		struct cam_hw_update_entry *cmd;
+		int i;
+
+		cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i < start_args->num_hw_update_entries; i++) {
+			cmd = (start_args->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed to submit cdm commands, rc=%d", rc);
+			goto error;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid number of hw update entries");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+error:
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
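+/*
+ * Same CGC-disable, halt and sync-reset sequence as cam_fd_hw_reset(); kept
+ * as a separate entry point because it is wired up as hw_ops.stop at probe.
+ */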
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_hw_soc_info *soc_info;
+	unsigned long flags;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+	soc_info = &fd_hw->soc_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x1);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	/* HALT must be followed by RESET */
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x0);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_reserve_args *reserve_args =
+		(struct cam_fd_hw_reserve_args *)hw_reserve_args;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_cdm_bl_request *cdm_cmd;
+	int i;
+
+	if (!fd_hw || !reserve_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_reserve_args));
+		return -EINVAL;
+	}
+
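+	/*
+	 * Size the BL request for CAM_FD_MAX_HW_ENTRIES commands; the "- 1"
+	 * accounts for the cmd[] element already declared inside
+	 * cam_cdm_bl_request (assuming a one-element trailing array, which
+	 * is what this sizing implies).
+	 */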
+	cdm_cmd = kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_FD_MAX_HW_ENTRIES - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!cdm_cmd)
+		return -ENOMEM;
+
+	ctx_hw_private = kzalloc(sizeof(struct cam_fd_ctx_hw_private),
+		GFP_KERNEL);
+	if (!ctx_hw_private) {
+		kfree(cdm_cmd);
+		return -ENOMEM;
+	}
+
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "fd", sizeof(cdm_acquire.identifier));
+	cdm_acquire.cell_index = fd_hw->soc_info.index;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ctx_hw_private;
+	cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map;
+	for (i = 0; i < fd_hw->soc_info.num_reg_map; i++)
+		cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to acquire the CDM HW");
+		goto error;
+	}
+
+	ctx_hw_private->hw_ctx = reserve_args->hw_ctx;
+	ctx_hw_private->fd_hw = fd_hw;
+	ctx_hw_private->mode = reserve_args->mode;
+	ctx_hw_private->cdm_handle = cdm_acquire.handle;
+	ctx_hw_private->cdm_ops = cdm_acquire.ops;
+	ctx_hw_private->cdm_cmd = cdm_cmd;
+
+	reserve_args->ctx_hw_private = ctx_hw_private;
+
+	CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x",
+		ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode,
+		ctx_hw_private->cdm_handle);
+
+	return 0;
+error:
+	kfree(ctx_hw_private);
+	kfree(cdm_cmd);
+	return rc;
+}
+
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_release_args *release_args =
+		(struct cam_fd_hw_release_args *)hw_release_args;
+
+	if (!fd_hw || !release_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_release_args));
+		return -EINVAL;
+	}
+
+	ctx_hw_private =
+		(struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
+
+	rc = cam_cdm_release(ctx_hw_private->cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
+			ctx_hw_private->cdm_handle, rc);
+
+	kfree(ctx_hw_private->cdm_cmd);
+	kfree(ctx_hw_private);
+	release_args->ctx_hw_private = NULL;
+
+	return 0;
+}
+
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_FD_HW_CMD_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
+			cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
+		struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
+		struct cam_fd_core *fd_core =
+			(struct cam_fd_core *)fd_hw->core_info;
+
+		if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
+		fd_core->irq_cb.cam_fd_hw_mgr_cb =
+			irq_cb_args->cam_fd_hw_mgr_cb;
+		fd_core->irq_cb.data = irq_cb_args->data;
+		rc = 0;
+		break;
+	}
+	case CAM_FD_HW_CMD_PRESTART: {
+		struct cam_fd_hw_cmd_prestart_args *prestart_args;
+
+		if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
+		break;
+	}
+	case CAM_FD_HW_CMD_FRAME_DONE: {
+		struct cam_fd_hw_frame_done_args *cmd_frame_results;
+
+		if (sizeof(struct cam_fd_hw_frame_done_args) !=
+			arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_frame_results =
+			(struct cam_fd_hw_frame_done_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_frame_done(fd_hw,
+			cmd_frame_results);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return rc;
+}

+ 237 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h

@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_CORE_H_
+#define _CAM_FD_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <cam_defs.h>
+#include <cam_fd.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_soc.h"
+
+#define CAM_FD_IRQ_TO_MASK(irq)        (1 << (irq))
+#define CAM_FD_MASK_TO_IRQ(mask, irq)  ((mask) >> (irq))
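+/*
+ * Example: the irq_mask values in the cam_fd_hw_v*.h static info tables are
+ * built by OR-ing CAM_FD_IRQ_TO_MASK() over the IRQs to enable, e.g.
+ * CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+ * CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE) == 0x5, while
+ * (CAM_FD_MASK_TO_IRQ(status, irq) & 0x1) tests a single IRQ bit.
+ */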
+
+#define CAM_FD_HW_HALT_RESET_TIMEOUT   750
+
+/**
+ * enum cam_fd_core_state - FD Core internal states
+ *
+ * @CAM_FD_CORE_STATE_POWERDOWN       : Indicates FD core is powered down
+ * @CAM_FD_CORE_STATE_IDLE            : Indicates FD HW is in idle state.
+ *                                      Core can be in this state when it is
+ *                                      ready to process frames or when
+ *                                      processing is finished and results are
+ *                                      available
+ * @CAM_FD_CORE_STATE_PROCESSING      : Indicates FD core is processing frame
+ * @CAM_FD_CORE_STATE_READING_RESULTS : Indicates results are being read from
+ *                                      FD core
+ * @CAM_FD_CORE_STATE_RESET_PROGRESS  : Indicates FD Core is in reset state
+ */
+enum cam_fd_core_state {
+	CAM_FD_CORE_STATE_POWERDOWN,
+	CAM_FD_CORE_STATE_IDLE,
+	CAM_FD_CORE_STATE_PROCESSING,
+	CAM_FD_CORE_STATE_READING_RESULTS,
+	CAM_FD_CORE_STATE_RESET_PROGRESS,
+};
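+/*
+ * Typical flow (a sketch based on the handlers in cam_fd_hw_core.c; the
+ * READING_RESULTS transitions happen in the frame-done path):
+ * POWERDOWN -> IDLE on init, IDLE -> PROCESSING on start,
+ * PROCESSING -> READING_RESULTS -> IDLE on frame done, and any
+ * non-powerdown state -> RESET_PROGRESS -> IDLE via reset/halt_reset.
+ */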
+
+/**
+ * struct cam_fd_ctx_hw_private : HW private information for a specific hw ctx.
+ *                                This information is populated by HW layer on
+ *                                reserve() and given back to HW Mgr as private
+ *                                data for the hw context. This private_data
+ *                                has to be passed by the HW Mgr layer in
+ *                                further HW layer calls
+ *
+ * @hw_ctx           : Corresponding hw_ctx pointer
+ * @fd_hw            : FD HW info pointer
+ * @cdm_handle       : CDM Handle for this context
+ * @cdm_ops          : CDM Ops
+ * @cdm_cmd          : CDM command pointer
+ * @mode             : Mode this context is running
+ * @curr_req_private : Current Request information
+ *
+ */
+struct cam_fd_ctx_hw_private {
+	void                          *hw_ctx;
+	struct cam_hw_info            *fd_hw;
+	uint32_t                       cdm_handle;
+	struct cam_cdm_utils_ops      *cdm_ops;
+	struct cam_cdm_bl_request     *cdm_cmd;
+	enum cam_fd_hw_mode            mode;
+	struct cam_fd_hw_req_private  *curr_req_private;
+};
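+/*
+ * Lifecycle sketch: reserve() allocates this struct and acquires a virtual
+ * CDM handle, init()/deinit() stream the CDM on/off for the context,
+ * start() submits BLs through cdm_cmd, and release() drops the CDM handle
+ * and frees the struct.
+ */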
+
+/**
+ * struct cam_fd_core_regs : FD HW Core register offsets info
+ *
+ * @version              : Offset of version register
+ * @control              : Offset of control register
+ * @result_cnt           : Offset of result count register
+ * @result_addr          : Offset of results address register
+ * @image_addr           : Offset of image address register
+ * @work_addr            : Offset of work address register
+ * @ro_mode              : Offset of ro_mode register
+ * @results_reg_base     : Offset of results_reg_base register
+ * @raw_results_reg_base : Offset of raw_results_reg_base register
+ *
+ */
+struct cam_fd_core_regs {
+	uint32_t       version;
+	uint32_t       control;
+	uint32_t       result_cnt;
+	uint32_t       result_addr;
+	uint32_t       image_addr;
+	uint32_t       work_addr;
+	uint32_t       ro_mode;
+	uint32_t       results_reg_base;
+	uint32_t       raw_results_reg_base;
+};
+
+/**
+ * struct cam_fd_wrapper_regs : FD HW Wrapper register offsets info
+ *
+ * @wrapper_version     : Offset of wrapper_version register
+ * @cgc_disable         : Offset of cgc_disable register
+ * @hw_stop             : Offset of hw_stop register
+ * @sw_reset            : Offset of sw_reset register
+ * @vbif_req_priority   : Offset of vbif_req_priority register
+ * @vbif_priority_level : Offset of vbif_priority_level register
+ * @vbif_done_status    : Offset of vbif_done_status register
+ * @irq_mask            : Offset of irq mask register
+ * @irq_status          : Offset of irq status register
+ * @irq_clear           : Offset of irq clear register
+ *
+ */
+struct cam_fd_wrapper_regs {
+	uint32_t       wrapper_version;
+	uint32_t       cgc_disable;
+	uint32_t       hw_stop;
+	uint32_t       sw_reset;
+	uint32_t       vbif_req_priority;
+	uint32_t       vbif_priority_level;
+	uint32_t       vbif_done_status;
+	uint32_t       irq_mask;
+	uint32_t       irq_status;
+	uint32_t       irq_clear;
+};
+
+/**
+ * struct cam_fd_hw_errata_wa : FD HW Errata workaround enable/disable info
+ *
+ * @single_irq_only         : Whether to enable only one irq at any time
+ * @ro_mode_enable_always   : Whether to enable ro mode always
+ * @ro_mode_results_invalid : Whether results written directly into output
+ *                            memory by HW are valid or not
+ */
+struct cam_fd_hw_errata_wa {
+	bool   single_irq_only;
+	bool   ro_mode_enable_always;
+	bool   ro_mode_results_invalid;
+};
+
+/**
+ * struct cam_fd_hw_results_prop : FD HW Results properties
+ *
+ * @max_faces             : Maximum number of faces supported
+ * @per_face_entries      : Number of result registers per detected face
+ * @raw_results_entries   : Number of raw results entries for the full search
+ * @raw_results_available : Whether raw results available on this HW
+ *
+ */
+struct cam_fd_hw_results_prop {
+	uint32_t       max_faces;
+	uint32_t       per_face_entries;
+	uint32_t       raw_results_entries;
+	bool           raw_results_available;
+};
+
+/**
+ * struct cam_fd_hw_static_info : FD HW information based on HW version
+ *
+ * @core_version       : Core version of FD HW
+ * @wrapper_version    : Wrapper version of FD HW
+ * @core_regs          : Register offset information for core registers
+ * @wrapper_regs       : Register offset information for wrapper registers
+ * @results            : Information about results available on this HW
+ * @enable_errata_wa   : Errata workaround information
+ * @irq_mask           : IRQ mask to enable
+ * @qos_priority       : QoS priority setting for this chipset
+ * @qos_priority_level : QoS priority level setting for this chipset
+ * @supported_modes    : Supported HW modes on this HW version
+ * @ro_mode_supported  : Whether RO mode is supported on this HW
+ *
+ */
+struct cam_fd_hw_static_info {
+	struct cam_hw_version          core_version;
+	struct cam_hw_version          wrapper_version;
+	struct cam_fd_core_regs        core_regs;
+	struct cam_fd_wrapper_regs     wrapper_regs;
+	struct cam_fd_hw_results_prop  results;
+	struct cam_fd_hw_errata_wa     enable_errata_wa;
+	uint32_t                       irq_mask;
+	uint32_t                       qos_priority;
+	uint32_t                       qos_priority_level;
+	uint32_t                       supported_modes;
+	bool                           ro_mode_supported;
+};
+
+/**
+ * struct cam_fd_core : FD HW core data structure
+ *
+ * @hw_static_info      : HW information specific to version
+ * @hw_caps             : HW capabilities
+ * @core_state          : Current HW state
+ * @processing_complete : Whether processing is complete
+ * @reset_complete      : Whether reset is complete
+ * @halt_complete       : Whether halt is complete
+ * @hw_req_private      : Request that is being currently processed by HW
+ * @results_valid       : Whether HW frame results are available to get
+ * @spin_lock           : Spinlock to protect shared data in the HW layer
+ * @irq_cb              : HW Manager callback information
+ *
+ */
+struct cam_fd_core {
+	struct cam_fd_hw_static_info   *hw_static_info;
+	struct cam_fd_hw_caps           hw_caps;
+	enum cam_fd_core_state          core_state;
+	struct completion               processing_complete;
+	struct completion               reset_complete;
+	struct completion               halt_complete;
+	struct cam_fd_hw_req_private   *hw_req_private;
+	bool                            results_valid;
+	spinlock_t                      spin_lock;
+	struct cam_fd_hw_cmd_set_irq_cb irq_cb;
+};
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_caps *hw_caps);
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data);
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size);
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size);
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size);
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size);
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size);
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_fd_hw_read(void *hw_priv, void *read_args, uint32_t arg_size);
+int cam_fd_hw_write(void *hw_priv, void *write_args, uint32_t arg_size);
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+#endif /* _CAM_FD_HW_CORE_H_ */

+ 235 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c

@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_v41.h"
+#include "cam_fd_hw_v501.h"
+#include "cam_fd_hw_v600.h"
+
+static char fd_dev_name[8];
+
+static int cam_fd_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *fd_hw;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_fd_core *fd_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_fd_hw_static_info *hw_static_info = NULL;
+	int rc = 0;
+	uint32_t hw_idx;
+	struct cam_fd_hw_init_args init_args;
+	struct cam_fd_hw_deinit_args deinit_args;
+
+	fd_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!fd_hw_intf)
+		return -ENOMEM;
+
+	fd_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!fd_hw) {
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+
+	fd_core = kzalloc(sizeof(struct cam_fd_core), GFP_KERNEL);
+	if (!fd_core) {
+		kfree(fd_hw);
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"cell-index", &hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to read cell-index, rc=%d", rc);
+		goto free_memory;
+	}
+
+	fd_hw_intf->hw_priv = fd_hw;
+	fd_hw->core_info = fd_core;
+	fd_hw_intf->hw_idx = hw_idx;
+
+	memset(fd_dev_name, 0, sizeof(fd_dev_name));
+	snprintf(fd_dev_name, sizeof(fd_dev_name),
+		"fd%1u", fd_hw_intf->hw_idx);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_hw->soc_info.pdev = pdev;
+	fd_hw->soc_info.dev = &pdev->dev;
+	fd_hw->soc_info.dev_name = fd_dev_name;
+	fd_hw->open_count = 0;
+	mutex_init(&fd_hw->hw_mutex);
+	spin_lock_init(&fd_hw->hw_lock);
+	init_completion(&fd_hw->hw_complete);
+
+	spin_lock_init(&fd_core->spin_lock);
+	init_completion(&fd_core->processing_complete);
+	init_completion(&fd_core->halt_complete);
+	init_completion(&fd_core->reset_complete);
+
+	fd_hw_intf->hw_ops.get_hw_caps = cam_fd_hw_get_hw_caps;
+	fd_hw_intf->hw_ops.init = cam_fd_hw_init;
+	fd_hw_intf->hw_ops.deinit = cam_fd_hw_deinit;
+	fd_hw_intf->hw_ops.reset = cam_fd_hw_reset;
+	fd_hw_intf->hw_ops.reserve = cam_fd_hw_reserve;
+	fd_hw_intf->hw_ops.release = cam_fd_hw_release;
+	fd_hw_intf->hw_ops.start = cam_fd_hw_start;
+	fd_hw_intf->hw_ops.stop = cam_fd_hw_halt_reset;
+	fd_hw_intf->hw_ops.read = NULL;
+	fd_hw_intf->hw_ops.write = NULL;
+	fd_hw_intf->hw_ops.process_cmd = cam_fd_hw_process_cmd;
+	fd_hw_intf->hw_type = CAM_HW_FD;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_FD, "No Of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto free_memory;
+	}
+	hw_static_info = (struct cam_fd_hw_static_info *)match_dev->data;
+	fd_core->hw_static_info = hw_static_info;
+
+	CAM_DBG(CAM_FD, "HW Static Info : version core[%d.%d] wrapper[%d.%d]",
+		hw_static_info->core_version.major,
+		hw_static_info->core_version.minor,
+		hw_static_info->wrapper_version.major,
+		hw_static_info->wrapper_version.minor);
+
+	rc = cam_fd_soc_init_resources(&fd_hw->soc_info, cam_fd_hw_irq, fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to init soc, rc=%d", rc);
+		goto free_memory;
+	}
+
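+	/*
+	 * Power the core up once at probe only to read its capability
+	 * registers, then power it back down until a context needs it.
+	 */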
+	memset(&init_args, 0x0, sizeof(init_args));
+	memset(&deinit_args, 0x0, sizeof(deinit_args));
+	rc = cam_fd_hw_init(fd_hw, &init_args, sizeof(init_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to hw init, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_fd_hw_util_get_hw_caps(fd_hw, &fd_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to get hw caps, rc=%d", rc);
+		goto deinit_hw;
+	}
+
+	rc = cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to deinit hw, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	platform_set_drvdata(pdev, fd_hw_intf);
+	CAM_DBG(CAM_FD, "FD-%d probe successful", fd_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_hw:
+	if (cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args)))
+		CAM_ERR(CAM_FD, "Failed in hw deinit");
+deinit_platform_res:
+	if (cam_fd_soc_deinit_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Failed in soc deinit");
+	mutex_destroy(&fd_hw->hw_mutex);
+free_memory:
+	kfree(fd_hw);
+	kfree(fd_hw_intf);
+	kfree(fd_core);
+
+	return rc;
+}
+
+static int cam_fd_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_hw_info *fd_hw;
+	struct cam_fd_core *fd_core;
+
+	fd_hw_intf = platform_get_drvdata(pdev);
+	if (!fd_hw_intf) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw_intf from pdev");
+		return -EINVAL;
+	}
+
+	fd_hw = fd_hw_intf->hw_priv;
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw from fd_hw_intf");
+		rc = -ENODEV;
+		goto free_fd_hw_intf;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	if (!fd_core) {
+		CAM_ERR(CAM_FD, "Invalid fd_core from fd_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	kfree(fd_core);
+
+deinit_platform_res:
+	rc = cam_fd_soc_deinit_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in FD soc deinit, rc=%d", rc);
+
+	mutex_destroy(&fd_hw->hw_mutex);
+	kfree(fd_hw);
+
+free_fd_hw_intf:
+	kfree(fd_hw_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_hw_dt_match[] = {
+	{
+		.compatible = "qcom,fd41",
+		.data = &cam_fd_wrapper120_core410_info,
+	},
+	{
+		.compatible = "qcom,fd501",
+		.data = &cam_fd_wrapper200_core501_info,
+	},
+	{
+		.compatible = "qcom,fd600",
+		.data = &cam_fd_wrapper200_core600_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_fd_hw_dt_match);
+
+static struct platform_driver cam_fd_hw_driver = {
+	.probe = cam_fd_hw_dev_probe,
+	.remove = cam_fd_hw_dev_remove,
+	.driver = {
+		.name = "cam_fd_hw",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_hw_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_fd_hw_init_module(void)
+{
+	return platform_driver_register(&cam_fd_hw_driver);
+}
+
+static void __exit cam_fd_hw_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_hw_driver);
+}
+
+module_init(cam_fd_hw_init_module);
+module_exit(cam_fd_hw_exit_module);
+MODULE_DESCRIPTION("CAM FD HW driver");
+MODULE_LICENSE("GPL v2");

+ 282 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h

@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_INTF_H_
+#define _CAM_FD_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <cam_cpas.h>
+#include <cam_req_mgr.h>
+#include <cam_fd.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+#define CAM_FD_MAX_IO_BUFFERS        5
+#define CAM_FD_MAX_HW_ENTRIES        5
+
+/**
+ * enum cam_fd_hw_type - Enum for FD HW type
+ *
+ * @CAM_HW_FD : FaceDetection HW type
+ */
+enum cam_fd_hw_type {
+	CAM_HW_FD,
+};
+
+/**
+ * enum cam_fd_hw_mode - Mode in which HW can run
+ *
+ * @CAM_FD_MODE_FACEDETECTION : Face Detection mode in which face search
+ *                              is done on the given frame
+ * @CAM_FD_MODE_PYRAMID       : Pyramid mode where a pyramid image is generated
+ *                              from an input image
+ */
+enum cam_fd_hw_mode {
+	CAM_FD_MODE_FACEDETECTION    = 0x1,
+	CAM_FD_MODE_PYRAMID          = 0x2,
+};
+
+/**
+ * enum cam_fd_priority - FD priority levels
+ *
+ * @CAM_FD_PRIORITY_HIGH   : Indicates high priority client, driver prioritizes
+ *                           frame requests coming from contexts with HIGH
+ *                           priority over contexts with NORMAL priority
+ * @CAM_FD_PRIORITY_NORMAL : Indicates normal priority client
+ */
+enum cam_fd_priority {
+	CAM_FD_PRIORITY_HIGH         = 0x0,
+	CAM_FD_PRIORITY_NORMAL,
+};
+
+/**
+ * enum cam_fd_hw_irq_type - FD HW IRQ types
+ *
+ * @CAM_FD_IRQ_FRAME_DONE : Indicates frame processing is finished
+ * @CAM_FD_IRQ_HALT_DONE  : Indicates HW halt is finished
+ * @CAM_FD_IRQ_RESET_DONE : Indicates HW reset is finished
+ */
+enum cam_fd_hw_irq_type {
+	CAM_FD_IRQ_FRAME_DONE,
+	CAM_FD_IRQ_HALT_DONE,
+	CAM_FD_IRQ_RESET_DONE,
+};
+
+/**
+ * enum cam_fd_hw_cmd_type - FD HW layer custom commands
+ *
+ * @CAM_FD_HW_CMD_PRESTART          : Command to process pre-start settings
+ * @CAM_FD_HW_CMD_FRAME_DONE        : Command to process frame done settings
+ * @CAM_FD_HW_CMD_UPDATE_SOC        : Command to process soc update
+ * @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback
+ * @CAM_FD_HW_CMD_MAX               : Indicates max cmd
+ */
+enum cam_fd_hw_cmd_type {
+	CAM_FD_HW_CMD_PRESTART,
+	CAM_FD_HW_CMD_FRAME_DONE,
+	CAM_FD_HW_CMD_UPDATE_SOC,
+	CAM_FD_HW_CMD_REGISTER_CALLBACK,
+	CAM_FD_HW_CMD_MAX,
+};
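+/*
+ * Illustrative only: a HW-manager-side callback registration through the
+ * generic hw_ops table (cam_fd_mgr_irq_cb and hw_mgr are assumed names,
+ * the actual hw_mgr code lives in cam_fd_hw_mgr.c):
+ *
+ *	struct cam_fd_hw_cmd_set_irq_cb irq_cb = {
+ *		.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb,
+ *		.data = hw_mgr,
+ *	};
+ *	rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+ *		CAM_FD_HW_CMD_REGISTER_CALLBACK, &irq_cb, sizeof(irq_cb));
+ */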
+
+/**
+ * struct cam_fd_hw_io_buffer : FD HW IO Buffer information
+ *
+ * @valid    : Whether this IO Buf configuration is valid
+ * @io_cfg   : IO Configuration information
+ * @num_buf  : Number of planes in the io_addr/cpu_addr arrays
+ * @io_addr  : Array of IO address information for planes
+ * @cpu_addr : Array of CPU address information for planes
+ */
+struct cam_fd_hw_io_buffer {
+	bool                   valid;
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t               num_buf;
+	uint64_t               io_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t              cpu_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_fd_hw_req_private : FD HW layer's private information
+ *               specific to a request
+ *
+ * @ctx_hw_private  : FD HW layer's ctx specific private data
+ * @request_id      : Request ID corresponding to this private information
+ * @get_raw_results : Whether to get raw results for this request
+ * @ro_mode_enabled : Whether RO mode is enabled for this request
+ * @fd_results      : Pointer to save face detection results
+ * @raw_results     : Pointer to save face detection raw results
+ */
+struct cam_fd_hw_req_private {
+	void                  *ctx_hw_private;
+	uint64_t               request_id;
+	bool                   get_raw_results;
+	bool                   ro_mode_enabled;
+	struct cam_fd_results *fd_results;
+	uint32_t              *raw_results;
+};
+
+/**
+ * struct cam_fd_hw_reserve_args : Reserve args for this HW context
+ *
+ * @hw_ctx         : HW context for which reserve is requested
+ * @mode           : Mode for which this reserve is requested
+ * @ctx_hw_private : Pointer to save HW layer's private information specific
+ *                   to this hw context. This has to be passed in
+ *                   further HW layer calls
+ */
+struct cam_fd_hw_reserve_args {
+	void                  *hw_ctx;
+	enum cam_fd_hw_mode    mode;
+	void                  *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_release_args : Release args for this HW context
+ *
+ * @hw_ctx         : HW context for which release is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_release_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_init_args : Init args for this HW context
+ *
+ * @hw_ctx         : HW context for which init is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_init_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_deinit_args : Deinit args for this HW context
+ *
+ * @hw_ctx         : HW context for which deinit is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_deinit_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_prestart_args : Prestart command args
+ *
+ * @hw_ctx               : HW context which submitted this prestart
+ * @ctx_hw_private       : HW layer's private information specific to
+ *                         this hw context
+ * @request_id           : Request ID corresponds to this pre-start command
+ * @get_raw_results      : Whether to get raw results for this request
+ * @input_buf            : Input IO Buffer information for this request
+ * @output_buf           : Output IO Buffer information for this request
+ * @cmd_buf_addr         : Command buffer address to fill kmd commands
+ * @size                 : Size available in command buffer
+ * @pre_config_buf_size  : Size of the KMD-filled commands that have to be
+ *                         inserted before the UMD commands
+ * @post_config_buf_size : Size of the KMD-filled commands that have to be
+ *                         inserted after the UMD commands
+ * @hw_req_private       : HW layer's private information specific to
+ *                         this request
+ */
+struct cam_fd_hw_cmd_prestart_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	bool                          get_raw_results;
+	struct cam_fd_hw_io_buffer    input_buf[CAM_FD_MAX_IO_BUFFERS];
+	struct cam_fd_hw_io_buffer    output_buf[CAM_FD_MAX_IO_BUFFERS];
+	uint32_t                     *cmd_buf_addr;
+	uint32_t                      size;
+	uint32_t                      pre_config_buf_size;
+	uint32_t                      post_config_buf_size;
+	struct cam_fd_hw_req_private  hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_start_args : Start command args
+ *
+ * @hw_ctx                : HW context submitting this start command
+ * @ctx_hw_private        : HW layer's private information specific to
+ *                          this hw context
+ * @hw_req_private        : HW layer's private information specific to
+ *                          this request
+ * @hw_update_entries     : HW update entries corresponds to this request
+ * @num_hw_update_entries : Number of hw update entries
+ */
+struct cam_fd_hw_cmd_start_args {
+	void                          *hw_ctx;
+	void                          *ctx_hw_private;
+	struct cam_fd_hw_req_private  *hw_req_private;
+	struct cam_hw_update_entry    *hw_update_entries;
+	uint32_t                       num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_hw_stop_args : Stop command args
+ *
+ * @hw_ctx         : HW context submitting the stop command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id     : Request ID that needs to be stopped
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_stop_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_frame_done_args : Frame done command args
+ *
+ * @hw_ctx         : HW context submitting the frame done request
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id     : Request ID for which frame processing is done
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_frame_done_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_reset_args : Reset command args
+ *
+ * @hw_ctx         : HW context submitting the reset command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_reset_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_set_irq_cb : Set IRQ callback command args
+ *
+ * @cam_fd_hw_mgr_cb : HW Mgr's callback pointer
+ * @data             : HW Mgr's private data
+ */
+struct cam_fd_hw_cmd_set_irq_cb {
+	int (*cam_fd_hw_mgr_cb)(void *data, enum cam_fd_hw_irq_type irq_type);
+	void *data;
+};
+
+#endif /* _CAM_FD_HW_INTF_H_ */

+ 295 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c

@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk/qcom.h>
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+
+static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	if (!irq_data)
+		return false;
+
+	CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
+		handle, userdata, irq_data->irq_type);
+
+	return false;
+}
+
+static int cam_fd_hw_soc_util_setup_regbase_indices(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	uint32_t index;
+	int rc, i;
+
+	for (i = 0; i < CAM_FD_REG_MAX; i++)
+		soc_private->regbase_index[i] = -1;
+
+	if ((soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) ||
+		(soc_info->num_mem_block != CAM_FD_REG_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "fd_core", &index);
+	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+		soc_private->regbase_index[CAM_FD_REG_CORE] = index;
+	} else {
+		CAM_ERR(CAM_FD, "regbase not found for FD_CORE, rc=%d, %d %d",
+			rc, index, CAM_FD_REG_MAX);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "fd_wrapper", &index);
+	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+		soc_private->regbase_index[CAM_FD_REG_WRAPPER] = index;
+	} else {
+		CAM_ERR(CAM_FD, "regbase not found for FD_WRAPPER, rc=%d, %d %d",
+			rc, index, CAM_FD_REG_MAX);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "Reg indices : CORE=%d, WRAPPER=%d",
+		soc_private->regbase_index[CAM_FD_REG_CORE],
+		soc_private->regbase_index[CAM_FD_REG_WRAPPER]);
+
+	return 0;
+}
+
+static int cam_fd_soc_set_clk_flags(struct cam_hw_soc_info *soc_info)
+{
+	int i, rc = 0;
+
+	if (soc_info->num_clk > CAM_SOC_MAX_CLK) {
+		CAM_ERR(CAM_FD, "Invalid num clk %d", soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	/* set memcore and mem periphery logic flags to 0 */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if ((strcmp(soc_info->clk_name[i], "fd_core_clk") == 0) ||
+			(strcmp(soc_info->clk_name[i], "fd_core_uar_clk") ==
+			0)) {
+			rc = cam_soc_util_set_clk_flags(soc_info, i,
+				CLKFLAG_NORETAIN_MEM);
+			if (rc)
+				CAM_ERR(CAM_FD,
+					"Failed in NORETAIN_MEM i=%d, rc=%d",
+					i, rc);
+
+			rc = cam_soc_util_set_clk_flags(soc_info, i,
+				CLKFLAG_NORETAIN_PERIPH);
+			if (rc)
+				CAM_ERR(CAM_FD,
+					"Failed in NORETAIN_PERIPH i=%d, rc=%d",
+					i, rc);
+		}
+	}
+
+	return rc;
+}
+
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int32_t reg_index = soc_private->regbase_index[reg_base];
+
+	CAM_DBG(CAM_FD, "FD_REG_WRITE: Base[%d] Offset[0x%08x] Value[0x%08x]",
+		reg_base, reg_offset, reg_value);
+
+	cam_io_w_mb(reg_value,
+		soc_info->reg_map[reg_index].mem_base + reg_offset);
+}
+
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int32_t reg_index = soc_private->regbase_index[reg_base];
+	uint32_t reg_value;
+
+	reg_value = cam_io_r_mb(
+		soc_info->reg_map[reg_index].mem_base + reg_offset);
+
+	CAM_DBG(CAM_FD, "FD_REG_READ: Base[%d] Offset[0x%08x] Value[0x%08x]",
+		reg_base, reg_offset, reg_value);
+
+	return reg_value;
+}
+
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private = soc_info->soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote = {0};
+	int rc;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
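+	/*
+	 * FD votes two generic AXI paths, one per transaction direction,
+	 * at a fixed bandwidth rather than a per-usecase calculation.
+	 */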
+	axi_vote.num_paths = 2;
+	axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL;
+	axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ;
+	axi_vote.axi_path[0].camnoc_bw = 7200000;
+	axi_vote.axi_path[0].mnoc_ab_bw = 7200000;
+	axi_vote.axi_path[0].mnoc_ib_bw = 7200000;
+	axi_vote.axi_path[1].path_data_type = CAM_AXI_PATH_DATA_ALL;
+	axi_vote.axi_path[1].transac_type = CAM_AXI_TRANSACTION_WRITE;
+	axi_vote.axi_path[1].camnoc_bw = 7200000;
+	axi_vote.axi_path[1].mnoc_ab_bw = 7200000;
+	axi_vote.axi_path[1].mnoc_ib_bw = 7200000;
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in CPAS START, rc=%d", rc);
+		return -EFAULT;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+		true);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error enable platform failed, rc=%d", rc);
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	if (cam_cpas_stop(soc_private->cpas_handle))
+		CAM_ERR(CAM_FD, "Error in CPAS STOP");
+
+	return rc;
+}
+
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private;
+	int rc = 0;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_FD, "Invalid soc_info param");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_FD, "disable platform resources failed, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in CPAS STOP, handle=0x%x, rc=%d",
+			soc_private->cpas_handle, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data)
+{
+	struct cam_fd_soc_private *soc_private;
+	struct cam_cpas_register_params cpas_register_param;
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		private_data);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in request_platform_resource rc=%d",
+			rc);
+		return rc;
+	}
+
+	rc = cam_fd_soc_set_clk_flags(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "failed in set_clk_flags rc=%d", rc);
+		goto release_res;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_fd_soc_private), GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+	soc_info->soc_private = soc_private;
+
+	rc = cam_fd_hw_soc_util_setup_regbase_indices(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in setup regbase, rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	cpas_register_param.userdata = private_data;
+	cpas_register_param.cam_cpas_client_cb = cam_fd_hw_util_cpas_callback;
+
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_FD, "CPAS registration failed");
+		goto free_soc_private;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+	CAM_DBG(CAM_FD, "CPAS handle=%d", soc_private->cpas_handle);
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int rc;
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister cpas failed, handle=%d, rc=%d",
+			soc_private->cpas_handle, rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}

+ 46 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h

@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_SOC_H_
+#define _CAM_FD_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+/**
+ * enum cam_fd_reg_base - Enum for FD register sets
+ *
+ * @CAM_FD_REG_CORE    : Indicates FD Core register space
+ * @CAM_FD_REG_WRAPPER : Indicates FD Wrapper register space
+ * @CAM_FD_REG_MAX     : Max number of register sets supported
+ */
+enum cam_fd_reg_base {
+	CAM_FD_REG_CORE,
+	CAM_FD_REG_WRAPPER,
+	CAM_FD_REG_MAX
+};
+
+/**
+ * struct cam_fd_soc_private : FD private SOC information
+ *
+ * @regbase_index : Mapping between Register base enum to register index in SOC
+ * @cpas_handle   : CPAS handle
+ *
+ */
+struct cam_fd_soc_private {
+	int32_t        regbase_index[CAM_FD_REG_MAX];
+	uint32_t       cpas_handle;
+};
+
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data);
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset);
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value);
+
+#endif /* _CAM_FD_HW_SOC_H_ */

+ 63 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h

@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_V41_H_
+#define _CAM_FD_HW_V41_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper120_core410_info = {
+	.core_version = {
+		.major  = 4,
+		.minor  = 1,
+		.incr   = 0,
+	},
+	.wrapper_version = {
+		.major  = 1,
+		.minor  = 2,
+		.incr   = 0,
+	},
+	.core_regs = {
+		.version               = 0x38,
+		.control               = 0x0,
+		.result_cnt            = 0x4,
+		.result_addr           = 0x20,
+		.image_addr            = 0x24,
+		.work_addr             = 0x28,
+		.ro_mode               = 0x34,
+		.results_reg_base      = 0x400,
+		.raw_results_reg_base  = 0x800,
+	},
+	.wrapper_regs = {
+		.wrapper_version       = 0x0,
+		.cgc_disable           = 0x4,
+		.hw_stop               = 0x8,
+		.sw_reset              = 0x10,
+		.vbif_req_priority     = 0x20,
+		.vbif_priority_level   = 0x24,
+		.vbif_done_status      = 0x34,
+		.irq_mask              = 0x50,
+		.irq_status            = 0x54,
+		.irq_clear             = 0x58,
+	},
+	.results = {
+		.max_faces             = 35,
+		.per_face_entries      = 4,
+		.raw_results_available = true,
+		.raw_results_entries   = 512,
+	},
+	.enable_errata_wa = {
+		.single_irq_only         = true,
+		.ro_mode_enable_always   = true,
+		.ro_mode_results_invalid = true,
+	},
+	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+	.qos_priority       = 4,
+	.qos_priority_level = 4,
+	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+	.ro_mode_supported  = true,
+};
+
+#endif /* _CAM_FD_HW_V41_H_ */

+ 63 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h

@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_V501_H_
+#define _CAM_FD_HW_V501_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper200_core501_info = {
+	.core_version = {
+		.major  = 5,
+		.minor  = 0,
+		.incr   = 1,
+	},
+	.wrapper_version = {
+		.major  = 2,
+		.minor  = 0,
+		.incr   = 0,
+	},
+	.core_regs = {
+		.version               = 0x38,
+		.control               = 0x0,
+		.result_cnt            = 0x4,
+		.result_addr           = 0x20,
+		.image_addr            = 0x24,
+		.work_addr             = 0x28,
+		.ro_mode               = 0x34,
+		.results_reg_base      = 0x400,
+		.raw_results_reg_base  = 0x800,
+	},
+	.wrapper_regs = {
+		.wrapper_version       = 0x0,
+		.cgc_disable           = 0x4,
+		.hw_stop               = 0x8,
+		.sw_reset              = 0x10,
+		.vbif_req_priority     = 0x20,
+		.vbif_priority_level   = 0x24,
+		.vbif_done_status      = 0x34,
+		.irq_mask              = 0x50,
+		.irq_status            = 0x54,
+		.irq_clear             = 0x58,
+	},
+	.results = {
+		.max_faces             = 35,
+		.per_face_entries      = 4,
+		.raw_results_available = true,
+		.raw_results_entries   = 512,
+	},
+	.enable_errata_wa = {
+		.single_irq_only         = true,
+		.ro_mode_enable_always   = true,
+		.ro_mode_results_invalid = true,
+	},
+	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+	.qos_priority       = 4,
+	.qos_priority_level = 4,
+	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+	.ro_mode_supported  = true,
+};
+
+#endif /* _CAM_FD_HW_V501_H_ */

+ 61 - 0
drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v600.h

@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_V600_H_
+#define _CAM_FD_HW_V600_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper200_core600_info = {
+	.core_version = {
+		.major  = 6,
+		.minor  = 0,
+		.incr   = 0,
+	},
+	.wrapper_version = {
+		.major  = 2,
+		.minor  = 0,
+		.incr   = 0,
+	},
+	.core_regs = {
+		.version               = 0x38,
+		.control               = 0x0,
+		.result_cnt            = 0x4,
+		.result_addr           = 0x20,
+		.image_addr            = 0x24,
+		.work_addr             = 0x28,
+		.ro_mode               = 0x34,
+		.results_reg_base      = 0x400,
+	},
+	.wrapper_regs = {
+		.wrapper_version       = 0x0,
+		.cgc_disable           = 0x4,
+		.hw_stop               = 0x8,
+		.sw_reset              = 0x10,
+		.vbif_req_priority     = 0x20,
+		.vbif_priority_level   = 0x24,
+		.vbif_done_status      = 0x34,
+		.irq_mask              = 0x50,
+		.irq_status            = 0x54,
+		.irq_clear             = 0x58,
+	},
+	.results = {
+		.max_faces             = 35,
+		.per_face_entries      = 4,
+		.raw_results_available = false,
+	},
+	.enable_errata_wa = {
+		.single_irq_only         = true,
+		.ro_mode_enable_always   = true,
+		.ro_mode_results_invalid = true,
+	},
+	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+	.qos_priority       = 4,
+	.qos_priority_level = 4,
+	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+	.ro_mode_supported  = true,
+};
+
+#endif /* _CAM_FD_HW_V600_H_ */

+ 17 - 0
drivers/cam_icp/Makefile

@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_sync
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/fw_inc
+ccflags-y += -I$(srctree)/techpack/camera
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o

+ 271 - 0
drivers/cam_icp/cam_icp_context.c

@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <cam_sync.h>
+#include <cam_defs.h>
+#include <cam_icp.h>
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_icp_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+
+static const char icp_dev_name[] = "icp";
+
+static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	bool b_mem_found = false;
+
+	if (!ctx) {
+		CAM_ERR(CAM_ICP, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_ICP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	return rc;
+}
+
+static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_ACQUIRED;
+		trace_cam_context_state("ICP", ctx);
+	}
+
+	return rc;
+}
+
+static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Unable to release device");
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("ICP", ctx);
+	return rc;
+}
+
+static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ICP", ctx);
+	}
+
+	return rc;
+}
+
+static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to flush device");
+
+	return rc;
+}
+
+static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+	size_t len;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "[%s][%d] Cannot get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
+
+	if (((packet->header.op_code & 0xff) ==
+		CAM_ICP_OPCODE_IPE_SETTINGS) ||
+		((packet->header.op_code & 0xff) ==
+		CAM_ICP_OPCODE_BPS_SETTINGS))
+		rc = cam_context_config_dev_to_hw(ctx, cmd);
+	else
+		rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to prepare device");
+
+	return rc;
+}
+
+static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to stop device");
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("ICP", ctx);
+	return rc;
+}
+
+static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to stop device");
+
+	rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to release device");
+
+	return rc;
+}
+
+static int __cam_icp_handle_buf_done_in_ready(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+static struct cam_ctx_ops
+	cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_icp_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_icp_release_dev_in_acquired,
+			.start_dev = __cam_icp_start_dev_in_acquired,
+			.config_dev = __cam_icp_config_dev_in_ready,
+			.flush_dev = __cam_icp_flush_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+		.pagefault_ops = cam_icp_context_dump_active_request,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_icp_stop_dev_in_ready,
+			.release_dev = __cam_icp_release_dev_in_ready,
+			.config_dev = __cam_icp_config_dev_in_ready,
+			.flush_dev = __cam_icp_flush_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+		.pagefault_ops = cam_icp_context_dump_active_request,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+		.pagefault_ops = cam_icp_context_dump_active_request,
+	},
+};
+
+int cam_icp_context_init(struct cam_icp_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id)
+{
+	int rc;
+
+	if ((!ctx) || (!ctx->base) || (!hw_intf)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id,
+		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx->base->state_machine = cam_icp_ctx_state_machine;
+	ctx->base->ctx_priv = ctx;
+	ctx->ctxt_to_hw_map = NULL;
+
+err:
+	return rc;
+}
+
+int cam_icp_context_deinit(struct cam_icp_context *ctx)
+{
+	if ((!ctx) || (!ctx->base)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
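
The per-state ops table above is what makes the context layer generic: the
shared cam_context code indexes the table by the current state and invokes
only the handlers that state registers (e.g. acquire_dev is reachable only
from Available). A minimal sketch of that dispatch, assuming a ctx_mutex
field guards the state; this is illustrative, not the actual cam_context.c
implementation:

	/* Sketch: how a state-indexed ops table is consumed. */
	static int example_dispatch_acquire(struct cam_context *ctx,
		struct cam_acquire_dev_cmd *cmd)
	{
		int rc = -EPROTO;	/* no handler for this state */

		mutex_lock(&ctx->ctx_mutex);
		if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev)
			rc = ctx->state_machine[ctx->state].ioctl_ops
				.acquire_dev(ctx, cmd);
		mutex_unlock(&ctx->ctx_mutex);
		return rc;
	}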

+ 42 - 0
drivers/cam_icp/cam_icp_context.h

@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ICP_CONTEXT_H_
+#define _CAM_ICP_CONTEXT_H_
+
+#include "cam_context.h"
+
+/**
+ * struct cam_icp_context - icp context
+ * @base: icp context object
+ * @state_machine: state machine for ICP context
+ * @req_base: common request structure
+ * @state: icp context state
+ * @ctxt_to_hw_map: context to FW handle mapping
+ */
+struct cam_icp_context {
+	struct cam_context *base;
+	struct cam_ctx_ops *state_machine;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+	uint32_t state;
+	void *ctxt_to_hw_map;
+};
+
+/**
+ * cam_icp_context_init() - ICP context init
+ * @ctx: Pointer to context
+ * @hw_intf: Pointer to ICP hardware interface
+ * @ctx_id: ID for this context
+ */
+int cam_icp_context_init(struct cam_icp_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id);
+
+/**
+ * cam_icp_context_deinit() - ICP context deinit
+ * @ctx: Pointer to context
+ */
+int cam_icp_context_deinit(struct cam_icp_context *ctx);
+
+#endif /* _CAM_ICP_CONTEXT_H_ */
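
Usage note: callers provide the backing cam_context via @base before calling
init, exactly as the subdev probe in the next file does. A hypothetical
stand-alone sketch (the example_* name is illustrative only):

	static int example_ctx_setup(struct cam_hw_mgr_intf *hw_intf)
	{
		static struct cam_context base;
		static struct cam_icp_context icp = { .base = &base };
		int rc;

		rc = cam_icp_context_init(&icp, hw_intf, 0);
		if (rc)
			return rc;
		/* ... submit work through the cam_context layer ... */
		return cam_icp_context_deinit(&icp);
	}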

+ 275 - 0
drivers/cam_icp/cam_icp_subdev.c

@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_req_mgr.h>
+#include <cam_defs.h>
+#include <cam_icp.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_icp_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+#define CAM_ICP_DEV_NAME        "cam-icp"
+
+struct cam_icp_subdev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_ICP_CTX_MAX];
+	struct cam_icp_context ctx_icp[CAM_ICP_CTX_MAX];
+	struct mutex icp_lock;
+	int32_t open_cnt;
+	int32_t reserved;
+};
+
+static struct cam_icp_subdev g_icp_dev;
+
+static const struct of_device_id cam_icp_dt_match[] = {
+	{.compatible = "qcom,cam-icp"},
+	{}
+};
+
+static void cam_icp_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_ICP, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
+static int cam_icp_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt >= 1) {
+		CAM_ERR(CAM_ICP, "ICP subdev is already opened");
+		rc = -EALREADY;
+		goto end;
+	}
+
+	if (!node) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "FW download failed");
+		goto end;
+	}
+	g_icp_dev.open_cnt++;
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;
+}
+
+static int cam_icp_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_ICP, "ICP subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+	g_icp_dev.open_cnt--;
+	if (!node) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	if (!hw_mgr_intf) {
+		CAM_ERR(CAM_ICP, "hw_mgr_intf is not initialized");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_node_shutdown(node);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "HW close failed");
+		goto end;
+	}
+
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;
+}
+
+const struct v4l2_subdev_internal_ops cam_icp_subdev_internal_ops = {
+	.open = cam_icp_subdev_open,
+	.close = cam_icp_subdev_close,
+};
+
+static int cam_icp_probe(struct platform_device *pdev)
+{
+	int rc = 0, i = 0;
+	struct cam_node *node;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	int iommu_hdl = -1;
+
+	if (!pdev) {
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -EINVAL;
+	}
+
+	g_icp_dev.sd.pdev = pdev;
+	g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
+		CAM_ICP_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
+		goto probe_fail;
+	}
+
+	node = (struct cam_node *) g_icp_dev.sd.token;
+
+	hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
+	if (!hw_mgr_intf) {
+		rc = -ENOMEM;
+		goto hw_alloc_fail;
+	}
+
+	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
+		&iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
+		goto hw_init_fail;
+	}
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
+		rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
+					hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "ICP context init failed");
+			goto ctx_fail;
+		}
+	}
+
+	rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
+				CAM_ICP_CTX_MAX, CAM_ICP_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP node init failed");
+		goto ctx_fail;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_icp_dev_iommu_fault_handler, node);
+
+	g_icp_dev.node = node;
+	g_icp_dev.open_cnt = 0;
+	mutex_init(&g_icp_dev.icp_lock);
+
+	CAM_DBG(CAM_ICP, "ICP probe complete");
+
+	return rc;
+
+ctx_fail:
+	for (--i; i >= 0; i--)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+hw_init_fail:
+	kfree(hw_mgr_intf);
+hw_alloc_fail:
+	cam_subdev_remove(&g_icp_dev.sd);
+probe_fail:
+	return rc;
+}
+
+static int cam_icp_remove(struct platform_device *pdev)
+{
+	int i;
+	struct v4l2_subdev *sd;
+	struct cam_subdev *subdev;
+
+	if (!pdev) {
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -ENODEV;
+	}
+
+	sd = platform_get_drvdata(pdev);
+	if (!sd) {
+		CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
+		return -ENODEV;
+	}
+
+	subdev = v4l2_get_subdevdata(sd);
+	if (!subdev) {
+		CAM_ERR(CAM_ICP, "cam subdev is NULL");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+	cam_node_deinit(g_icp_dev.node);
+	cam_subdev_remove(&g_icp_dev.sd);
+	mutex_destroy(&g_icp_dev.icp_lock);
+
+	return 0;
+}
+
+static struct platform_driver cam_icp_driver = {
+	.probe = cam_icp_probe,
+	.remove = cam_icp_remove,
+	.driver = {
+		.name = "cam_icp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_icp_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_icp_init_module(void)
+{
+	return platform_driver_register(&cam_icp_driver);
+}
+
+static void __exit cam_icp_exit_module(void)
+{
+	platform_driver_unregister(&cam_icp_driver);
+}
+module_init(cam_icp_init_module);
+module_exit(cam_icp_exit_module);
+MODULE_DESCRIPTION("MSM ICP driver");
+MODULE_LICENSE("GPL v2");

+ 168 - 0
drivers/cam_icp/fw_inc/hfi_intf.h

@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _HFI_INTF_H_
+#define _HFI_INTF_H_
+
+#include <linux/types.h>
+
+/**
+ * struct hfi_mem
+ * @len: length of memory
+ * @kva: kernel virtual address
+ * @iova: IO virtual address
+ * @reserved: reserved field
+ */
+struct hfi_mem {
+	uint64_t  len;
+	uintptr_t kva;
+	uint32_t  iova;
+	uint32_t  reserved;
+};
+
+/**
+ * struct hfi_mem_info
+ * @qtbl: qtable hfi memory
+ * @cmd_q: command queue hfi memory for host to firmware communication
+ * @msg_q: message queue hfi memory for firmware to host communication
+ * @dbg_q: debug queue hfi memory for firmware debug information
+ * @sfr_buf: buffer for subsystem failure reason (SFR)
+ * @sec_heap: secondary heap hfi memory for firmware
+ * @shmem: shared memory for firmware
+ * @qdss: qdss mapped memory for fw
+ * @io_mem: io memory info
+ * @icp_base: icp base address
+ */
+struct hfi_mem_info {
+	struct hfi_mem qtbl;
+	struct hfi_mem cmd_q;
+	struct hfi_mem msg_q;
+	struct hfi_mem dbg_q;
+	struct hfi_mem sfr_buf;
+	struct hfi_mem sec_heap;
+	struct hfi_mem shmem;
+	struct hfi_mem qdss;
+	struct hfi_mem io_mem;
+	void __iomem *icp_base;
+};
+
+/**
+ * hfi_write_cmd() - function for hfi write
+ * @cmd_ptr: pointer to command data for hfi write
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_write_cmd(void *cmd_ptr);
+
+/**
+ * hfi_read_message() - function for hfi read
+ * @pmsg: buffer to place read message for hfi queue
+ * @q_id: queue id
+ * @words_read: total number of words read from the queue
+ *              returned as output to the caller
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
+
+/**
+ * cam_hfi_init() - initialize HFI after firmware download
+ * @event_driven_mode: event mode
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
+/**
+ * hfi_get_hw_caps() - hardware capabilities from firmware
+ * @query_caps: holds query information from hfi
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_get_hw_caps(void *query_caps);
+
+/**
+ * hfi_send_system_cmd() - send hfi system command to firmware
+ * @type: type of system command
+ * @data: command data
+ * @size: size of command data
+ */
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
+
+/**
+ * cam_hfi_enable_cpu() - enable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_enable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_disable_cpu() - disable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_deinit() - cleanup HFI
+ * @icp_base: icp base address
+ */
+void cam_hfi_deinit(void __iomem *icp_base);
+
+/**
+ * hfi_set_debug_level() - set debug level
+ * @a5_dbg_type: 1 for debug_q & 2 for qdss
+ * @lvl: FW debug message level
+ */
+int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl);
+
+/**
+ * hfi_set_fw_dump_level() - set firmware dump level
+ * @lvl: level of firmware dump level
+ */
+int hfi_set_fw_dump_level(uint32_t lvl);
+
+/**
+ * hfi_enable_ipe_bps_pc() - Enable interframe pc
+ * Host sends a command to firmware to enable interframe
+ * power collapse for IPE and BPS hardware.
+ *
+ * @enable: flag to enable/disable
+ * @core_info: Core information to firmware
+ */
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info);
+
+/**
+ * hfi_cmd_ubwc_config_ext() - UBWC configuration to firmware
+ * @ubwc_ipe_cfg: UBWC ipe fetch/write configuration params
+ * @ubwc_bps_cfg: UBWC bps fetch/write configuration params
+ */
+int hfi_cmd_ubwc_config_ext(uint32_t *ubwc_ipe_cfg,
+	uint32_t *ubwc_bps_cfg);
+
+/**
+ * hfi_cmd_ubwc_config() - UBWC configuration to firmware
+ *                         for older targets
+ * @ubwc_cfg: UBWC configuration parameters
+ */
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
+
+/**
+ * cam_hfi_resume() - function to resume
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
+/**
+ * cam_hfi_queue_dump() - utility function to dump hfi queues
+ */
+void cam_hfi_queue_dump(void);
+
+#endif /* _HFI_INTF_H_ */
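
Taken together, the contract is: the caller maps every queue region, fills a
struct hfi_mem_info, and only then performs the init handshake. A hypothetical
bring-up sketch (INTR_MODE comes from hfi_reg.h; the ICP hardware manager is
the real caller and its exact ordering may differ):

	/* Sketch: minimal HFI bring-up once queue memory is mapped. */
	static int example_hfi_bringup(struct hfi_mem_info *mem,
		void __iomem *icp_base)
	{
		/* mem->qtbl/cmd_q/msg_q/... assumed already populated */
		cam_hfi_enable_cpu(icp_base);	/* core must be running */
		return cam_hfi_init(INTR_MODE, mem, icp_base, false);
	}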

+ 336 - 0
drivers/cam_icp/fw_inc/hfi_reg.h

@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HFI_REG_H_
+#define _CAM_HFI_REG_H_
+
+#include <linux/types.h>
+#include "hfi_intf.h"
+
+
+/* start of ICP CSR registers */
+#define HFI_REG_A5_HW_VERSION                   0x0
+#define HFI_REG_A5_CSR_NSEC_RESET               0x4
+#define HFI_REG_A5_CSR_A5_CONTROL               0x8
+#define HFI_REG_A5_CSR_ETM                      0xC
+#define HFI_REG_A5_CSR_A2HOSTINTEN              0x10
+#define HFI_REG_A5_CSR_A2HOSTINT                0x14
+#define HFI_REG_A5_CSR_A2HOSTINTCLR             0x18
+#define HFI_REG_A5_CSR_A2HOSTINTSTATUS          0x1C
+#define HFI_REG_A5_CSR_A2HOSTINTSET             0x20
+#define HFI_REG_A5_CSR_HOST2ICPINT              0x30
+#define HFI_REG_A5_CSR_A5_STATUS                0x200
+#define HFI_REG_A5_QGIC2_LM_ID                  0x204
+#define HFI_REG_A5_SPARE                        0x400
+
+/* general purpose registers */
+#define HFI_REG_FW_VERSION                      0x44
+#define HFI_REG_HOST_ICP_INIT_REQUEST           0x48
+#define HFI_REG_ICP_HOST_INIT_RESPONSE          0x4C
+#define HFI_REG_SHARED_MEM_PTR                  0x50
+#define HFI_REG_SHARED_MEM_SIZE                 0x54
+#define HFI_REG_QTBL_PTR                        0x58
+#define HFI_REG_UNCACHED_HEAP_PTR               0x5C
+#define HFI_REG_UNCACHED_HEAP_SIZE              0x60
+#define HFI_REG_SFR_PTR                         0x68
+#define HFI_REG_QDSS_IOVA                       0x6C
+#define HFI_REG_QDSS_IOVA_SIZE                  0x70
+#define HFI_REG_IO_REGION_IOVA                  0x74
+#define HFI_REG_IO_REGION_SIZE                  0x78
+
+/* end of ICP CSR registers */
+
+/* flags for ICP CSR registers */
+#define ICP_FLAG_CSR_WAKE_UP_EN                 (1 << 4)
+#define ICP_FLAG_CSR_A5_EN                      (1 << 9)
+#define ICP_CSR_EN_CLKGATE_WFI                  (1 << 12)
+#define ICP_CSR_EDBGRQ                          (1 << 14)
+#define ICP_CSR_DBGSWENABLE                     (1 << 22)
+#define ICP_CSR_A5_STATUS_WFI                   (1 << 7)
+
+#define ICP_FLAG_A5_CTRL_DBG_EN                 (ICP_FLAG_CSR_WAKE_UP_EN|\
+						ICP_FLAG_CSR_A5_EN|\
+						ICP_CSR_EDBGRQ|\
+						ICP_CSR_DBGSWENABLE)
+
+#define ICP_FLAG_A5_CTRL_EN                     (ICP_FLAG_CSR_WAKE_UP_EN|\
+						ICP_FLAG_CSR_A5_EN|\
+						ICP_CSR_EN_CLKGATE_WFI)
+
+/* start of Queue table and queues */
+#define MAX_ICP_HFI_QUEUES                      4
+#define ICP_QHDR_TX_TYPE_MASK                   0xFF000000
+#define ICP_QHDR_RX_TYPE_MASK                   0x00FF0000
+#define ICP_QHDR_PRI_TYPE_MASK                  0x0000FF00
+#define ICP_QHDR_Q_ID_MASK                      0x000000FF
+
+#define ICP_CMD_Q_SIZE_IN_BYTES                 4096
+#define ICP_MSG_Q_SIZE_IN_BYTES                 4096
+#define ICP_DBG_Q_SIZE_IN_BYTES                 102400
+#define ICP_MSG_SFR_SIZE_IN_BYTES               4096
+
+#define ICP_SHARED_MEM_IN_BYTES                 (1024 * 1024)
+#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES         (2 * 1024 * 1024)
+#define ICP_HFI_MAX_PKT_SIZE_IN_WORDS           25600
+#define ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS      256
+
+#define ICP_HFI_QTBL_HOSTID1                    0x01000000
+#define ICP_HFI_QTBL_STATUS_ENABLED             0x00000001
+#define ICP_HFI_NUMBER_OF_QS                    3
+#define ICP_HFI_NUMBER_OF_ACTIVE_QS             3
+#define ICP_HFI_QTBL_OFFSET                     0
+#define ICP_HFI_VAR_SIZE_PKT                    0
+#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS           128
+
+
+/* Queue Header type masks. Use these to access bitfields in qhdr_type */
+#define HFI_MASK_QHDR_TX_TYPE                   0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE                   0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE                  0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE                 0x000000FF
+
+
+#define TX_EVENT_DRIVEN_MODE_1                  0
+#define RX_EVENT_DRIVEN_MODE_1                  0
+#define TX_EVENT_DRIVEN_MODE_2                  0x01000000
+#define RX_EVENT_DRIVEN_MODE_2                  0x00010000
+#define TX_EVENT_POLL_MODE_2                    0x02000000
+#define RX_EVENT_POLL_MODE_2                    0x00020000
+#define U32_OFFSET                              0x1
+#define BYTE_WORD_SHIFT                         2
+
+/**
+ * @HFI_DEINIT: HFI is not initialized yet
+ * @HFI_INIT: HFI is initialized
+ * @HFI_READY: HFI is ready to send/receive commands/messages
+ */
+enum hfi_state {
+	HFI_DEINIT,
+	HFI_INIT,
+	HFI_READY
+};
+
+/**
+ * @RESET: register reset/clear value
+ * @SET: register set value
+ * @SET_WM: watermark set value
+ */
+enum reg_settings {
+	RESET,
+	SET,
+	SET_WM = 1024
+};
+
+/**
+ * @INTR_DISABLE: Disable interrupt
+ * @INTR_ENABLE: Enable interrupt
+ * @INTR_ENABLE_WD0: Enable WD0
+ * @INTR_ENABLE_WD1: Enable WD1
+ */
+enum intr_status {
+	INTR_DISABLE,
+	INTR_ENABLE,
+	INTR_ENABLE_WD0,
+	INTR_ENABLE_WD1 = 0x4
+};
+
+/**
+ * @ICP_INIT_RESP_RESET: reset init state
+ * @ICP_INIT_RESP_SUCCESS: init success
+ * @ICP_INIT_RESP_FAILED: init failed
+ */
+enum host_init_resp {
+	ICP_INIT_RESP_RESET,
+	ICP_INIT_RESP_SUCCESS,
+	ICP_INIT_RESP_FAILED
+};
+
+/**
+ * @ICP_INIT_REQUEST_RESET: reset init request
+ * @ICP_INIT_REQUEST_SET: set init request
+ */
+enum host_init_request {
+	ICP_INIT_REQUEST_RESET,
+	ICP_INIT_REQUEST_SET
+};
+
+/**
+ * @QHDR_INACTIVE: Queue is inactive
+ * @QHDR_ACTIVE: Queue is active
+ */
+enum qhdr_status {
+	QHDR_INACTIVE,
+	QHDR_ACTIVE
+};
+
+/**
+ * @INTR_MODE: event driven mode 1, each send and receive generates interrupt
+ * @WM_MODE: event driven mode 2, interrupts based on watermark mechanism
+ * @POLL_MODE: poll method
+ */
+enum qhdr_event_drv_type {
+	INTR_MODE,
+	WM_MODE,
+	POLL_MODE
+};
+
+/**
+ * @TX_INT: event driven mode 1, each send and receive generates interrupt
+ * @TX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @TX_POLL: poll method
+ * @ICP_QHDR_TX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_tx_type {
+	TX_INT,
+	TX_INT_WM,
+	TX_POLL
+};
+
+/**
+ * @RX_INT: event driven mode 1, each send and receive generates interrupt
+ * @RX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @RX_POLL: poll method
+ * @ICP_QHDR_RX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_rx_type {
+	RX_INT,
+	RX_INT_WM,
+	RX_POLL
+};
+
+/**
+ * @Q_CMD: Host to FW command queue
+ * @Q_MSG: FW to Host message queue
+ * @Q_DBG: FW to Host debug queue
+ * @ICP_QHDR_Q_ID_MASK defines position in qhdr_type
+ */
+enum qhdr_q_id {
+	Q_CMD,
+	Q_MSG,
+	Q_DBG
+};
+
+/**
+ * struct hfi_qtbl_hdr
+ * @qtbl_version: Queue table version number
+ *                Higher 16 bits: Major version
+ *                Lower 16 bits: Minor version
+ * @qtbl_size: Queue table size from version to last parameter in qhdr entry
+ * @qtbl_qhdr0_offset: Offset to the start of first qhdr
+ * @qtbl_qhdr_size: Queue header size in bytes
+ * @qtbl_num_q: Total number of queues in Queue table
+ * @qtbl_num_active_q: Total number of active queues
+ */
+struct hfi_qtbl_hdr {
+	uint32_t qtbl_version;
+	uint32_t qtbl_size;
+	uint32_t qtbl_qhdr0_offset;
+	uint32_t qtbl_qhdr_size;
+	uint32_t qtbl_num_q;
+	uint32_t qtbl_num_active_q;
+} __packed;
+
+/**
+ * struct hfi_q_hdr
+ * @qhdr_status: Queue status; enum qhdr_status defines possible values
+ * @qhdr_start_addr: Queue start address in non cached memory
+ * @qhdr_type: qhdr_tx, qhdr_rx, qhdr_q_id and priority define the qhdr type
+ * @qhdr_q_size: Queue size
+ *		Number of queue packets if qhdr_pkt_size is non-zero
+ *		Queue size in bytes if qhdr_pkt_size is zero
+ * @qhdr_pkt_size: Size of queue packet entries
+ *		0x0: variable queue packet size
+ *		non zero: size of queue packet entry, fixed
+ * @qhdr_pkt_drop_cnt: Number of packets dropped by sender
+ * @qhdr_rx_wm: Receiver watermark, applicable in event driven mode
+ * @qhdr_tx_wm: Sender watermark, applicable in event driven mode
+ * @qhdr_rx_req: Receiver sets this bit if queue is empty
+ * @qhdr_tx_req: Sender sets this bit if queue is full
+ * @qhdr_rx_irq_status: Receiver sets this bit and triggers an interrupt to
+ *		the sender after packets are dequeued. Sender clears this bit
+ * @qhdr_tx_irq_status: Sender sets this bit and triggers an interrupt to
+ *		the receiver after packets are queued. Receiver clears this bit
+ * @qhdr_read_idx: Read index
+ * @qhdr_write_idx: Write index
+ */
+struct hfi_q_hdr {
+	uint32_t dummy[15];
+	uint32_t qhdr_status;
+	uint32_t dummy1[15];
+	uint32_t qhdr_start_addr;
+	uint32_t dummy2[15];
+	uint32_t qhdr_type;
+	uint32_t dummy3[15];
+	uint32_t qhdr_q_size;
+	uint32_t dummy4[15];
+	uint32_t qhdr_pkt_size;
+	uint32_t dummy5[15];
+	uint32_t qhdr_pkt_drop_cnt;
+	uint32_t dummy6[15];
+	uint32_t qhdr_rx_wm;
+	uint32_t dummy7[15];
+	uint32_t qhdr_tx_wm;
+	uint32_t dummy8[15];
+	uint32_t qhdr_rx_req;
+	uint32_t dummy9[15];
+	uint32_t qhdr_tx_req;
+	uint32_t dummy10[15];
+	uint32_t qhdr_rx_irq_status;
+	uint32_t dummy11[15];
+	uint32_t qhdr_tx_irq_status;
+	uint32_t dummy12[15];
+	uint32_t qhdr_read_idx;
+	uint32_t dummy13[15];
+	uint32_t qhdr_write_idx;
+	uint32_t dummy14[15];
+};
+
+/**
+ * struct sfr_buf
+ * @size: Number of characters
+ * @msg : Subsystem failure reason
+ */
+struct sfr_buf {
+	uint32_t size;
+	char msg[ICP_MSG_SFR_SIZE_IN_BYTES];
+};
+
+/**
+ * struct hfi_qtbl
+ * @q_tbl_hdr: Queue table header
+ * @q_hdr: Queue header info, it holds info of cmd, msg and debug queues
+ */
+struct hfi_qtbl {
+	struct hfi_qtbl_hdr q_tbl_hdr;
+	struct hfi_q_hdr q_hdr[MAX_ICP_HFI_QUEUES];
+};
+
+/**
+ * struct hfi_info
+ * @map: Hfi shared memory info
+ * @smem_size: Shared memory size
+ * @uncachedheap_size: uncached heap size
+ * @msgpacket_buf: message buffer
+ * @hfi_state: State machine for hfi
+ * @cmd_q_lock: Lock for command queue
+ * @cmd_q_state: State of command queue
+ * @msg_q_lock: Lock for message queue
+ * @msg_q_state: State of message queue
+ * @csr_base: CSR base address
+ */
+struct hfi_info {
+	struct hfi_mem_info map;
+	uint32_t smem_size;
+	uint32_t uncachedheap_size;
+	uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS];
+	uint8_t hfi_state;
+	struct mutex cmd_q_lock;
+	bool cmd_q_state;
+	struct mutex msg_q_lock;
+	bool msg_q_state;
+	void __iomem *csr_base;
+};
+
+#endif /* _CAM_HFI_REG_H_ */
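
The TX mode, RX mode, priority, and queue ID all share the single qhdr_type
word, packed according to the ICP_QHDR_*_MASK definitions above. For
illustration (values follow directly from the defines):

	/* Sketch: composing and decoding qhdr_type. */
	static void example_qhdr_type(void)
	{
		uint32_t type = TX_EVENT_DRIVEN_MODE_2 |
			RX_EVENT_DRIVEN_MODE_2 | Q_CMD;
		uint32_t q_id = type & ICP_QHDR_Q_ID_MASK;	/* Q_CMD */
		uint32_t tx = (type & ICP_QHDR_TX_TYPE_MASK) >> 24;
							/* TX_INT_WM */
	}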

+ 564 - 0
drivers/cam_icp/fw_inc/hfi_session_defs.h

@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HFI_SESSION_DEFS_H
+#define _CAM_HFI_SESSION_DEFS_H
+
+#include <linux/types.h>
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO             0x1
+#define HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS         0x2
+#define HFI_IPEBPS_CMD_OPCODE_BPS_ABORT                 0x3
+#define HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY               0x4
+
+#define HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO             0x5
+#define HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS         0x6
+#define HFI_IPEBPS_CMD_OPCODE_IPE_ABORT                 0x7
+#define HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY               0x8
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_IPE          0x9
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_BPS          0xa
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_BPS          0xb
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_IPE          0xc
+
+#define HFI_IPEBPS_CMD_OPCODE_MEM_MAP                   0xe
+#define HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP                 0xf
+
+#define HFI_IPEBPS_HANDLE_TYPE_BPS                      0x1
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT                   0x2
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT               0x3
+
+/**
+ * struct mem_map_region_data
+ * @start_addr: cmd buffer region start addr
+ * @len       : length of the region
+ *
+ * create mem_map_region_data
+ */
+struct mem_map_region_data {
+	uint32_t               start_addr;
+	uint32_t               len;
+};
+
+/**
+ * struct hfi_cmd_ipe_bps_map
+ * @user_data          : user arg
+ * @mem_map_request_num: number of mappings
+ * @mem_map_region_sets: array of all map/unmap info
+ *
+ * create hfi_cmd_ipe_bps_map
+ */
+struct hfi_cmd_ipe_bps_map {
+	uint64_t                   user_data;
+	uint32_t                   mem_map_request_num;
+	struct mem_map_region_data mem_map_region_sets[1];
+} __packed;
+
+/**
+ * struct hfi_cmd_ipe_bps_map_ack
+ * @rc       : Async return code
+ * @user_data: user_arg
+ *
+ * create hfi_cmd_ipe_bps_map_ack
+ */
+struct hfi_cmd_ipe_bps_map_ack {
+	uint32_t            rc;
+	uint64_t            user_data;
+};
+
+/**
+ * struct abort_data
+ * @num_req_ids: Number of req ids
+ * @num_req_id: array of the specific req ids
+ *
+ * create abort data
+ */
+struct abort_data {
+	uint32_t num_req_ids;
+	uint32_t num_req_id[1];
+};
+
+/**
+ * struct hfi_cmd_abort
+ * @abort: abort data
+ * @user_data: user supplied argument
+ *
+ * create session abort data
+ */
+struct hfi_cmd_abort {
+	struct abort_data abort;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_abort_destroy
+ * @user_data: user supplied data
+ *
+ * IPE/BPS destroy/abort command
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY
+ */
+struct hfi_cmd_abort_destroy {
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_chaining_ops
+ * @wait_hdl: current session handle waits on wait_hdl to complete operation
+ * @user_data: user supplied argument
+ *
+ * this structure for chaining opcodes
+ * BPS_WAITS_FOR_IPE
+ * BPS_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_IPE
+ */
+struct hfi_cmd_chaining_ops {
+	uint32_t wait_hdl;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_create_handle
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @handle_type: IPE/BPS firmware session handle type
+ * @user_data1: caller provided data1
+ * @user_data2: caller provided data2
+ *
+ * create firmware session handle
+ */
+struct hfi_cmd_create_handle {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t handle_type;
+	uint64_t user_data1;
+	uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_cmd_ipebps_async
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode for IPE/BPS async operation
+ *          CONFIG_IO: configures I/O for IPE/BPS handle
+ *          FRAME_PROCESS: image frame to be processed by IPE/BPS
+ *          ABORT: abort all processing frames of IPE/BPS handle
+ *          DESTROY: destroy earlier created IPE/BPS handle
+ *          BPS_WAITS_FOR_IPE: sync for BPS to wait for IPE
+ *          BPS_WAITS_FOR_BPS: sync for BPS to wait for BPS
+ *          IPE_WAITS_FOR_IPE: sync for IPE to wait for IPE
+ *          IPE_WAITS_FOR_BPS: sync for IPE to wait for BPS
+ * @num_fw_handles: number of IPE/BPS firmware handles in fw_handles array
+ * @fw_handles: IPE/BPS handles array
+ * @payload: command payload for IPE/BPS opcodes
+ * @direct: points to actual payload
+ * @indirect: points to address of payload
+ *
+ * sends async command to the earlier created IPE or BPS handle
+ * for asynchronous operation.
+ */
+struct hfi_cmd_ipebps_async {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t num_fw_handles;
+	uint32_t fw_handles[1];
+	union {
+		uint32_t direct[1];
+		uint32_t indirect;
+	} payload;
+} __packed;
+
+/**
+ * struct hfi_msg_create_handle_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code
+ * @fw_handle: output param for IPE/BPS handle
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ *
+ * ack for create handle command of IPE/BPS
+ * @HFI_MSG_IPEBPS_CREATE_HANDLE_ACK
+ */
+struct hfi_msg_create_handle_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+	uint32_t fw_handle;
+	uint64_t user_data1;
+	uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_msg_ipebps_async_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode of IPE/BPS async operation
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ * @err_type: error code
+ * @msg_data: IPE/BPS async done message data
+ *
+ * result of IPE/BPS async command
+ * @HFI_MSG_IPEBPS_ASYNC_COMMAND_ACK
+ */
+struct hfi_msg_ipebps_async_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t err_type;
+	uint32_t msg_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_frame_process_done
+ * @result: result of frame process command
+ * @scratch_buffer_address: address of scratch buffer
+ */
+struct hfi_msg_frame_process_done {
+	uint32_t result;
+	uint32_t scratch_buffer_address;
+};
+
+/**
+ * struct hfi_msg_chaining_op
+ * @status: return status
+ * @user_data: user data provided as part of chaining ops
+ *
+ * IPE/BPS wait response
+ */
+struct hfi_msg_chaining_op {
+	uint32_t status;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_abort_destroy
+ * @status: return status
+ * @user_data: user data provided as part of abort/destroy ops
+ *
+ * IPE/BPS abort/destroy response
+ */
+struct hfi_msg_abort_destroy {
+	uint32_t status;
+	uint64_t user_data;
+} __packed;
+
+#define MAX_NUM_OF_IMAGE_PLANES	2
+#define MAX_HFR_GROUP          16
+
+enum hfi_ipe_io_images {
+	IPE_INPUT_IMAGE_FULL,
+	IPE_INPUT_IMAGE_DS4,
+	IPE_INPUT_IMAGE_DS16,
+	IPE_INPUT_IMAGE_DS64,
+	IPE_INPUT_IMAGE_FULL_REF,
+	IPE_INPUT_IMAGE_DS4_REF,
+	IPE_INPUT_IMAGE_DS16_REF,
+	IPE_INPUT_IMAGE_DS64_REF,
+	IPE_OUTPUT_IMAGE_DISPLAY,
+	IPE_OUTPUT_IMAGE_VIDEO,
+	IPE_OUTPUT_IMAGE_FULL_REF,
+	IPE_OUTPUT_IMAGE_DS4_REF,
+	IPE_OUTPUT_IMAGE_DS16_REF,
+	IPE_OUTPUT_IMAGE_DS64_REF,
+	IPE_INPUT_IMAGE_FIRST = IPE_INPUT_IMAGE_FULL,
+	IPE_INPUT_IMAGE_LAST = IPE_INPUT_IMAGE_DS64_REF,
+	IPE_OUTPUT_IMAGE_FIRST = IPE_OUTPUT_IMAGE_DISPLAY,
+	IPE_OUTPUT_IMAGE_LAST = IPE_OUTPUT_IMAGE_DS64_REF,
+	IPE_IO_IMAGES_MAX
+};
+
+enum bps_io_images {
+	BPS_INPUT_IMAGE,
+	BPS_OUTPUT_IMAGE_FULL,
+	BPS_OUTPUT_IMAGE_DS4,
+	BPS_OUTPUT_IMAGE_DS16,
+	BPS_OUTPUT_IMAGE_DS64,
+	BPS_OUTPUT_IMAGE_STATS_BG,
+	BPS_OUTPUT_IMAGE_STATS_BHIST,
+	BPS_OUTPUT_IMAGE_REG1,
+	BPS_OUTPUT_IMAGE_REG2,
+	BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL,
+	BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2,
+	BPS_IO_IMAGES_MAX
+};
+
+struct frame_buffer {
+	uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct bps_frame_process_data {
+	struct frame_buffer buffers[BPS_IO_IMAGES_MAX];
+	uint32_t max_num_cores;
+	uint32_t target_time;
+	uint32_t ubwc_stats_buffer_addr;
+	uint32_t ubwc_stats_buffer_size;
+	uint32_t cdm_buffer_addr;
+	uint32_t cdm_buffer_size;
+	uint32_t iq_settings_addr;
+	uint32_t strip_lib_out_addr;
+	uint32_t cdm_prog_addr;
+	uint32_t request_id;
+};
+
+enum hfi_ipe_image_format {
+	IMAGE_FORMAT_INVALID,
+	IMAGE_FORMAT_MIPI_8,
+	IMAGE_FORMAT_MIPI_10,
+	IMAGE_FORMAT_MIPI_12,
+	IMAGE_FORMAT_MIPI_14,
+	IMAGE_FORMAT_BAYER_8,
+	IMAGE_FORMAT_BAYER_10,
+	IMAGE_FORMAT_BAYER_12,
+	IMAGE_FORMAT_BAYER_14,
+	IMAGE_FORMAT_PDI_10,
+	IMAGE_FORMAT_PD_10,
+	IMAGE_FORMAT_PD_8,
+	IMAGE_FORMAT_INDICATIONS,
+	IMAGE_FORMAT_REFINEMENT,
+	IMAGE_FORMAT_UBWC_TP_10,
+	IMAGE_FORMAT_UBWC_NV_12,
+	IMAGE_FORMAT_UBWC_NV12_4R,
+	IMAGE_FORMAT_UBWC_P010,
+	IMAGE_FORMAT_LINEAR_TP_10,
+	IMAGE_FORMAT_LINEAR_P010,
+	IMAGE_FORMAT_LINEAR_NV12,
+	IMAGE_FORMAT_LINEAR_PLAIN_16,
+	IMAGE_FORMAT_YUV422_8,
+	IMAGE_FORMAT_YUV422_10,
+	IMAGE_FORMAT_STATISTICS_BAYER_GRID,
+	IMAGE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+	IMAGE_FORMAT_MAX
+};
+
+enum hfi_ipe_plane_format {
+	PLANE_FORMAT_INVALID = 0,
+	PLANE_FORMAT_MIPI_8,
+	PLANE_FORMAT_MIPI_10,
+	PLANE_FORMAT_MIPI_12,
+	PLANE_FORMAT_MIPI_14,
+	PLANE_FORMAT_BAYER_8,
+	PLANE_FORMAT_BAYER_10,
+	PLANE_FORMAT_BAYER_12,
+	PLANE_FORMAT_BAYER_14,
+	PLANE_FORMAT_PDI_10,
+	PLANE_FORMAT_PD_10,
+	PLANE_FORMAT_PD_8,
+	PLANE_FORMAT_INDICATIONS,
+	PLANE_FORMAT_REFINEMENT,
+	PLANE_FORMAT_UBWC_TP_10_Y,
+	PLANE_FORMAT_UBWC_TP_10_C,
+	PLANE_FORMAT_UBWC_NV_12_Y,
+	PLANE_FORMAT_UBWC_NV_12_C,
+	PLANE_FORMAT_UBWC_NV_12_4R_Y,
+	PLANE_FORMAT_UBWC_NV_12_4R_C,
+	PLANE_FORMAT_UBWC_P010_Y,
+	PLANE_FORMAT_UBWC_P010_C,
+	PLANE_FORMAT_LINEAR_TP_10_Y,
+	PLANE_FORMAT_LINEAR_TP_10_C,
+	PLANE_FORMAT_LINEAR_P010_Y,
+	PLANE_FORMAT_LINEAR_P010_C,
+	PLANE_FORMAT_LINEAR_NV12_Y,
+	PLANE_FORMAT_LINEAR_NV12_C,
+	PLANE_FORMAT_LINEAR_PLAIN_16_Y,
+	PLANE_FORMAT_LINEAR_PLAIN_16_C,
+	PLANE_FORMAT_YUV422_8,
+	PLANE_FORMAT_YUV422_10,
+	PLANE_FORMAT_STATISTICS_BAYER_GRID,
+	PLANE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+	PLANE_FORMAT_MAX
+};
+
+enum hfi_ipe_bayer_pixel_order {
+	FIRST_PIXEL_R,
+	FIRST_PIXEL_GR,
+	FIRST_PIXEL_B,
+	FIRST_PIXEL_GB,
+	FIRST_PIXEL_MAX
+};
+
+enum hfi_ipe_pixel_pack_alignment {
+	PIXEL_LSB_ALIGNED,
+	PIXEL_MSB_ALIGNED,
+};
+
+enum hfi_ipe_yuv_422_order {
+	PIXEL_ORDER_Y_U_Y_V,
+	PIXEL_ORDER_Y_V_Y_U,
+	PIXEL_ORDER_U_Y_V_Y,
+	PIXEL_ORDER_V_Y_U_Y,
+	PIXEL_ORDER_YUV422_MAX
+};
+
+enum ubwc_write_client {
+	IPE_WR_CLIENT_0 = 0,
+	IPE_WR_CLIENT_1,
+	IPE_WR_CLIENT_5,
+	IPE_WR_CLIENT_6,
+	IPE_WR_CLIENT_7,
+	IPE_WR_CLIENT_8,
+	IPE_WR_CLIENT_MAX
+};
+
+/**
+ * struct image_info
+ * @format: image format
+ * @img_width: image width
+ * @img_height: image height
+ * @bayer_order: pixel order
+ * @pix_align: alignment
+ * @yuv422_order: YUV order
+ * @byte_swap: byte swap
+ */
+struct image_info {
+	enum hfi_ipe_image_format format;
+	uint32_t img_width;
+	uint32_t img_height;
+	enum hfi_ipe_bayer_pixel_order bayer_order;
+	enum hfi_ipe_pixel_pack_alignment pix_align;
+	enum hfi_ipe_yuv_422_order yuv422_order;
+	uint32_t byte_swap;
+} __packed;
+
+/**
+ * struct buffer_layout
+ * @buf_stride: buffer stride
+ * @buf_height: buffer height
+ */
+struct buffer_layout {
+	uint32_t buf_stride;
+	uint32_t buf_height;
+} __packed;
+
+/**
+ * struct image_desc
+ * @info: image info
+ * @buf_layout: buffer layout
+ * @meta_buf_layout: meta buffer layout
+ */
+struct image_desc {
+	struct image_info info;
+	struct buffer_layout buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+	struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct ica_stab_coeff {
+	uint32_t coeffs[8];
+} __packed;
+
+struct ica_stab_params {
+	uint32_t mode;
+	struct ica_stab_coeff transforms[3];
+} __packed;
+
+struct frame_set {
+	struct frame_buffer buffers[IPE_IO_IMAGES_MAX];
+	struct ica_stab_params ica_params;
+	uint32_t cdm_ica1_addr;
+	uint32_t cdm_ica2_addr;
+} __packed;
+
+struct ipe_frame_process_data {
+	uint32_t strip_lib_out_addr;
+	uint32_t iq_settings_addr;
+	uint32_t scratch_buffer_addr;
+	uint32_t scratch_buffer_size;
+	uint32_t ubwc_stats_buffer_addr;
+	uint32_t ubwc_stats_buffer_size;
+	uint32_t cdm_buffer_addr;
+	uint32_t cdm_buffer_size;
+	uint32_t max_num_cores;
+	uint32_t target_time;
+	uint32_t cdm_prog_base;
+	uint32_t cdm_pre_ltm;
+	uint32_t cdm_post_ltm;
+	uint32_t cdm_anr_full_pass;
+	uint32_t cdm_anr_ds4;
+	uint32_t cdm_anr_ds16;
+	uint32_t cdm_anr_ds64;
+	uint32_t cdm_tf_full_pass;
+	uint32_t cdm_tf_ds4;
+	uint32_t cdm_tf_ds16;
+	uint32_t cdm_tf_ds64;
+	uint32_t request_id;
+	uint32_t frames_in_batch;
+	struct frame_set framesets[MAX_HFR_GROUP];
+} __packed;
+
+/**
+ * struct hfi_cmd_ipe_config
+ * @images: image descriptions
+ * @user_data: user supplied data
+ *
+ * payload for IPE async command
+ */
+struct hfi_cmd_ipe_config {
+	struct image_desc images[IPE_IO_IMAGES_MAX];
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct frame_buffers
+ * @buf_ptr: buffer pointers for all planes
+ * @meta_buf_ptr: meta buffer pointers for all planes
+ */
+struct frame_buffers {
+	uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+/**
+ * struct hfi_msg_ipe_config
+ * @rc: result of ipe config command
+ * @scratch_mem_size: scratch mem size for a config
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_config {
+	uint32_t rc;
+	uint32_t scratch_mem_size;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_bps_common
+ * @rc: result of ipe config command
+ * @user_data: user data
+ */
+struct hfi_msg_bps_common {
+	uint32_t rc;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct ipe_bps_destroy
+ * @userdata: user data
+ */
+struct ipe_bps_destroy {
+	uint64_t userdata;
+};
+
+/**
+ * struct hfi_msg_ipe_frame_process
+ * @status: result of ipe frame process command
+ * @scratch_buf_addr: address of scratch buffer
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_frame_process {
+	uint32_t status;
+	uint32_t scratch_buf_addr;
+	uint64_t user_data;
+} __packed;
+
+#endif /* _CAM_HFI_SESSION_DEFS_H */
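
Putting the session types together: a frame-process request travels as one
hfi_cmd_ipebps_async whose payload is indirect, i.e. the packet carries only
the IOVA of a bps_frame_process_data (or ipe_frame_process_data) blob that
firmware dereferences. A hypothetical fill, assuming the packet-type constant
from hfi_sys_defs.h and a placeholder io_addr:

	/* Sketch: one-handle indirect frame-process command. */
	static void example_fill_async(struct hfi_cmd_ipebps_async *pkt,
		uint32_t fw_handle, uint32_t io_addr, uint64_t cookie)
	{
		pkt->size = sizeof(*pkt);
		pkt->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
		pkt->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
		pkt->user_data1 = cookie;	/* echoed back in the ack */
		pkt->user_data2 = 0;
		pkt->num_fw_handles = 1;
		pkt->fw_handles[0] = fw_handle;
		pkt->payload.indirect = io_addr;
	}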

+ 540 - 0
drivers/cam_icp/fw_inc/hfi_sys_defs.h

@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *  Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _HFI_DEFS_H_
+#define _HFI_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Following base acts as common starting points
+ * for all enumerations.
+ */
+#define HFI_COMMON_BASE                 0x0
+
+/* HFI Domain base offset for commands and messages */
+#define HFI_DOMAIN_SHFT                 (24)
+#define HFI_DOMAIN_BMSK                 (0x7 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_ICP             (0x0 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_IPE_BPS         (0x1 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_CDM             (0x2 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_DBG             (0x3 << HFI_DOMAIN_SHFT)
+
+/* Command base offset for commands */
+#define HFI_CMD_START_OFFSET            0x10000
+
+/* Command base offset for messages */
+#define HFI_MSG_START_OFFSET            0x20000
+
+/* System Level Error types */
+#define HFI_ERR_SYS_NONE                (HFI_COMMON_BASE)
+#define HFI_ERR_SYS_FATAL               (HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_VERSION_MISMATCH    (HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN  (HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_UNSUPPORT_CMD       (HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_CMDFAILED           (HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_CMDSIZE             (HFI_COMMON_BASE + 0x6)
+
+/* System Level Event types */
+#define HFI_EVENT_SYS_ERROR             (HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_ICP_ERROR             (HFI_COMMON_BASE + 0x2)
+#define HFI_EVENT_IPE_BPS_ERROR         (HFI_COMMON_BASE + 0x3)
+#define HFI_EVENT_CDM_ERROR             (HFI_COMMON_BASE + 0x4)
+#define HFI_EVENT_DBG_ERROR             (HFI_COMMON_BASE + 0x5)
+
+/* Core level start Ranges for errors */
+#define HFI_ERR_ICP_START               (HFI_COMMON_BASE + 0x64)
+#define HFI_ERR_IPE_BPS_START           (HFI_ERR_ICP_START + 0x64)
+#define HFI_ERR_CDM_START               (HFI_ERR_IPE_BPS_START + 0x64)
+#define HFI_ERR_DBG_START               (HFI_ERR_CDM_START + 0x64)
+
+/* ICP core level error messages */
+#define HFI_ERR_NO_RES                  (HFI_ERR_ICP_START + 0x1)
+#define HFI_ERR_UNSUPPORTED_RES         (HFI_ERR_ICP_START + 0x2)
+#define HFI_ERR_UNSUPPORTED_PROP        (HFI_ERR_ICP_START + 0x3)
+#define HFI_ERR_INIT_EXPECTED           (HFI_ERR_ICP_START + 0x4)
+#define HFI_ERR_INIT_IGNORED            (HFI_ERR_ICP_START + 0x5)
+
+/* System level commands */
+#define HFI_CMD_COMMON_START \
+		(HFI_DOMAIN_BASE_ICP + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_SYS_INIT               (HFI_CMD_COMMON_START + 0x1)
+#define HFI_CMD_SYS_PC_PREP            (HFI_CMD_COMMON_START + 0x2)
+#define HFI_CMD_SYS_SET_PROPERTY       (HFI_CMD_COMMON_START + 0x3)
+#define HFI_CMD_SYS_GET_PROPERTY       (HFI_CMD_COMMON_START + 0x4)
+#define HFI_CMD_SYS_PING               (HFI_CMD_COMMON_START + 0x5)
+#define HFI_CMD_SYS_RESET              (HFI_CMD_COMMON_START + 0x6)
+
+/* Core level commands */
+/* IPE/BPS core Commands */
+#define HFI_CMD_IPE_BPS_COMMON_START \
+		(HFI_DOMAIN_BASE_IPE_BPS + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_IPEBPS_CREATE_HANDLE \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0x8)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0xa)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0xe)
+
+/* CDM core Commands */
+#define HFI_CMD_CDM_COMMON_START \
+		(HFI_DOMAIN_BASE_CDM + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_CDM_TEST_START (HFI_CMD_CDM_COMMON_START + 0x800)
+#define HFI_CMD_CDM_END        (HFI_CMD_CDM_COMMON_START + 0xFFF)
+
+/* Debug/Test Commands */
+#define HFI_CMD_DBG_COMMON_START \
+		(HFI_DOMAIN_BASE_DBG + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_DBG_TEST_START  (HFI_CMD_DBG_COMMON_START + 0x800)
+#define HFI_CMD_DBG_END         (HFI_CMD_DBG_COMMON_START + 0xFFF)
+
+/* System level messages */
+#define HFI_MSG_ICP_COMMON_START \
+		(HFI_DOMAIN_BASE_ICP + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_SYS_INIT_DONE           (HFI_MSG_ICP_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE        (HFI_MSG_ICP_COMMON_START + 0x2)
+#define HFI_MSG_SYS_DEBUG               (HFI_MSG_ICP_COMMON_START + 0x3)
+#define HFI_MSG_SYS_IDLE                (HFI_MSG_ICP_COMMON_START + 0x4)
+#define HFI_MSG_SYS_PROPERTY_INFO       (HFI_MSG_ICP_COMMON_START + 0x5)
+#define HFI_MSG_SYS_PING_ACK            (HFI_MSG_ICP_COMMON_START + 0x6)
+#define HFI_MSG_SYS_RESET_ACK           (HFI_MSG_ICP_COMMON_START + 0x7)
+#define HFI_MSG_EVENT_NOTIFY            (HFI_MSG_ICP_COMMON_START + 0x8)
+
+/* Core level Messages */
+/* IPE/BPS core Messages */
+#define HFI_MSG_IPE_BPS_COMMON_START \
+		(HFI_DOMAIN_BASE_IPE_BPS + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_IPEBPS_CREATE_HANDLE_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x08)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x0a)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x0e)
+#define HFI_MSG_IPE_BPS_TEST_START	\
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x800)
+#define HFI_MSG_IPE_BPS_END \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0xFFF)
+
+/* CDM core Messages */
+#define HFI_MSG_CDM_COMMON_START \
+		(HFI_DOMAIN_BASE_CDM + HFI_MSG_START_OFFSET + 0x0)
+#define  HFI_MSG_PRI_CDM_PAYLOAD_ACK    (HFI_MSG_CDM_COMMON_START + 0xa)
+#define  HFI_MSG_PRI_LLD_PAYLOAD_ACK    (HFI_MSG_CDM_COMMON_START + 0xb)
+#define HFI_MSG_CDM_TEST_START          (HFI_MSG_CDM_COMMON_START + 0x800)
+#define HFI_MSG_CDM_END                 (HFI_MSG_CDM_COMMON_START + 0xFFF)
+
+/* core level test command ranges */
+/* ICP core level test command range */
+#define HFI_CMD_ICP_TEST_START          (HFI_CMD_COMMON_START + 0x800)
+#define HFI_CMD_ICP_END                 (HFI_CMD_COMMON_START + 0xFFF)
+
+/* IPE/BPS core level test command range */
+#define HFI_CMD_IPE_BPS_TEST_START \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0x800)
+#define HFI_CMD_IPE_BPS_END (HFI_CMD_IPE_BPS_COMMON_START + 0xFFF)
+
+/* ICP core level test message range */
+#define HFI_MSG_ICP_TEST_START  (HFI_MSG_ICP_COMMON_START + 0x800)
+#define HFI_MSG_ICP_END         (HFI_MSG_ICP_COMMON_START + 0xFFF)
+
+/* ICP core level Debug test message range */
+#define HFI_MSG_DBG_COMMON_START \
+		(HFI_DOMAIN_BASE_DBG + 0x0)
+#define HFI_MSG_DBG_TEST_START  (HFI_MSG_DBG_COMMON_START + 0x800)
+#define HFI_MSG_DBG_END         (HFI_MSG_DBG_COMMON_START + 0xFFF)
+
+/* System level property base offset */
+#define HFI_PROPERTY_ICP_COMMON_START  (HFI_DOMAIN_BASE_ICP + 0x0)
+
+#define HFI_PROP_SYS_DEBUG_CFG           (HFI_PROPERTY_ICP_COMMON_START + 0x1)
+#define HFI_PROP_SYS_UBWC_CFG            (HFI_PROPERTY_ICP_COMMON_START + 0x2)
+#define HFI_PROP_SYS_IMAGE_VER           (HFI_PROPERTY_ICP_COMMON_START + 0x3)
+#define HFI_PROP_SYS_SUPPORTED           (HFI_PROPERTY_ICP_COMMON_START + 0x4)
+#define HFI_PROP_SYS_IPEBPS_PC           (HFI_PROPERTY_ICP_COMMON_START + 0x5)
+#define HFI_PROP_SYS_FW_DUMP_CFG         (HFI_PROPERTY_ICP_COMMON_START + 0x8)
+#define HFI_PROPERTY_SYS_UBWC_CONFIG_EX  (HFI_PROPERTY_ICP_COMMON_START + 0x9)
+
+/* Capabilities reported at sys init */
+#define HFI_CAPS_PLACEHOLDER_1         (HFI_COMMON_BASE + 0x1)
+#define HFI_CAPS_PLACEHOLDER_2         (HFI_COMMON_BASE + 0x2)
+
+/* Section describes different debug levels (HFI_DEBUG_MSG_X)
+ * available for debug messages from FW
+ */
+#define  HFI_DEBUG_MSG_LOW      0x00000001
+#define  HFI_DEBUG_MSG_MEDIUM   0x00000002
+#define  HFI_DEBUG_MSG_HIGH     0x00000004
+#define  HFI_DEBUG_MSG_ERROR    0x00000008
+#define  HFI_DEBUG_MSG_FATAL    0x00000010
+/* Messages containing performance data */
+#define  HFI_DEBUG_MSG_PERF     0x00000020
+/* Disable ARM9 WFI in low power mode. */
+#define  HFI_DEBUG_CFG_WFI      0x01000000
+/* Disable ARM9 watchdog. */
+#define  HFI_DEBUG_CFG_ARM9WD   0x10000000
+
+
+/*
+ * HFI_FW_DUMP levels
+ * HFI_FW_DUMP_xx
+ */
+#define HFI_FW_DUMP_DISABLED    0x00000000
+#define HFI_FW_DUMP_ON_FAILURE  0x00000001
+#define HFI_FW_DUMP_ALWAYS      0x00000002
+
+/* Number of available dump levels. */
+#define NUM_HFI_DUMP_LVL        0x00000003
+
+/* Debug Msg Communication types:
+ * Section describes different modes (HFI_DEBUG_MODE_X)
+ * available to communicate the debug messages
+ */
+ /* Debug message output through the interface debug queue. */
+#define HFI_DEBUG_MODE_QUEUE     0x00000001
+ /* Debug message output through QDSS. */
+#define HFI_DEBUG_MODE_QDSS      0x00000002
+ /* Number of debug modes available. */
+#define NUM_HFI_DEBUG_MODE       0x00000002
+
+#define HFI_DEV_VERSION_MAX      0x5
+
+/**
+ * start of sys command packet types
+ * These commands are used to get system level information
+ * from firmware
+ */
+
+/**
+ * struct hfi_caps_support
+ * payload to report caps through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @type: capability type
+ * @min: minimum supported value for the capability
+ * @max: maximum supported value for the capability
+ * @step_size: supported steps between min-max
+ */
+struct hfi_caps_support {
+	uint32_t type;
+	uint32_t min;
+	uint32_t max;
+	uint32_t step_size;
+} __packed;
+
+/**
+ * struct hfi_caps_support_info
+ * capability report through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @num_caps: number of capabilities listed
+ * @caps_data: capabilities info array
+ */
+struct hfi_caps_support_info {
+	uint32_t num_caps;
+	struct hfi_caps_support caps_data[1];
+} __packed;
+
+/**
+ * struct hfi_debug
+ * payload structure to configure HFI_PROPERTY_SYS_DEBUG_CONFIG
+ * @debug_config: it is a result of HFI_DEBUG_MSG_X values that
+ *                are OR-ed together to specify the debug message types
+ *                to output
+ * @debug_mode: debug message output through debug queue/qdss
+ * @HFI_PROPERTY_SYS_DEBUG_CONFIG
+ */
+struct hfi_debug {
+	uint32_t debug_config;
+	uint32_t debug_mode;
+} __packed;
+
+/**
+ * struct hfi_ipe_bps_pc
+ * payload structure to configure HFI_PROPERTY_SYS_IPEBPS_PC
+ * @enable: Flag to enable IPE, BPS interframe power collapse
+ * @core_info: Core information to firmware
+ */
+struct hfi_ipe_bps_pc {
+	uint32_t enable;
+	uint32_t core_info;
+} __packed;
+
+/**
+ * struct hfi_cmd_ubwc_cfg
+ * Payload structure to configure HFI_PROP_SYS_UBWC_CFG
+ * @ubwc_fetch_cfg: UBWC configuration for fetch
+ * @ubwc_write_cfg: UBWC configuration for write
+ */
+struct hfi_cmd_ubwc_cfg {
+	uint32_t ubwc_fetch_cfg;
+	uint32_t ubwc_write_cfg;
+} __packed;
+
+/**
+ * struct hfi_cmd_ubwc_cfg_ext
+ * Payload structure to configure HFI_UBWC_CFG_TYPE_EXT
+ * @bps: UBWC configuration for bps
+ * @ipe: UBWC configuration for ipe
+ */
+struct hfi_cmd_ubwc_cfg_ext {
+	struct hfi_cmd_ubwc_cfg bps;
+	struct hfi_cmd_ubwc_cfg ipe;
+} __packed;
+
+/**
+ * struct hfi_cmd_sys_init
+ * command to initialization of system session
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_INIT
+ */
+struct hfi_cmd_sys_init {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_pc_prep
+ * command to firmware to prepare for power collapse
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_PC_PREP
+ */
+struct hfi_cmd_pc_prep {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_prop
+ * command to get/set properties of firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of properties queried/set
+ * @prop_data: array of property IDs being queried. size depends on num_prop
+ *             array of property IDs and associated structure pairs in set
+ * @HFI_CMD_SYS_GET_PROPERTY
+ * @HFI_CMD_SYS_SET_PROPERTY
+ */
+struct hfi_cmd_prop {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_cmd_ping_pkt
+ * ping command pings the firmware to confirm whether
+ * it is alive.
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ *             as part of HFI_MSG_SYS_PING_ACK
+ * @HFI_CMD_SYS_PING
+ */
+struct hfi_cmd_ping_pkt {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_sys_reset_pkt
+ * sends the reset command to FW. FW responds with a packet of the
+ * same type, so this can also be used as the reset_ack_pkt type
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ *             as part of HFI_MSG_SYS_RESET_ACK
+ * @HFI_CMD_SYS_RESET
+ */
+struct hfi_cmd_sys_reset_pkt {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/* end of sys command packet types */
+
+/* start of sys message packet types */
+
+/**
+ * struct hfi_prop
+ * structure to report maximum supported features of firmware.
+ */
+struct hfi_sys_support {
+	uint32_t place_holder;
+} __packed;
+
+/**
+ * struct hfi_supported_prop
+ * structure to report HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED
+ * for a session
+ * @num_prop: number of properties supported
+ * @prop_data: array of supported property IDs
+ */
+struct hfi_supported_prop {
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_image_version
+ * system image version
+ * @major: major version number
+ * @minor: minor version number
+ * @ver_name_size: size of version name
+ * @ver_name: image version name
+ */
+struct hfi_image_version {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t ver_name_size;
+	uint8_t  ver_name[1];
+} __packed;
+
+/**
+ * struct hfi_msg_init_done_data
+ * @api_ver:    Firmware API version
+ * @dev_ver:    Device version
+ * @num_icp_hw: Number of ICP hardware information
+ * @dev_hw_ver: Supported hardware version information
+ * @reserved:   Reserved field
+ */
+struct hfi_msg_init_done_data {
+	uint32_t api_ver;
+	uint32_t dev_ver;
+	uint32_t num_icp_hw;
+	uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX];
+	uint32_t reserved;
+};
+
+/**
+ * struct hfi_msg_init_done
+ * system init done message from firmware. Many system level properties
+ * are returned with the packet
+ * @size:      Packet size in bytes
+ * @pkt_type:  Opcode of a packet
+ * @err_type:  Error code associated with response
+ * @num_prop:  Number of default capability info
+ * @prop_data: Array of property ids and corresponding structure pairs
+ */
+struct hfi_msg_init_done {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_pc_prep_done
+ * system power collapse preparation done message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code associated with the response
+ */
+struct hfi_msg_pc_prep_done {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+} __packed;
+
+/**
+ * struct hfi_msg_prop
+ * system property info from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of property info structures
+ * @prop_data: array of property IDs and associated structure pairs
+ */
+struct hfi_msg_prop {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_idle
+ * system idle message from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ */
+struct hfi_msg_idle {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_msg_ping_ack
+ * system ping ack message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: this data is sent as part of ping command from host
+ */
+struct hfi_msg_ping_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_debug
+ * system debug message definition
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @msg_type: debug message type
+ * @msg_size: size of debug message in bytes
+ * @timestamp_hi: most significant 32 bits of the 64 bit timestamp field.
+ *                timestamp shall be interpreted as a signed 64-bit value
+ *                representing microseconds.
+ * @timestamp_lo: least significant 32 bits of the 64 bit timestamp field.
+ *                timestamp shall be interpreted as a signed 64-bit value
+ *                representing microseconds.
+ * @msg_data: message data in string form
+ */
+struct hfi_msg_debug {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t msg_type;
+	uint32_t msg_size;
+	uint32_t timestamp_hi;
+	uint32_t timestamp_lo;
+	uint8_t  msg_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_event_notify
+ * event notify message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @fw_handle: firmware session handle
+ * @event_id: session event id
+ * @event_data1: event data corresponding to event ID
+ * @event_data2: event data corresponding to event ID
+ * @ext_event_data: info array, interpreted based on event_data1
+ * and event_data2
+ */
+struct hfi_msg_event_notify {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t fw_handle;
+	uint32_t event_id;
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t ext_event_data[1];
+} __packed;
+
+/**
+ * end of sys message packet types
+ */
+
+#endif /* _HFI_DEFS_H_ */

+ 942 - 0
drivers/cam_icp/hfi.c

@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/errno.h>
+#include <linux/timer.h>
+#include <cam_icp.h>
+#include <linux/iopoll.h>
+#include <soc/qcom/socinfo.h>
+
+#include "cam_io_util.h"
+#include "hfi_reg.h"
+#include "hfi_sys_defs.h"
+#include "hfi_session_defs.h"
+#include "hfi_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+
+#define HFI_VERSION_INFO_MAJOR_VAL  1
+#define HFI_VERSION_INFO_MINOR_VAL  1
+#define HFI_VERSION_INFO_STEP_VAL   0
+#define HFI_VERSION_INFO_MAJOR_BMSK  0xFF000000
+#define HFI_VERSION_INFO_MAJOR_SHFT  24
+#define HFI_VERSION_INFO_MINOR_BMSK  0xFFFF00
+#define HFI_VERSION_INFO_MINOR_SHFT  8
+#define HFI_VERSION_INFO_STEP_BMSK   0xFF
+#define HFI_VERSION_INFO_STEP_SHFT  0
+
+#define HFI_MAX_POLL_TRY 5
+
+#define HFI_MAX_PC_POLL_TRY 150
+#define HFI_POLL_TRY_SLEEP 1
+
+static struct hfi_info *g_hfi;
+unsigned int g_icp_mmu_hdl;
+static DEFINE_MUTEX(hfi_cmd_q_mutex);
+static DEFINE_MUTEX(hfi_msg_q_mutex);
+
+void cam_hfi_queue_dump(void)
+{
+	struct hfi_qtbl *qtbl;
+	struct hfi_qtbl_hdr *qtbl_hdr;
+	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr;
+	struct hfi_mem_info *hfi_mem = NULL;
+	uint32_t *read_q, *read_ptr;
+	int i;
+
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "Unable to dump queues, HFI is not set up");
+		return;
+	}
+	hfi_mem = &g_hfi->map;
+
+	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+	qtbl_hdr = &qtbl->q_tbl_hdr;
+	CAM_DBG(CAM_HFI,
+		"qtbl: version = %x size = %u num q = %u qhdr_size = %u",
+		qtbl_hdr->qtbl_version, qtbl_hdr->qtbl_size,
+		qtbl_hdr->qtbl_num_q, qtbl_hdr->qtbl_qhdr_size);
+
+	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+	CAM_DBG(CAM_HFI, "cmd: size = %u r_idx = %u w_idx = %u addr = %x",
+		cmd_q_hdr->qhdr_q_size, cmd_q_hdr->qhdr_read_idx,
+		cmd_q_hdr->qhdr_write_idx, hfi_mem->cmd_q.iova);
+	read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+	read_ptr = (uint32_t *)(read_q + 0);
+	CAM_DBG(CAM_HFI, "CMD Q START");
+	for (i = 0; i < ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+		CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+
+	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+	CAM_DBG(CAM_HFI, "msg: size = %u r_idx = %u w_idx = %u addr = %x",
+		msg_q_hdr->qhdr_q_size, msg_q_hdr->qhdr_read_idx,
+		msg_q_hdr->qhdr_write_idx, hfi_mem->msg_q.iova);
+	read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+	read_ptr = (uint32_t *)(read_q + 0);
+	CAM_DBG(CAM_HFI, "MSG Q START");
+	for (i = 0; i < ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+		CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+}
+
+int hfi_write_cmd(void *cmd_ptr)
+{
+	uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp;
+	uint32_t *write_q, *write_ptr;
+	struct hfi_qtbl *q_tbl;
+	struct hfi_q_hdr *q;
+	int rc = 0;
+
+	if (!cmd_ptr) {
+		CAM_ERR(CAM_HFI, "command is null");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hfi_cmd_q_mutex);
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "HFI interface not setup");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	if (g_hfi->hfi_state != HFI_READY ||
+		!g_hfi->cmd_q_state) {
+		CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
+			g_hfi->hfi_state, g_hfi->cmd_q_state);
+		rc = -ENODEV;
+		goto err;
+	}
+
+	q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+	q = &q_tbl->q_hdr[Q_CMD];
+
+	write_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+
+	size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
+	if (!size_in_words) {
+		CAM_DBG(CAM_HFI, "failed");
+		rc = -EINVAL;
+		goto err;
+	}
+
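+	/*
+	 * Compute free space in the circular command queue: when the
+	 * write index is at or ahead of the read index the free region
+	 * wraps past the end of the queue, otherwise it is the gap
+	 * between the two indices.
+	 */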
+	read_idx = q->qhdr_read_idx;
+	empty_space = (q->qhdr_write_idx >= read_idx) ?
+		(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
+		(read_idx - q->qhdr_write_idx);
+	if (empty_space <= size_in_words) {
+		CAM_ERR(CAM_HFI, "failed: empty space %u, size_in_words %u",
+			empty_space, size_in_words);
+		rc = -EIO;
+		goto err;
+	}
+
+	new_write_idx = q->qhdr_write_idx + size_in_words;
+	write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
+
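+	/*
+	 * Copy the packet in one shot if it fits before the end of the
+	 * queue; otherwise split the copy at the wrap-around point.
+	 */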
+	if (new_write_idx < q->qhdr_q_size) {
+		memcpy(write_ptr, (uint8_t *)cmd_ptr,
+			size_in_words << BYTE_WORD_SHIFT);
+	} else {
+		new_write_idx -= q->qhdr_q_size;
+		temp = (size_in_words - new_write_idx) << BYTE_WORD_SHIFT;
+		memcpy(write_ptr, (uint8_t *)cmd_ptr, temp);
+		memcpy(write_q, (uint8_t *)cmd_ptr + temp,
+			new_write_idx << BYTE_WORD_SHIFT);
+	}
+
+	/*
+	 * Make sure the command data is written into the command queue
+	 * before updating the write index
+	 */
+	wmb();
+
+	q->qhdr_write_idx = new_write_idx;
+
+	/*
+	 * Before raising interrupt make sure command data is ready for
+	 * firmware to process
+	 */
+	wmb();
+	cam_io_w_mb((uint32_t)INTR_ENABLE,
+		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
+err:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	return rc;
+}
+
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
+	uint32_t *words_read)
+{
+	struct hfi_qtbl *q_tbl_ptr;
+	struct hfi_q_hdr *q;
+	uint32_t new_read_idx, size_in_words, word_diff, temp;
+	uint32_t *read_q, *read_ptr, *write_ptr;
+	uint32_t size_upper_bound = 0;
+	int rc = 0;
+
+	if (!pmsg) {
+		CAM_ERR(CAM_HFI, "Invalid msg");
+		return -EINVAL;
+	}
+
+	if (q_id > Q_DBG) {
+		CAM_ERR(CAM_HFI, "Invalid q :%u", q_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hfi_msg_q_mutex);
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "hfi not set up yet");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	if ((g_hfi->hfi_state != HFI_READY) ||
+		!g_hfi->msg_q_state) {
+		CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
+			g_hfi->hfi_state, g_hfi->msg_q_state);
+		rc = -ENODEV;
+		goto err;
+	}
+
+	q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+	q = &q_tbl_ptr->q_hdr[q_id];
+
+	if (q->qhdr_read_idx == q->qhdr_write_idx) {
+		CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
+			g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
+		rc = -EIO;
+		goto err;
+	}
+
+	if (q_id == Q_MSG) {
+		read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS;
+	} else {
+		read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
+		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_IN_WORDS;
+	}
+
+	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
+	write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
+
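+	/*
+	 * Pending data size: the direct distance between read and write
+	 * pointers, or the wrapped distance when the writer has already
+	 * wrapped past the end of the queue.
+	 */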
+	if (write_ptr > read_ptr)
+		size_in_words = write_ptr - read_ptr;
+	else {
+		word_diff = read_ptr - write_ptr;
+		if (q_id == Q_MSG)
+			size_in_words = (ICP_MSG_Q_SIZE_IN_BYTES >>
+			BYTE_WORD_SHIFT) - word_diff;
+		else
+			size_in_words = (ICP_DBG_Q_SIZE_IN_BYTES >>
+			BYTE_WORD_SHIFT) - word_diff;
+	}
+
+	if ((size_in_words == 0) ||
+		(size_in_words > size_upper_bound)) {
+		CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
+			size_in_words << BYTE_WORD_SHIFT);
+		q->qhdr_read_idx = q->qhdr_write_idx;
+		rc = -EIO;
+		goto err;
+	}
+
+	new_read_idx = q->qhdr_read_idx + size_in_words;
+
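+	/* Copy in one shot unless the packet wraps past the queue end */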
+	if (new_read_idx < q->qhdr_q_size) {
+		memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
+	} else {
+		new_read_idx -= q->qhdr_q_size;
+		temp = (size_in_words - new_read_idx) << BYTE_WORD_SHIFT;
+		memcpy(pmsg, read_ptr, temp);
+		memcpy((uint8_t *)pmsg + temp, read_q,
+			new_read_idx << BYTE_WORD_SHIFT);
+	}
+
+	q->qhdr_read_idx = new_read_idx;
+	*words_read = size_in_words;
+	/* Memory barrier to make sure the message
+	 * queue parameters are updated only after the read completes
+	 */
+	wmb();
+err:
+	mutex_unlock(&hfi_msg_q_mutex);
+	return rc;
+}
+
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_cmd_ubwc_cfg);
+
+	CAM_DBG(CAM_HFI,
+		"size of ubwc %u, ubwc_cfg [rd-0x%x,wr-0x%x]",
+		size, ubwc_cfg[0], ubwc_cfg[1]);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
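+	/* prop_data[0] holds the property ID, the payload words follow */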
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_UBWC_CFG;
+	dbg_prop->prop_data[1] = ubwc_cfg[0];
+	dbg_prop->prop_data[2] = ubwc_cfg[1];
+
+	hfi_write_cmd(prop);
+	kfree(prop);
+
+	return 0;
+}
+
+int hfi_cmd_ubwc_config_ext(uint32_t *ubwc_ipe_cfg,
+	uint32_t *ubwc_bps_cfg)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_cmd_ubwc_cfg_ext);
+
+	CAM_DBG(CAM_HFI,
+		"size of ubwc %u, ubwc_ipe_cfg[rd-0x%x,wr-0x%x] ubwc_bps_cfg[rd-0x%x,wr-0x%x]",
+		size, ubwc_ipe_cfg[0], ubwc_ipe_cfg[1],
+		ubwc_bps_cfg[0], ubwc_bps_cfg[1]);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG_EX;
+	dbg_prop->prop_data[1] = ubwc_bps_cfg[0];
+	dbg_prop->prop_data[2] = ubwc_bps_cfg[1];
+	dbg_prop->prop_data[3] = ubwc_ipe_cfg[0];
+	dbg_prop->prop_data[4] = ubwc_ipe_cfg[1];
+
+	hfi_write_cmd(prop);
+	kfree(prop);
+
+	return 0;
+}
+
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_ipe_bps_pc);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_IPEBPS_PC;
+	dbg_prop->prop_data[1] = enable;
+	dbg_prop->prop_data[2] = core_info;
+
+	hfi_write_cmd(prop);
+	kfree(prop);
+
+	return 0;
+}
+
+int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0, val;
+
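+	/* Mask of all supported debug flags; reject levels beyond it */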
+	val = HFI_DEBUG_MSG_LOW |
+		HFI_DEBUG_MSG_MEDIUM |
+		HFI_DEBUG_MSG_HIGH |
+		HFI_DEBUG_MSG_ERROR |
+		HFI_DEBUG_MSG_FATAL |
+		HFI_DEBUG_MSG_PERF |
+		HFI_DEBUG_CFG_WFI |
+		HFI_DEBUG_CFG_ARM9WD;
+
+	if (lvl > val)
+		return -EINVAL;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_debug);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+	dbg_prop->prop_data[1] = lvl;
+	dbg_prop->prop_data[2] = a5_dbg_type;
+	hfi_write_cmd(prop);
+
+	kfree(prop);
+
+	return 0;
+}
+
+int hfi_set_fw_dump_level(uint32_t lvl)
+{
+	uint8_t *prop = NULL;
+	struct hfi_cmd_prop *fw_dump_level_switch_prop = NULL;
+	uint32_t size = 0;
+
+	CAM_DBG(CAM_HFI, "fw dump ENTER");
+
+	size = sizeof(struct hfi_cmd_prop) + sizeof(lvl);
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	fw_dump_level_switch_prop = (struct hfi_cmd_prop *)prop;
+	fw_dump_level_switch_prop->size = size;
+	fw_dump_level_switch_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	fw_dump_level_switch_prop->num_prop = 1;
+	fw_dump_level_switch_prop->prop_data[0] = HFI_PROP_SYS_FW_DUMP_CFG;
+	fw_dump_level_switch_prop->prop_data[1] = lvl;
+
+	CAM_DBG(CAM_HFI, "prop->size = %d\n"
+			 "prop->pkt_type = %d\n"
+			 "prop->num_prop = %d\n"
+			 "prop->prop_data[0] = %d\n"
+			 "prop->prop_data[1] = %d\n",
+			 fw_dump_level_switch_prop->size,
+			 fw_dump_level_switch_prop->pkt_type,
+			 fw_dump_level_switch_prop->num_prop,
+			 fw_dump_level_switch_prop->prop_data[0],
+			 fw_dump_level_switch_prop->prop_data[1]);
+
+	hfi_write_cmd(prop);
+	kfree(prop);
+	return 0;
+}
+
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
+{
+	switch (type) {
+	case HFI_CMD_SYS_INIT: {
+		struct hfi_cmd_sys_init init;
+
+		memset(&init, 0, sizeof(init));
+
+		init.size = sizeof(struct hfi_cmd_sys_init);
+		init.pkt_type = type;
+		hfi_write_cmd(&init);
+	}
+		break;
+	case HFI_CMD_SYS_PC_PREP: {
+		struct hfi_cmd_pc_prep prep;
+
+		prep.size = sizeof(struct hfi_cmd_pc_prep);
+		prep.pkt_type = type;
+		hfi_write_cmd(&prep);
+	}
+		break;
+	case HFI_CMD_SYS_SET_PROPERTY: {
+		struct hfi_cmd_prop prop;
+
+		if ((uint32_t)data == (uint32_t)HFI_PROP_SYS_DEBUG_CFG) {
+			prop.size = sizeof(struct hfi_cmd_prop);
+			prop.pkt_type = type;
+			prop.num_prop = 1;
+			prop.prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+			hfi_write_cmd(&prop);
+		}
+	}
+		break;
+	case HFI_CMD_SYS_GET_PROPERTY:
+		break;
+	case HFI_CMD_SYS_PING: {
+		struct hfi_cmd_ping_pkt ping;
+
+		ping.size = sizeof(struct hfi_cmd_ping_pkt);
+		ping.pkt_type = type;
+		ping.user_data = (uint64_t)data;
+		hfi_write_cmd(&ping);
+	}
+		break;
+	case HFI_CMD_SYS_RESET: {
+		struct hfi_cmd_sys_reset_pkt reset;
+
+		reset.size = sizeof(struct hfi_cmd_sys_reset_pkt);
+		reset.pkt_type = type;
+		reset.user_data = (uint64_t)data;
+		hfi_write_cmd(&reset);
+	}
+		break;
+	case HFI_CMD_IPEBPS_CREATE_HANDLE: {
+		struct hfi_cmd_create_handle handle;
+
+		handle.size = sizeof(struct hfi_cmd_create_handle);
+		handle.pkt_type = type;
+		handle.handle_type = (uint32_t)data;
+		handle.user_data1 = 0;
+		hfi_write_cmd(&handle);
+	}
+		break;
+	case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
+		break;
+	default:
+		CAM_ERR(CAM_HFI, "command not supported :%d", type);
+		break;
+	}
+}
+
+int hfi_get_hw_caps(void *query_buf)
+{
+	int i = 0;
+	struct cam_icp_query_cap_cmd *query_cmd = NULL;
+
+	if (!query_buf) {
+		CAM_ERR(CAM_HFI, "query buf is NULL");
+		return -EINVAL;
+	}
+
+	query_cmd = (struct cam_icp_query_cap_cmd *)query_buf;
+	query_cmd->fw_version.major = 0x12;
+	query_cmd->fw_version.minor = 0x12;
+	query_cmd->fw_version.revision = 0x12;
+
+	query_cmd->api_version.major = 0x13;
+	query_cmd->api_version.minor = 0x13;
+	query_cmd->api_version.revision = 0x13;
+
+	query_cmd->num_ipe = 2;
+	query_cmd->num_bps = 1;
+
+	for (i = 0; i < CAM_ICP_DEV_TYPE_MAX; i++) {
+		query_cmd->dev_ver[i].dev_type = i;
+		query_cmd->dev_ver[i].hw_ver.major = 0x34 + i;
+		query_cmd->dev_ver[i].hw_ver.minor = 0x34 + i;
+		query_cmd->dev_ver[i].hw_ver.incr = 0x34 + i;
+	}
+	return 0;
+}
+
+void cam_hfi_disable_cpu(void __iomem *icp_base)
+{
+	uint32_t data;
+	uint32_t val;
+	uint32_t try = 0;
+
+	while (try < HFI_MAX_PC_POLL_TRY) {
+		data = cam_io_r_mb(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+		CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
+
+		if (data & ICP_CSR_A5_STATUS_WFI)
+			break;
+		/* Poll here to confirm that FW has triggered WFI so the
+		 * host can proceed. No interrupt is expected from FW
+		 * at this time.
+		 */
+		usleep_range(HFI_POLL_TRY_SLEEP * 1000,
+			(HFI_POLL_TRY_SLEEP * 1000) + 1000);
+		try++;
+	}
+
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)INTR_DISABLE,
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+}
+
+void cam_hfi_enable_cpu(void __iomem *icp_base)
+{
+	cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+}
+
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	uint32_t data;
+	uint32_t fw_version, status = 0;
+	uint32_t retry_cnt = 0;
+
+	cam_hfi_enable_cpu(icp_base);
+	g_hfi->csr_base = icp_base;
+
+	if (debug) {
+		cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
+			(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
+
+		/* Barrier needed as the next write should happen only after
+		 * the previous write has completed. The next write enables
+		 * clock gating
+		 */
+		wmb();
+
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	} else {
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
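+		/*
+		 * Poll the init response register, then re-read it with a
+		 * barrier before acting on the status.
+		 */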
+		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+		CAM_DBG(CAM_HFI, "1: status = %u", status);
+		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+		CAM_DBG(CAM_HFI, "2: status = %u", status);
+		if (status == ICP_INIT_RESP_SUCCESS)
+			break;
+
+		if (status == ICP_INIT_RESP_FAILED) {
+			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+				status);
+			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+			return -EINVAL;
+		}
+		retry_cnt++;
+	}
+
+	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+		(status == ICP_INIT_RESP_RESET)) {
+		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+				status);
+		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+		CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+		return -EINVAL;
+	}
+
+	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
+
+	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+	CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);
+
+	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
+		icp_base + HFI_REG_SFR_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
+		icp_base + HFI_REG_QDSS_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
+		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova,
+		icp_base + HFI_REG_IO_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.len,
+		icp_base + HFI_REG_IO_REGION_SIZE);
+
+	return rc;
+}
+
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+		void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	struct hfi_qtbl *qtbl;
+	struct hfi_qtbl_hdr *qtbl_hdr;
+	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
+	uint32_t hw_version, soc_version, fw_version, status = 0;
+	uint32_t retry_cnt = 0;
+	struct sfr_buf *sfr_buffer;
+
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
+	if (!g_hfi) {
+		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
+		if (!g_hfi) {
+			rc = -ENOMEM;
+			goto alloc_fail;
+		}
+	}
+
+	if (g_hfi->hfi_state != HFI_DEINIT) {
+		CAM_ERR(CAM_HFI, "hfi_init: invalid state");
+		rc = -EINVAL;
+		goto alloc_fail;
+	}
+
+	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+	g_hfi->hfi_state = HFI_DEINIT;
+	soc_version = socinfo_get_version();
+	if (debug) {
+		cam_io_w_mb(
+		(uint32_t)(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN |
+		ICP_CSR_EDBGRQ | ICP_CSR_DBGSWENABLE),
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+		msleep(100);
+		cam_io_w_mb((uint32_t)(ICP_FLAG_CSR_A5_EN |
+		ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI),
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	} else {
+		/* Due to a hardware bug in V1, ICP clock gating has to be
+		 * disabled; this is supposed to be fixed in V2. But enabling
+		 * clock gating still causes the firmware to hang, hence
+		 * clock gating is disabled on both V1 and V2 until the
+		 * hardware team root-causes it
+		 */
+		cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
+			ICP_FLAG_CSR_WAKE_UP_EN |
+			ICP_CSR_EN_CLKGATE_WFI,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+	qtbl_hdr = &qtbl->q_tbl_hdr;
+	qtbl_hdr->qtbl_version = 0xFFFFFFFF;
+	qtbl_hdr->qtbl_size = sizeof(struct hfi_qtbl);
+	qtbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_qtbl_hdr);
+	qtbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_q_hdr);
+	qtbl_hdr->qtbl_num_q = ICP_HFI_NUMBER_OF_QS;
+	qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
+
+	/* setup host-to-firmware command queue */
+	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+	cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
+	cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
+	cmd_q_hdr->qhdr_q_size =  ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	cmd_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	cmd_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	cmd_q_hdr->qhdr_read_idx = RESET;
+	cmd_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-host message queue */
+	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+	msg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
+	msg_q_hdr->qhdr_q_size = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	msg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	msg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	msg_q_hdr->qhdr_read_idx = RESET;
+	msg_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-host debug queue */
+	dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
+	dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
+	dbg_q_hdr->qhdr_q_size = ICP_DBG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	dbg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	dbg_q_hdr->qhdr_read_idx = RESET;
+	dbg_q_hdr->qhdr_write_idx = RESET;
+
+	sfr_buffer = (struct sfr_buf *)hfi_mem->sfr_buf.kva;
+	sfr_buffer->size = ICP_MSG_SFR_SIZE_IN_BYTES;
+
+	switch (event_driven_mode) {
+	case INTR_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = SET;
+		cmd_q_hdr->qhdr_tx_req = RESET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET_WM;
+		dbg_q_hdr->qhdr_rx_req = RESET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		break;
+
+	case POLL_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		break;
+
+	case WM_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = RESET;
+		cmd_q_hdr->qhdr_tx_req = SET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET_WM;
+		dbg_q_hdr->qhdr_rx_req = RESET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+		break;
+
+	default:
+		CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
+			event_driven_mode);
+		break;
+	}
+
+	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova,
+		icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
+		icp_base + HFI_REG_SFR_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
+		icp_base + HFI_REG_QDSS_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
+		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova,
+		icp_base + HFI_REG_IO_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.len,
+		icp_base + HFI_REG_IO_REGION_SIZE);
+
+	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
+
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+		CAM_DBG(CAM_HFI, "1: status = %u rc = %d", status, rc);
+		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+		CAM_DBG(CAM_HFI, "2: status = %u rc = %d", status, rc);
+		if (status == ICP_INIT_RESP_SUCCESS)
+			break;
+
+		if (status == ICP_INIT_RESP_FAILED) {
+			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+				status);
+			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+			goto regions_fail;
+		}
+		retry_cnt++;
+	}
+
+	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+		(status == ICP_INIT_RESP_RESET)) {
+		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+				status);
+		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+		CAM_ERR(CAM_HFI,
+			"hw version : : [%x], fw version : [%x]",
+			hw_version, fw_version);
+		goto regions_fail;
+	}
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "hw version : : [%x], fw version : [%x]",
+		hw_version, fw_version);
+
+	g_hfi->csr_base = icp_base;
+	g_hfi->hfi_state = HFI_READY;
+	g_hfi->cmd_q_state = true;
+	g_hfi->msg_q_state = true;
+	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+
+	return rc;
+regions_fail:
+	kfree(g_hfi);
+	g_hfi = NULL;
+alloc_fail:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+	return rc;
+}
+
+void cam_hfi_deinit(void __iomem *icp_base)
+{
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "hfi path not established yet");
+		goto err;
+	}
+
+	g_hfi->cmd_q_state = false;
+	g_hfi->msg_q_state = false;
+
+	kzfree(g_hfi);
+	g_hfi = NULL;
+
+err:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+}

+ 12 - 0
drivers/cam_icp/icp_hw/Makefile

@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/

+ 14 - 0
drivers/cam_icp/icp_hw/a5_hw/Makefile

@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/a5_hw
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/fw_inc
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o

+ 518 - 0
drivers/cam_icp/icp_hw/a5_hw/a5_core.c

@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/elf.h>
+#include <cam_icp.h>
+#include "cam_io_util.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "hfi_intf.h"
+#include "hfi_sys_defs.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
+	struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid)
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+
+	if (cpas_vote->axi_vote_valid)
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+
+	if (rc)
+		CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_icp_validate_fw(const uint8_t *elf)
+{
+	struct elf32_hdr *elf_hdr;
+
+	if (!elf) {
+		CAM_ERR(CAM_ICP, "Invalid params");
+		return -EINVAL;
+	}
+
+	elf_hdr = (struct elf32_hdr *)elf;
+
+	if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) {
+		CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
+		return -EINVAL;
+	}
+
+	/* check architecture */
+	if (elf_hdr->e_machine != EM_ARM) {
+		CAM_ERR(CAM_ICP, "unsupported arch");
+		return -EINVAL;
+	}
+
+	/* check elf bit format */
+	if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
+		CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int32_t cam_icp_get_fw_size(const uint8_t *elf, uint32_t *fw_size)
+{
+	int32_t rc = 0;
+	int32_t i = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	uint32_t seg_mem_size = 0;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	if (!elf || !fw_size) {
+		CAM_ERR(CAM_ICP, "invalid args");
+		return -EINVAL;
+	}
+
+	*fw_size = 0;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	if (!prg_hdr) {
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		if (prg_hdr->p_flags == 0)
+			continue;
+
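+		/*
+		 * Round the segment size up to its alignment and add the
+		 * load address; the firmware buffer must cover the segment
+		 * that ends highest in memory.
+		 */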
+		seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
+					~(prg_hdr->p_align - 1);
+		seg_mem_size += prg_hdr->p_vaddr;
+		CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
+			(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
+			(int)prg_hdr->p_vaddr, (int)seg_mem_size);
+		if (*fw_size < seg_mem_size)
+			*fw_size = seg_mem_size;
+
+	}
+
+	if (*fw_size == 0) {
+		CAM_ERR(CAM_ICP, "invalid elf fw file");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int32_t cam_icp_program_fw(const uint8_t *elf,
+		struct cam_a5_device_core_info *core_info)
+{
+	int32_t rc = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	int32_t i = 0;
+	u8 *dest;
+	u8 *src;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	if (!prg_hdr) {
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
+		return -EINVAL;
+	}
+
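+	/* Copy each loadable segment to its load offset in the fw buffer */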
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		if (prg_hdr->p_flags == 0)
+			continue;
+
+		CAM_DBG(CAM_ICP, "Loading FW header size: %u",
+			prg_hdr->p_filesz);
+		if (prg_hdr->p_filesz != 0) {
+			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
+			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
+				prg_hdr->p_vaddr);
+
+			memcpy_toio(dest, src, prg_hdr->p_filesz);
+		}
+	}
+
+	return rc;
+}
+
+static int32_t cam_a5_download_fw(void *device_priv)
+{
+	int32_t rc = 0;
+	uint32_t fw_size;
+	const uint8_t *fw_start = NULL;
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct platform_device         *pdev = NULL;
+	struct a5_soc_info *cam_a5_soc_info = NULL;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+	pdev = soc_info->pdev;
+	cam_a5_soc_info = soc_info->soc_private;
+
+	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
+		return rc;
+	}
+
+	if (!core_info->fw_elf) {
+		CAM_ERR(CAM_ICP, "Invalid elf size");
+		rc = -EINVAL;
+		goto fw_download_failed;
+	}
+
+	fw_start = core_info->fw_elf->data;
+	rc = cam_icp_validate_fw(fw_start);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw elf validation failed");
+		goto fw_download_failed;
+	}
+
+	rc = cam_icp_get_fw_size(fw_start, &fw_size);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get fw size");
+		goto fw_download_failed;
+	}
+
+	if (core_info->fw_buf_len < fw_size) {
+		CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
+			fw_size, core_info->fw_buf_len);
+		rc = -EINVAL;
+		goto fw_download_failed;
+	}
+
+	rc = cam_icp_program_fw(fw_start, core_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw program is failed");
+		goto fw_download_failed;
+	}
+
+fw_download_failed:
+	release_firmware(core_info->fw_elf);
+	return rc;
+}
+
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
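+	/* Vote a default SVS AHB level and A5 AXI bandwidth before
+	 * starting CPAS
+	 */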
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.num_paths = 1;
+	cpas_vote.axi_vote.axi_path[0].path_data_type =
+		CAM_ICP_DEFAULT_AXI_PATH;
+	cpas_vote.axi_vote.axi_path[0].transac_type =
+		CAM_ICP_DEFAULT_AXI_TRANSAC;
+	cpas_vote.axi_vote.axi_path[0].camnoc_bw =
+		CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw =
+		CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw =
+		CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.axi_path[0].ddr_ab_bw =
+		CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.axi_path[0].ddr_ib_bw =
+		CAM_ICP_A5_BW_BYTES_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "cpas start failed: %d", rc);
+		goto error;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_a5_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+error:
+	return rc;
+}
+
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_a5_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+irqreturn_t cam_a5_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *a5_dev = data;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	uint32_t irq_status = 0;
+
+	if (!data) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
+		return IRQ_HANDLED;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	irq_status = cam_io_r_mb(soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+				core_info->a5_hw_info->a5_host_int_status);
+
+	cam_io_w_mb(irq_status,
+			soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+			core_info->a5_hw_info->a5_host_int_clr);
+
+	if ((irq_status & A5_WDT_0) ||
+		(irq_status & A5_WDT_1)) {
+		CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
+	}
+
+	spin_lock(&a5_dev->hw_lock);
+	if (core_info->irq_cb.icp_hw_mgr_cb)
+		core_info->irq_cb.icp_hw_mgr_cb(irq_status,
+					core_info->irq_cb.data);
+	spin_unlock(&a5_dev->hw_lock);
+
+	return IRQ_HANDLED;
+}
+
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct a5_soc_info *a5_soc = NULL;
+	unsigned long flags;
+	uint32_t ubwc_ipe_cfg[ICP_UBWC_MAX] = {0};
+	uint32_t ubwc_bps_cfg[ICP_UBWC_MAX] = {0};
+	uint32_t index = 0;
+	int rc = 0, ddr_type = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_A5_CMD_FW_DOWNLOAD:
+		rc = cam_a5_download_fw(device_priv);
+		break;
+	case CAM_ICP_A5_CMD_SET_FW_BUF: {
+		struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		core_info->fw_buf = fw_buf_info->iova;
+		core_info->fw_kva_addr = fw_buf_info->kva;
+		core_info->fw_buf_len = fw_buf_info->len;
+
+		CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
+			core_info->fw_buf, core_info->fw_kva_addr,
+			core_info->fw_buf_len);
+		break;
+	}
+	case CAM_ICP_A5_SET_IRQ_CB: {
+		struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&a5_dev->hw_lock, flags);
+		core_info->irq_cb.icp_hw_mgr_cb = irq_cb->icp_hw_mgr_cb;
+		core_info->irq_cb.data = irq_cb->data;
+		spin_unlock_irqrestore(&a5_dev->hw_lock, flags);
+		break;
+	}
+
+	case CAM_ICP_A5_SEND_INIT:
+		hfi_send_system_cmd(HFI_CMD_SYS_INIT, 0, 0);
+		break;
+
+	case CAM_ICP_A5_CMD_PC_PREP:
+		hfi_send_system_cmd(HFI_CMD_SYS_PC_PREP, 0, 0);
+		break;
+
+	case CAM_ICP_A5_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		cam_a5_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+					&cpas_vote->ahb_vote,
+					&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_STOP:
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
+		break;
+	case CAM_ICP_A5_CMD_UBWC_CFG: {
+		struct a5_ubwc_cfg_ext *ubwc_cfg_ext = NULL;
+
+		a5_soc = soc_info->soc_private;
+		if (!a5_soc) {
+			CAM_ERR(CAM_ICP, "A5 private soc info is NULL");
+			return -EINVAL;
+		}
+
+		if (a5_soc->ubwc_config_ext) {
+			/* Invoke kernel API to determine DDR type */
+			ddr_type = of_fdt_get_ddrtype();
+			if ((ddr_type == DDR_TYPE_LPDDR5) ||
+				(ddr_type == DDR_TYPE_LPDDR5X))
+				index = 1;
+
+			ubwc_cfg_ext = &a5_soc->uconfig.ubwc_cfg_ext;
+			ubwc_ipe_cfg[0] =
+				ubwc_cfg_ext->ubwc_ipe_fetch_cfg[index];
+			ubwc_ipe_cfg[1] =
+				ubwc_cfg_ext->ubwc_ipe_write_cfg[index];
+			ubwc_bps_cfg[0] =
+				ubwc_cfg_ext->ubwc_bps_fetch_cfg[index];
+			ubwc_bps_cfg[1] =
+				ubwc_cfg_ext->ubwc_bps_write_cfg[index];
+			rc = hfi_cmd_ubwc_config_ext(&ubwc_ipe_cfg[0],
+					&ubwc_bps_cfg[0]);
+		} else {
+			rc = hfi_cmd_ubwc_config(a5_soc->uconfig.ubwc_cfg);
+		}
+
+		break;
+	}
+	default:
+		break;
+	}
+
+	return rc;
+}

+ 81 - 0
drivers/cam_icp/icp_hw/a5_hw/a5_core.h

@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_A5_CORE_H
+#define CAM_A5_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include "cam_a5_hw_intf.h"
+
+#define A5_QGIC_BASE            0
+#define A5_SIERRA_BASE          1
+#define A5_CSR_BASE             2
+
+#define A5_HOST_INT             0x1
+#define A5_WDT_0                0x2
+#define A5_WDT_1                0x4
+
+#define ELF_GUARD_PAGE          (2 * 1024 * 1024)
+
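+/**
+ * struct cam_a5_device_hw_info
+ * A5 register offsets, applied relative to the mapped A5 register base
+ */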
+struct cam_a5_device_hw_info {
+	uint32_t hw_ver;
+	uint32_t nsec_reset;
+	uint32_t a5_control;
+	uint32_t a5_host_int_en;
+	uint32_t a5_host_int;
+	uint32_t a5_host_int_clr;
+	uint32_t a5_host_int_status;
+	uint32_t a5_host_int_set;
+	uint32_t host_a5_int;
+	uint32_t fw_version;
+	uint32_t init_req;
+	uint32_t init_response;
+	uint32_t shared_mem_ptr;
+	uint32_t shared_mem_size;
+	uint32_t qtbl_ptr;
+	uint32_t uncached_heap_ptr;
+	uint32_t uncached_heap_size;
+	uint32_t a5_status;
+};
+
+/**
+ * struct cam_a5_device_core_info
+ * @a5_hw_info: A5 hardware info
+ * @fw_elf: firmware image, starting with the elf header
+ * @fw: start address of fw blob
+ * @fw_buf: smmu alloc/mapped fw buffer
+ * @fw_kva_addr: kernel virtual address of the fw buffer
+ * @fw_buf_len: fw buffer length
+ * @query_cap: A5 query info from firmware
+ * @a5_acquire: Acquire information of A5
+ * @irq_cb: IRQ callback
+ * @cpas_handle: CPAS handle for A5
+ * @cpas_start: state variable for cpas
+ */
+struct cam_a5_device_core_info {
+	struct cam_a5_device_hw_info *a5_hw_info;
+	const struct firmware *fw_elf;
+	void *fw;
+	uint32_t fw_buf;
+	uintptr_t fw_kva_addr;
+	uint64_t fw_buf_len;
+	struct cam_icp_a5_query_cap query_cap;
+	struct cam_icp_a5_acquire_dev a5_acquire[8];
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	uint32_t cpas_handle;
+	bool cpas_start;
+};
+
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_a5_irq(int irq_num, void *data);
+#endif /* CAM_A5_CORE_H */

+ 228 - 0
drivers/cam_icp/icp_hw/a5_hw/a5_dev.c

@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+struct a5_soc_info cam_a5_soc_info;
+EXPORT_SYMBOL(cam_a5_soc_info);
+
+struct cam_a5_device_hw_info cam_a5_hw_info = {
+	.hw_ver = 0x0,
+	.nsec_reset = 0x4,
+	.a5_control = 0x8,
+	.a5_host_int_en = 0x10,
+	.a5_host_int = 0x14,
+	.a5_host_int_clr = 0x18,
+	.a5_host_int_status = 0x1c,
+	.a5_host_int_set = 0x20,
+	.host_a5_int = 0x30,
+	.fw_version = 0x44,
+	.init_req = 0x48,
+	.init_response = 0x4c,
+	.shared_mem_ptr = 0x50,
+	.shared_mem_size = 0x54,
+	.qtbl_ptr = 0x58,
+	.uncached_heap_ptr = 0x5c,
+	.uncached_heap_size = 0x60,
+	.a5_status = 0x200,
+};
+EXPORT_SYMBOL(cam_a5_hw_info);
+
+static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+			irq_data->irq_type,
+			irq_data->u.dec_err.decerr_status.value,
+			irq_data->u.dec_err.decerr_status.thr_err,
+			irq_data->u.dec_err.decerr_status.fcl_err,
+			irq_data->u.dec_err.decerr_status.len_md_err,
+			irq_data->u.dec_err.decerr_status.format_err);
+		error_handled = true;
+		break;
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
+int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
+			struct cam_a5_device_core_info *core_info,
+			uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
+	cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed: %d", rc);
+		return rc;
+	}
+
+	core_info->cpas_handle = cpas_register_params.client_handle;
+	return rc;
+}
+
+int cam_a5_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+
+	a5_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!a5_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &a5_dev_intf->hw_idx);
+
+	a5_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!a5_dev) {
+		rc = -ENOMEM;
+		goto a5_dev_alloc_failure;
+	}
+
+	a5_dev->soc_info.pdev = pdev;
+	a5_dev->soc_info.dev = &pdev->dev;
+	a5_dev->soc_info.dev_name = pdev->name;
+	a5_dev_intf->hw_priv = a5_dev;
+	a5_dev_intf->hw_ops.init = cam_a5_init_hw;
+	a5_dev_intf->hw_ops.deinit = cam_a5_deinit_hw;
+	a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
+	a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
+
+	CAM_DBG(CAM_ICP, "type %d index %d",
+		a5_dev_intf->hw_type,
+		a5_dev_intf->hw_idx);
+
+	platform_set_drvdata(pdev, a5_dev_intf);
+
+	a5_dev->core_info = kzalloc(sizeof(struct cam_a5_device_core_info),
+					GFP_KERNEL);
+	if (!a5_dev->core_info) {
+		rc = -ENOMEM;
+		goto core_info_alloc_failure;
+	}
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ICP, "No a5 hardware info");
+		rc = -EINVAL;
+		goto match_err;
+	}
+	hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
+	core_info->a5_hw_info = hw_info;
+
+	a5_dev->soc_info.soc_private = &cam_a5_soc_info;
+
+	rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
+		a5_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed to init_soc");
+		goto init_soc_failure;
+	}
+
+	CAM_DBG(CAM_ICP, "soc info : %pK",
+				(void *)&a5_dev->soc_info);
+	rc = cam_a5_register_cpas(&a5_dev->soc_info,
+			core_info, a5_dev_intf->hw_idx);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "a5 cpas registration failed");
+		goto cpas_reg_failed;
+	}
+	a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&a5_dev->hw_mutex);
+	spin_lock_init(&a5_dev->hw_lock);
+	init_completion(&a5_dev->hw_complete);
+
+	CAM_DBG(CAM_ICP, "A5%d probe successful",
+		a5_dev_intf->hw_idx);
+	return 0;
+
+cpas_reg_failed:
+init_soc_failure:
+match_err:
+	kfree(a5_dev->core_info);
+core_info_alloc_failure:
+	kfree(a5_dev);
+a5_dev_alloc_failure:
+	kfree(a5_dev_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_a5_dt_match[] = {
+	{
+		.compatible = "qcom,cam-a5",
+		.data = &cam_a5_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_a5_dt_match);
+
+static struct platform_driver cam_a5_driver = {
+	.probe = cam_a5_probe,
+	.driver = {
+		.name = "cam-a5",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_a5_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_a5_init_module(void)
+{
+	return platform_driver_register(&cam_a5_driver);
+}
+
+static void __exit cam_a5_exit_module(void)
+{
+	platform_driver_unregister(&cam_a5_driver);
+}
+
+module_init(cam_a5_init_module);
+module_exit(cam_a5_exit_module);
+MODULE_DESCRIPTION("CAM A5 driver");
+MODULE_LICENSE("GPL v2");

+ 191 - 0
drivers/cam_icp/icp_hw/a5_hw/a5_soc.c

@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, i;
+	const char *fw_name;
+	struct a5_soc_info *a5_soc_info;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+	struct a5_ubwc_cfg_ext *ubwc_cfg_ext = NULL;
+	int num_ubwc_cfg;
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "get a5 dt prop is failed");
+		return rc;
+	}
+
+	a5_soc_info = soc_info->soc_private;
+	fw_name = a5_soc_info->fw_name;
+
+	rc = of_property_read_string(of_node, "fw_name", &fw_name);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "fw_name read failed");
+		goto end;
+	}
+
+	ubwc_cfg_ext = &a5_soc_info->uconfig.ubwc_cfg_ext;
+	num_ubwc_cfg = of_property_count_u32_elems(of_node,
+		"ubwc-ipe-fetch-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_DBG(CAM_ICP, "wrong ubwc_ipe_fetch_cfg: %d", num_ubwc_cfg);
+		rc = num_ubwc_cfg;
+		goto ubwc_ex_cfg;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-ipe-fetch-cfg",
+			i, &ubwc_cfg_ext->ubwc_ipe_fetch_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP,
+				"unable to read ubwc_ipe_fetch_cfg values");
+			goto end;
+		}
+	}
+
+	num_ubwc_cfg = of_property_count_u32_elems(of_node,
+		"ubwc-ipe-write-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_ERR(CAM_ICP, "wrong ubwc_ipe_write_cfg: %d", num_ubwc_cfg);
+		rc = num_ubwc_cfg;
+		goto end;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-ipe-write-cfg",
+				i, &ubwc_cfg_ext->ubwc_ipe_write_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP,
+				"unable to read ubwc_ipe_write_cfg values");
+			goto end;
+		}
+	}
+
+	num_ubwc_cfg = of_property_count_u32_elems(of_node,
+		"ubwc-bps-fetch-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_ERR(CAM_ICP, "wrong ubwc_bps_fetch_cfg: %d", num_ubwc_cfg);
+		rc = num_ubwc_cfg;
+		goto end;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-bps-fetch-cfg",
+			i, &ubwc_cfg_ext->ubwc_bps_fetch_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP,
+				"unable to read ubwc_bps_fetch_cfg values");
+			goto end;
+		}
+	}
+
+	num_ubwc_cfg = of_property_count_u32_elems(of_node,
+		"ubwc-bps-write-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_ERR(CAM_ICP, "wrong ubwc_bps_write_cfg: %d", num_ubwc_cfg);
+		rc = num_ubwc_cfg;
+		goto end;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-bps-write-cfg",
+			i, &ubwc_cfg_ext->ubwc_bps_write_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP,
+				"unable to read ubwc_bps_write_cfg values");
+			goto end;
+		}
+	}
+
+	a5_soc_info->ubwc_config_ext = true;
+	CAM_DBG(CAM_ICP, "read ubwc_cfg_ext for ipe/bps");
+	return rc;
+
+ubwc_ex_cfg:
+	num_ubwc_cfg = of_property_count_u32_elems(of_node, "ubwc-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_ERR(CAM_ICP, "wrong ubwc_cfg: %d", num_ubwc_cfg);
+		rc = num_ubwc_cfg;
+		goto end;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-cfg",
+			i, &a5_soc_info->uconfig.ubwc_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP, "unable to read ubwc_cfg values");
+			break;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_a5_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, a5_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_a5_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_a5_request_platform_resource(soc_info, a5_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_ICP, "enable platform failed");
+
+	return rc;
+}
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_ICP, "disable platform failed");
+
+	return rc;
+}

+ 36 - 0
drivers/cam_icp/icp_hw/a5_hw/a5_soc.h

@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_A5_SOC_H
+#define CAM_A5_SOC_H
+
+#include "cam_soc_util.h"
+
+#define ICP_UBWC_MAX 2
+
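+/**
+ * struct a5_ubwc_cfg_ext
+ * @ubwc_ipe_fetch_cfg: UBWC fetch config for IPE, indexed by DDR type
+ * @ubwc_ipe_write_cfg: UBWC write config for IPE, indexed by DDR type
+ * @ubwc_bps_fetch_cfg: UBWC fetch config for BPS, indexed by DDR type
+ * @ubwc_bps_write_cfg: UBWC write config for BPS, indexed by DDR type
+ */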
+struct a5_ubwc_cfg_ext {
+	uint32_t ubwc_ipe_fetch_cfg[ICP_UBWC_MAX];
+	uint32_t ubwc_ipe_write_cfg[ICP_UBWC_MAX];
+	uint32_t ubwc_bps_fetch_cfg[ICP_UBWC_MAX];
+	uint32_t ubwc_bps_write_cfg[ICP_UBWC_MAX];
+};
+
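+/**
+ * struct a5_soc_info
+ * @fw_name: firmware image name read from device tree
+ * @ubwc_config_ext: true when the extended UBWC config is available
+ * @uconfig: legacy ubwc_cfg array or extended per-device UBWC config
+ */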
+struct a5_soc_info {
+	char *fw_name;
+	bool ubwc_config_ext;
+	union {
+		uint32_t ubwc_cfg[ICP_UBWC_MAX];
+		struct a5_ubwc_cfg_ext ubwc_cfg_ext;
+	} uconfig;
+};
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data);
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif

+ 14 - 0
drivers/cam_icp/icp_hw/bps_hw/Makefile

@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/bps_hw
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/fw_inc
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o

+ 416 - 0
drivers/cam_icp/icp_hw/bps_hw/bps_core.c

@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "hfi_reg.h"
+
+#define HFI_MAX_POLL_TRY 5
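+/*
+ * Each readw_poll_timeout() below polls every 100 us for up to 10 ms, so
+ * with HFI_MAX_POLL_TRY retries a reset waits roughly 50 ms worst case.
+ */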
+
+static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
+			struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid)
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+				&cpas_vote->ahb_vote);
+	if (cpas_vote->axi_vote_valid)
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+				&cpas_vote->axi_vote);
+
+	if (rc < 0)
+		CAM_ERR(CAM_ICP, "cpas vote failed: %d", rc);
+
+	return rc;
+}
+
+
+int cam_bps_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.num_paths = 1;
+	cpas_vote.axi_vote.axi_path[0].path_data_type =
+		CAM_BPS_DEFAULT_AXI_PATH;
+	cpas_vote.axi_vote.axi_path[0].transac_type =
+		CAM_BPS_DEFAULT_AXI_TRANSAC;
+	cpas_vote.axi_vote.axi_path[0].camnoc_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].ddr_ab_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.axi_path[0].ddr_ib_bw =
+		CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+			&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "cpas start failed: %d", rc);
+		goto error;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_bps_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "soc enable failed: %d", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop failed");
+		else
+			core_info->cpas_start = false;
+	} else {
+		core_info->clk_enable = true;
+	}
+
+error:
+	return rc;
+}
+
+int cam_bps_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable);
+	if (rc)
+		CAM_ERR(CAM_ICP, "soc disable failed: %d", rc);
+	core_info->clk_enable = false;
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+static int cam_bps_handle_pc(struct cam_hw_info *bps_dev)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int pwr_ctrl;
+	int pwr_status;
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	if (!(pwr_ctrl & BPS_COLLAPSE_MASK)) {
+		cam_cpas_reg_read(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+			true, &pwr_status);
+
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0x1);
+
+		if ((pwr_status >> BPS_PWR_ON_MASK))
+			CAM_WARN(CAM_ICP, "BPS: pwr_status(%x):pwr_ctrl(%x)",
+				pwr_status, pwr_ctrl);
+	}
+	cam_bps_get_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true,
+		&pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	return 0;
+}
+
+static int cam_bps_handle_resume(struct cam_hw_info *bps_dev)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int pwr_ctrl;
+	int pwr_status;
+	int rc = 0;
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+	if (pwr_ctrl & BPS_COLLAPSE_MASK) {
+		CAM_DBG(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl);
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0);
+	}
+
+	rc = cam_bps_transfer_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status, true, &pwr_status);
+	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	return rc;
+}
+
+static int cam_bps_cmd_reset(struct cam_hw_soc_info *soc_info,
+	struct cam_bps_device_core_info *core_info)
+{
+	uint32_t retry_cnt = 0;
+	uint32_t status = 0;
+	int pwr_ctrl, pwr_status, rc = 0;
+	bool reset_bps_cdm_fail = false;
+	bool reset_bps_top_fail = false;
+
+	CAM_DBG(CAM_ICP, "CAM_ICP_BPS_CMD_RESET");
+
+	if (!core_info->clk_enable || !core_info->cpas_start) {
+		CAM_ERR(CAM_ICP, "BPS reset failed. clk_en %d cpas_start %d",
+				core_info->clk_enable, core_info->cpas_start);
+		return -EINVAL;
+	}
+
+	/* Reset BPS CDM core*/
+	cam_io_w_mb((uint32_t)0xF,
+		soc_info->reg_map[0].mem_base + BPS_CDM_RST_CMD);
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((soc_info->reg_map[0].mem_base +
+			BPS_CDM_IRQ_STATUS),
+			status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+			100, 10000);
+
+		CAM_DBG(CAM_ICP, "bps_cdm_irq_status = %u", status);
+
+		if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+			break;
+		retry_cnt++;
+	}
+	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		BPS_CDM_IRQ_STATUS);
+	if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+		CAM_ERR(CAM_ICP, "BPS CDM rst failed status 0x%x", status);
+		reset_bps_cdm_fail = true;
+	}
+
+	/* Reset BPS core*/
+	status = 0;
+	cam_io_w_mb((uint32_t)0x3,
+		soc_info->reg_map[0].mem_base + BPS_TOP_RST_CMD);
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((soc_info->reg_map[0].mem_base +
+			BPS_TOP_IRQ_STATUS),
+			status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+			100, 10000);
+
+		CAM_DBG(CAM_ICP, "bps_top_irq_status = %u", status);
+
+		if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+			break;
+		retry_cnt++;
+	}
+	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		BPS_TOP_IRQ_STATUS);
+	if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+		CAM_ERR(CAM_ICP, "BPS top rst failed status 0x%x", status);
+		reset_bps_top_fail = true;
+	}
+
+	cam_bps_get_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "(After) pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	if (reset_bps_cdm_fail || reset_bps_top_fail)
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_BPS_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		cam_bps_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_BPS_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+					&cpas_vote->ahb_vote,
+					&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
+		break;
+	}
+
+	case CAM_ICP_BPS_CMD_CPAS_STOP:
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
+		break;
+	case CAM_ICP_BPS_CMD_POWER_COLLAPSE:
+		rc = cam_bps_handle_pc(bps_dev);
+		break;
+	case CAM_ICP_BPS_CMD_POWER_RESUME:
+		rc = cam_bps_handle_resume(bps_dev);
+		break;
+	case CAM_ICP_BPS_CMD_UPDATE_CLK: {
+		struct cam_a5_clk_update_cmd *clk_upd_cmd =
+			(struct cam_a5_clk_update_cmd *)cmd_args;
+		uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
+
+		CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+
+		if (!core_info->clk_enable) {
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				cam_bps_handle_pc(bps_dev);
+				cam_cpas_reg_write(core_info->cpas_handle,
+					CAM_CPAS_REG_CPASTOP,
+					hw_info->pwr_ctrl, true, 0x0);
+			}
+			rc = cam_bps_toggle_clk(soc_info, true);
+			if (rc)
+				CAM_ERR(CAM_ICP, "Enable failed");
+			else
+				core_info->clk_enable = true;
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				rc = cam_bps_handle_resume(bps_dev);
+				if (rc)
+					CAM_ERR(CAM_ICP, "BPS resume failed");
+			}
+		}
+		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+		rc = cam_bps_update_clk_rate(soc_info, clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to update clk");
+		break;
+	}
+	case CAM_ICP_BPS_CMD_DISABLE_CLK:
+		if (core_info->clk_enable)
+			cam_bps_toggle_clk(soc_info, false);
+		core_info->clk_enable = false;
+		break;
+	case CAM_ICP_BPS_CMD_RESET:
+		rc = cam_bps_cmd_reset(soc_info, core_info);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
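+
+/*
+ * Minimal usage sketch (illustration only): callers reach this entry point
+ * through the generic hw_ops table that cam_bps_probe() fills in, e.g.
+ *
+ *	struct cam_icp_cpas_vote vote;
+ *
+ *	... fill in vote.ahb_vote / vote.axi_vote ...
+ *	bps_dev_intf->hw_ops.process_cmd(bps_dev_intf->hw_priv,
+ *		CAM_ICP_BPS_CMD_VOTE_CPAS, &vote, sizeof(vote));
+ */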
+
+irqreturn_t cam_bps_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}

+ 39 - 0
drivers/cam_icp/icp_hw/bps_hw/bps_core.h

@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_BPS_CORE_H
+#define CAM_BPS_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define BPS_COLLAPSE_MASK 0x1
+#define BPS_PWR_ON_MASK   0x2
+
+struct cam_bps_device_hw_info {
+	uint32_t hw_idx;
+	uint32_t pwr_ctrl;
+	uint32_t pwr_status;
+	uint32_t reserved;
+};
+
+struct cam_bps_device_core_info {
+	struct cam_bps_device_hw_info *bps_hw_info;
+	uint32_t cpas_handle;
+	bool cpas_start;
+	bool clk_enable;
+};
+
+int cam_bps_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_bps_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_bps_irq(int irq_num, void *data);
+#endif /* CAM_BPS_CORE_H */

+ 207 - 0
drivers/cam_icp/icp_hw/bps_hw/bps_dev.c

@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_bps_device_hw_info cam_bps_hw_info = {
+	.hw_idx = 0,
+	.pwr_ctrl = 0x5c,
+	.pwr_status = 0x58,
+	.reserved = 0,
+};
+EXPORT_SYMBOL(cam_bps_hw_info);
+
+static char bps_dev_name[8];
+
+static bool cam_bps_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+			irq_data->irq_type,
+			irq_data->u.dec_err.decerr_status.value,
+			irq_data->u.dec_err.decerr_status.thr_err,
+			irq_data->u.dec_err.decerr_status.fcl_err,
+			irq_data->u.dec_err.decerr_status.len_md_err,
+			irq_data->u.dec_err.decerr_status.format_err);
+		error_handled = true;
+		break;
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
+int cam_bps_register_cpas(struct cam_hw_soc_info *soc_info,
+			struct cam_bps_device_core_info *core_info,
+			uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "bps", sizeof("bps"));
+	cpas_register_params.cam_cpas_client_cb = cam_bps_cpas_cb;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+int cam_bps_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info            *bps_dev = NULL;
+	struct cam_hw_intf            *bps_dev_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_bps_device_core_info   *core_info = NULL;
+	struct cam_bps_device_hw_info     *hw_info = NULL;
+	int                                rc = 0;
+
+	bps_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!bps_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &bps_dev_intf->hw_idx);
+
+	bps_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!bps_dev) {
+		kfree(bps_dev_intf);
+		return -ENOMEM;
+	}
+
+	memset(bps_dev_name, 0, sizeof(bps_dev_name));
+	snprintf(bps_dev_name, sizeof(bps_dev_name),
+		"bps%1u", bps_dev_intf->hw_idx);
+
+	bps_dev->soc_info.pdev = pdev;
+	bps_dev->soc_info.dev = &pdev->dev;
+	bps_dev->soc_info.dev_name = bps_dev_name;
+	bps_dev_intf->hw_priv = bps_dev;
+	bps_dev_intf->hw_ops.init = cam_bps_init_hw;
+	bps_dev_intf->hw_ops.deinit = cam_bps_deinit_hw;
+	bps_dev_intf->hw_ops.process_cmd = cam_bps_process_cmd;
+	bps_dev_intf->hw_type = CAM_ICP_DEV_BPS;
+	platform_set_drvdata(pdev, bps_dev_intf);
+	bps_dev->core_info = kzalloc(sizeof(struct cam_bps_device_core_info),
+					GFP_KERNEL);
+	if (!bps_dev->core_info) {
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return -ENOMEM;
+	}
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ICP, "No bps hardware info");
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		rc = -EINVAL;
+		return rc;
+	}
+	hw_info = &cam_bps_hw_info;
+	core_info->bps_hw_info = hw_info;
+
+	rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
+		bps_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed to init_soc");
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return rc;
+	}
+	CAM_DBG(CAM_ICP, "soc info : %pK",
+		(void *)&bps_dev->soc_info);
+
+	rc = cam_bps_register_cpas(&bps_dev->soc_info,
+			core_info, bps_dev_intf->hw_idx);
+	if (rc < 0) {
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return rc;
+	}
+	bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&bps_dev->hw_mutex);
+	spin_lock_init(&bps_dev->hw_lock);
+	init_completion(&bps_dev->hw_complete);
+	CAM_DBG(CAM_ICP, "BPS%d probe successful",
+		bps_dev_intf->hw_idx);
+
+	return rc;
+}
+
+static const struct of_device_id cam_bps_dt_match[] = {
+	{
+		.compatible = "qcom,cam-bps",
+		.data = &cam_bps_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_bps_dt_match);
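+
+/*
+ * Illustrative (hypothetical) device-tree node matched by this driver; the
+ * compatible string and the cell-index property are the only parts read
+ * here:
+ *
+ *	bps: qcom,cam-bps {
+ *		compatible = "qcom,cam-bps";
+ *		cell-index = <0>;
+ *	};
+ */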
+
+static struct platform_driver cam_bps_driver = {
+	.probe = cam_bps_probe,
+	.driver = {
+		.name = "cam-bps",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_bps_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_bps_init_module(void)
+{
+	return platform_driver_register(&cam_bps_driver);
+}
+
+static void __exit cam_bps_exit_module(void)
+{
+	platform_driver_unregister(&cam_bps_driver);
+}
+
+module_init(cam_bps_init_module);
+module_exit(cam_bps_exit_module);
+MODULE_DESCRIPTION("CAM BPS driver");
+MODULE_LICENSE("GPL v2");

+ 162 - 0
drivers/cam_icp/icp_hw/bps_hw/bps_soc.c

@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ICP, "failed to get bps dt properties");
+
+	return rc;
+}
+
+static int cam_bps_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, bps_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_bps_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_bps_request_platform_resource(soc_info, bps_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, false);
+	if (rc)
+		CAM_ERR(CAM_ICP, "enable platform failed");
+
+	return rc;
+}
+
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+		false);
+	if (rc)
+		CAM_ERR(CAM_ICP, "disable platform failed");
+
+	return rc;
+}
+
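+/*
+ * GDSC handoff helpers: as the function names suggest,
+ * cam_bps_transfer_gdsc_control() hands regulator control to hardware by
+ * switching every BPS regulator to REGULATOR_MODE_FAST, while
+ * cam_bps_get_gdsc_control() reclaims software control via
+ * REGULATOR_MODE_NORMAL. Both unwind already-switched regulators to the
+ * opposite mode if any regulator_set_mode() call fails part-way.
+ */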
+int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_set_mode(soc_info->rgltr[i],
+			REGULATOR_MODE_FAST);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+				soc_info->rgltr_name[i]);
+			goto rgltr_set_mode_failed;
+		}
+	}
+	return 0;
+
+rgltr_set_mode_failed:
+	for (i = i - 1; i >= 0; i--)
+		if (soc_info->rgltr[i])
+			regulator_set_mode(soc_info->rgltr[i],
+					REGULATOR_MODE_NORMAL);
+
+	return rc;
+}
+
+int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_set_mode(soc_info->rgltr[i],
+			REGULATOR_MODE_NORMAL);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+				soc_info->rgltr_name[i]);
+			goto rgltr_set_mode_failed;
+		}
+	}
+	return 0;
+
+rgltr_set_mode_failed:
+	for (i = i - 1; i >= 0; i--)
+		if (soc_info->rgltr[i])
+			regulator_set_mode(soc_info->rgltr[i],
+					REGULATOR_MODE_FAST);
+
+	return rc;
+}
+
+int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate)
+{
+	int32_t src_clk_idx;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE]) &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
+	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}
+
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+	int rc = 0;
+
+	if (clk_enable)
+		rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+	else
+		cam_soc_util_clk_disable_default(soc_info);
+
+	return rc;
+}

+ 26 - 0
drivers/cam_icp/icp_hw/bps_hw/bps_soc.h

@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_BPS_SOC_H_
+#define _CAM_BPS_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data);
+
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk);
+
+int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate);
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
+#endif /* _CAM_BPS_SOC_H_*/

+ 19 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/Makefile

@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/isp/isp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/isp/isp_hw/hw_utils/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/isp/isp_hw/isp_hw_mgr/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/fw_inc/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_sync
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/a5_hw/
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o

+ 5582 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c

@@ -0,0 +1,5582 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <cam_defs.h>
+#include <cam_icp.h>
+#include <cam_cpas.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "hfi_intf.h"
+#include "hfi_reg.h"
+#include "hfi_session_defs.h"
+#include "hfi_sys_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "a5_core.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_cpas_api.h"
+#include "cam_common_util.h"
+
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_DEV_TYPE_TO_CLK_TYPE(dev_type) \
+	((dev_type == CAM_ICP_RES_TYPE_BPS) ? ICP_CLK_HW_BPS : ICP_CLK_HW_IPE)
+
+#define ICP_DEVICE_IDLE_TIMEOUT 400
+
+static struct cam_icp_hw_mgr icp_hw_mgr;
+
+static void cam_icp_mgr_process_dbg_buf(unsigned int debug_lvl);
+
+static const char *cam_icp_dev_type_to_name(
+	uint32_t dev_type)
+{
+	switch (dev_type) {
+	case CAM_ICP_RES_TYPE_BPS:
+		return "BPS";
+	case CAM_ICP_RES_TYPE_IPE_RT:
+		return "IPE_RT";
+	case CAM_ICP_RES_TYPE_IPE:
+		return "IPE";
+	default:
+		return "Invalid dev type";
+	}
+}
+
+static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	int rc;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_UBWC_CFG, NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ICP, "CAM_ICP_A5_CMD_UBWC_CFG failed");
+
+	return rc;
+}
+
+static void cam_icp_hw_mgr_clk_info_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct cam_icp_clk_info *hw_mgr_clk_info;
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+	else
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+
+	if (hw_mgr_clk_info->base_clk >= ctx_data->clk_info.base_clk)
+		hw_mgr_clk_info->base_clk -= ctx_data->clk_info.base_clk;
+}
+
+static void cam_icp_hw_mgr_reset_clk_info(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int i;
+
+	for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+		hw_mgr->clk_info[i].base_clk = 0;
+		hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
+		hw_mgr->clk_info[i].over_clked = 0;
+		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	}
+	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
+}
+
+static int cam_icp_get_actual_clk_rate_idx(
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++)
+		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+			return i;
+
+	/*
+	 * Caller has to ensure returned index is within array
+	 * size bounds while accessing that index.
+	 */
+
+	return i;
+}
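+
+/*
+ * Example for the lookup above (hypothetical rates, in MHz for brevity):
+ * with clk_info.clk_rate[] = {200, 400, 480, 600} and base_clk = 450,
+ * the first level satisfying the request is index 2 (480 MHz). If no
+ * level is high enough, i == CAM_MAX_VOTE is returned.
+ */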
+
+static bool cam_icp_is_over_clk(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+	int base_clk_idx;
+	int curr_clk_idx;
+
+	base_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
+		hw_mgr_clk_info->base_clk);
+
+	curr_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
+		hw_mgr_clk_info->curr_clk);
+
+	CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %d %d",
+		base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk,
+		hw_mgr_clk_info->curr_clk);
+
+	if (curr_clk_idx > base_clk_idx)
+		return true;
+
+	return false;
+}
+
+static int cam_icp_get_lower_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+	if (i > 0)
+		return ctx_data->clk_info.clk_rate[i - 1];
+
+	CAM_DBG(CAM_ICP, "Already clk at lower level");
+	return base_clk;
+}
+
+static int cam_icp_get_next_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+	if (i < CAM_MAX_VOTE - 1)
+		return ctx_data->clk_info.clk_rate[i + 1];
+
+	CAM_DBG(CAM_ICP, "Already clk at higher level");
+
+	return base_clk;
+}
+
+static int cam_icp_get_actual_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++)
+		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+			return ctx_data->clk_info.clk_rate[i];
+
+	return base_clk;
+}
+
+static int cam_icp_supported_clk_rates(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_hw_info *dev = NULL;
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		dev_intf = hw_mgr->bps_dev_intf;
+	else
+		dev_intf = hw_mgr->ipe0_dev_intf;
+
+	if (!dev_intf) {
+		CAM_ERR(CAM_ICP, "dev_intf is invalid");
+		return -EINVAL;
+	}
+	dev = (struct cam_hw_info *)dev_intf->hw_priv;
+	soc_info = &dev->soc_info;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		ctx_data->clk_info.clk_rate[i] =
+			soc_info->clk_rate[i][soc_info->src_clk_idx];
+		CAM_DBG(CAM_ICP, "clk_info[%d] = %d",
+			i, ctx_data->clk_info.clk_rate[i]);
+	}
+
+	return 0;
+}
+
+static int cam_icp_clk_idx_from_req_id(struct cam_icp_hw_ctx_data *ctx_data,
+	uint64_t req_id)
+{
+	struct hfi_frame_process_info *frame_process;
+	int i;
+
+	frame_process = &ctx_data->hfi_frame_process;
+
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		if (frame_process->request_id[i] == req_id)
+			return i;
+
+	return 0;
+}
+
+static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+	ctx_data->clk_info.uncompressed_bw = 0;
+	ctx_data->clk_info.compressed_bw = 0;
+	for (i = 0; i < CAM_ICP_MAX_PER_PATH_VOTES; i++) {
+		ctx_data->clk_info.axi_path[i].camnoc_bw = 0;
+		ctx_data->clk_info.axi_path[i].mnoc_ab_bw = 0;
+		ctx_data->clk_info.axi_path[i].mnoc_ib_bw = 0;
+	}
+
+	cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+
+	return 0;
+}
+
+static bool cam_icp_frame_pending(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	return !bitmap_empty(ctx_data->hfi_frame_process.bitmap,
+			CAM_FRAME_CMD_MAX);
+}
+
+static int cam_icp_ctx_timer_reset(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	if (ctx_data && ctx_data->watch_dog) {
+		ctx_data->watch_dog_reset_counter++;
+		CAM_DBG(CAM_ICP, "reset timer : ctx_id = %d, counter=%d",
+			ctx_data->ctx_id, ctx_data->watch_dog_reset_counter);
+		crm_timer_reset(ctx_data->watch_dog);
+	}
+
+	return 0;
+}
+
+static void cam_icp_device_timer_reset(struct cam_icp_hw_mgr *hw_mgr,
+	int device_index)
+{
+	if ((device_index >= ICP_CLK_HW_MAX) || (!hw_mgr))
+		return;
+
+	if (hw_mgr->clk_info[device_index].watch_dog) {
+		CAM_DBG(CAM_ICP, "reset timer : device_index = %d",
+			device_index);
+		crm_timer_reset(hw_mgr->clk_info[device_index].watch_dog);
+		hw_mgr->clk_info[device_index].watch_dog_reset_counter++;
+	}
+}
+
+static int32_t cam_icp_deinit_idle_clk(void *priv, void *data)
+{
+	struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *)priv;
+	struct clk_work_data *task_data = (struct clk_work_data *)data;
+	struct cam_icp_clk_info *clk_info =
+		(struct cam_icp_clk_info *)task_data->data;
+	uint32_t id;
+	uint32_t i;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_a5_clk_update_cmd clk_upd_cmd;
+	int rc = 0;
+	bool busy = false;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	clk_info->base_clk = 0;
+	clk_info->curr_clk = 0;
+	clk_info->over_clked = 0;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		mutex_lock(&ctx_data->ctx_mutex);
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED) &&
+			(ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type)
+			== clk_info->hw_type)) {
+			busy = cam_icp_frame_pending(ctx_data);
+			if (busy) {
+				mutex_unlock(&ctx_data->ctx_mutex);
+				break;
+			}
+			cam_icp_ctx_clk_info_init(ctx_data);
+		}
+		mutex_unlock(&ctx_data->ctx_mutex);
+	}
+
+	if (busy) {
+		cam_icp_device_timer_reset(hw_mgr, clk_info->hw_type);
+		rc = -EBUSY;
+		goto done;
+	}
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (clk_info->hw_type == ICP_CLK_HW_BPS) {
+		dev_intf = bps_dev_intf;
+		id = CAM_ICP_BPS_CMD_DISABLE_CLK;
+	} else if (clk_info->hw_type == ICP_CLK_HW_IPE) {
+		dev_intf = ipe0_dev_intf;
+		id = CAM_ICP_IPE_CMD_DISABLE_CLK;
+	} else {
+		CAM_ERR(CAM_ICP, "Invalid clk hw type %d", clk_info->hw_type);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
+
+	clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
+
+	if (clk_info->hw_type != ICP_CLK_HW_BPS)
+		if (ipe1_dev_intf)
+			ipe1_dev_intf->hw_ops.process_cmd(
+				ipe1_dev_intf->hw_priv, id,
+				&clk_upd_cmd,
+				sizeof(struct cam_a5_clk_update_cmd));
+
+done:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int32_t cam_icp_ctx_timer(void *priv, void *data)
+{
+	struct clk_work_data *task_data = (struct clk_work_data *)data;
+	struct cam_icp_hw_ctx_data *ctx_data =
+		(struct cam_icp_hw_ctx_data *)task_data->data;
+	struct cam_icp_hw_mgr *hw_mgr = &icp_hw_mgr;
+	uint32_t id;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_icp_clk_info *clk_info;
+	struct cam_icp_cpas_vote clk_update;
+	int i = 0;
+	int device_share_ratio = 1;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "ctx_data is NULL, failed to update clk");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if ((ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) ||
+		(ctx_data->watch_dog_reset_counter == 0)) {
+		CAM_DBG(CAM_ICP, "state %d, counter=%d",
+			ctx_data->state, ctx_data->watch_dog_reset_counter);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return 0;
+	}
+
+	if (cam_icp_frame_pending(ctx_data)) {
+		cam_icp_ctx_timer_reset(ctx_data);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EBUSY;
+	}
+
+	CAM_DBG(CAM_ICP,
+		"E :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+		ctx_data->ctx_id,
+		ctx_data->clk_info.uncompressed_bw,
+		ctx_data->clk_info.compressed_bw,
+		ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (!ctx_data->icp_dev_acquire_info) {
+		CAM_WARN(CAM_ICP, "NULL acquire info");
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+	}
+
+	/*
+	 * Since there are 2 devices, we assume the load is evenly shared
+	 * between HWs and corresponding AXI paths. So divide total bw by half
+	 * to vote on each device
+	 */
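+	/* e.g. with IPE0 and IPE1 both present, a 2 GB/s camnoc vote is
+	 * applied as 1 GB/s on each device
+	 */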
+	if ((ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) && (ipe1_dev_intf))
+		device_share_ratio = 2;
+
+	clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	clk_update.ahb_vote.vote.freq = 0;
+	clk_update.ahb_vote_valid = false;
+
+	if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V1) {
+		clk_update.axi_vote.num_paths = 1;
+		if (ctx_data->icp_dev_acquire_info->dev_type ==
+			CAM_ICP_RES_TYPE_BPS) {
+			clk_update.axi_vote.axi_path[0].path_data_type =
+				CAM_BPS_DEFAULT_AXI_PATH;
+			clk_update.axi_vote.axi_path[0].transac_type =
+				CAM_BPS_DEFAULT_AXI_TRANSAC;
+		} else {
+			clk_update.axi_vote.axi_path[0].path_data_type =
+				CAM_IPE_DEFAULT_AXI_PATH;
+			clk_update.axi_vote.axi_path[0].transac_type =
+				CAM_IPE_DEFAULT_AXI_TRANSAC;
+		}
+
+		clk_info->compressed_bw -= ctx_data->clk_info.compressed_bw;
+		clk_info->uncompressed_bw -= ctx_data->clk_info.uncompressed_bw;
+
+		ctx_data->clk_info.uncompressed_bw = 0;
+		ctx_data->clk_info.compressed_bw = 0;
+		ctx_data->clk_info.curr_fc = 0;
+		ctx_data->clk_info.base_clk = 0;
+
+		clk_update.axi_vote.axi_path[0].camnoc_bw =
+			clk_info->uncompressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].mnoc_ab_bw =
+			clk_info->compressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].mnoc_ib_bw =
+			clk_info->compressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].ddr_ab_bw =
+			clk_info->compressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].ddr_ib_bw =
+			clk_info->compressed_bw / device_share_ratio;
+	} else {
+		int path_index;
+
+		/*
+		 * Remove previous vote of this context from hw mgr first.
+		 * hw_mgr_clk_info has all valid paths, with each path in its
+		 * own index. BW that we wanted to vote now is after removing
+		 * current context's vote from hw mgr consolidated vote
+		 */
+		for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+			if (ctx_data->icp_dev_acquire_info->dev_type ==
+				CAM_ICP_RES_TYPE_BPS) {
+				/*
+				 * By assuming BPS has Read-All, Write-All
+				 * votes only.
+				 */
+				path_index =
+					ctx_data->clk_info.axi_path[i]
+					.transac_type -
+					CAM_AXI_TRANSACTION_READ;
+			} else {
+				path_index =
+					ctx_data->clk_info.axi_path[i]
+					.path_data_type -
+					CAM_AXI_PATH_DATA_IPE_START_OFFSET;
+			}
+
+			if (path_index >= CAM_ICP_MAX_PER_PATH_VOTES) {
+				CAM_WARN(CAM_ICP,
+					"Invalid path %d, start offset=%d, max=%d",
+					ctx_data->clk_info.axi_path[i]
+					.path_data_type,
+					CAM_AXI_PATH_DATA_IPE_START_OFFSET,
+					CAM_ICP_MAX_PER_PATH_VOTES);
+				continue;
+			}
+
+			clk_info->axi_path[path_index].camnoc_bw -=
+				ctx_data->clk_info.axi_path[i].camnoc_bw;
+			clk_info->axi_path[path_index].mnoc_ab_bw -=
+				ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+			clk_info->axi_path[path_index].mnoc_ib_bw -=
+				ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+			clk_info->axi_path[path_index].ddr_ab_bw -=
+				ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+			clk_info->axi_path[path_index].ddr_ib_bw -=
+				ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+		}
+
+		memset(&ctx_data->clk_info.axi_path[0], 0,
+			CAM_ICP_MAX_PER_PATH_VOTES *
+			sizeof(struct cam_axi_per_path_bw_vote));
+		ctx_data->clk_info.curr_fc = 0;
+		ctx_data->clk_info.base_clk = 0;
+
+		clk_update.axi_vote.num_paths = clk_info->num_paths;
+		memcpy(&clk_update.axi_vote.axi_path[0],
+			&clk_info->axi_path[0],
+			clk_update.axi_vote.num_paths *
+			sizeof(struct cam_axi_per_path_bw_vote));
+
+		if (device_share_ratio > 1) {
+			for (i = 0; i < clk_update.axi_vote.num_paths; i++) {
+				clk_update.axi_vote.axi_path[i].camnoc_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].mnoc_ab_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].mnoc_ib_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].ddr_ab_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].ddr_ib_bw /=
+					device_share_ratio;
+			}
+		}
+	}
+
+	clk_update.axi_vote_valid = true;
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_update, sizeof(clk_update));
+
+	/*
+	 * Vote half bandwidth each on both devices.
+	 * Total bw at mnoc - CPAS will take care of adding up.
+	 * camnoc clk calculate is more accurate this way.
+	 */
+	if ((ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) && (ipe1_dev_intf))
+		ipe1_dev_intf->hw_ops.process_cmd(ipe1_dev_intf->hw_priv, id,
+		&clk_update, sizeof(clk_update));
+
+	CAM_DBG(CAM_ICP, "X :ctx_id = %d curr_fc = %u bc = %u",
+		ctx_data->ctx_id, ctx_data->clk_info.curr_fc,
+		ctx_data->clk_info.base_clk);
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return 0;
+}
+
+static void cam_icp_ctx_timer_cb(struct timer_list *timer_data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+
+	spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.timer_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_ctx_timer;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static void cam_icp_device_timer_cb(struct timer_list *timer_data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+
+	spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.timer_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_deinit_idle_clk;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i, j;
+
+	for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+		hw_mgr->clk_info[i].base_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
+		hw_mgr->clk_info[i].over_clked = 0;
+		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		for (j = 0; j < CAM_ICP_MAX_PER_PATH_VOTES; j++) {
+			hw_mgr->clk_info[i].axi_path[j].path_data_type = 0;
+			hw_mgr->clk_info[i].axi_path[j].transac_type = 0;
+			hw_mgr->clk_info[i].axi_path[j].camnoc_bw = 0;
+			hw_mgr->clk_info[i].axi_path[j].mnoc_ab_bw = 0;
+			hw_mgr->clk_info[i].axi_path[j].mnoc_ib_bw = 0;
+		}
+
+		hw_mgr->clk_info[i].hw_type = i;
+		hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
+	}
+
+	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
+
+	return 0;
+}
+
+static int cam_icp_ctx_timer_start(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+
+	rc = crm_timer_init(&ctx_data->watch_dog,
+		200, ctx_data, &cam_icp_ctx_timer_cb);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to start timer");
+
+	ctx_data->watch_dog_reset_counter = 0;
+
+	CAM_DBG(CAM_ICP, "start timer : ctx_id = %d", ctx_data->ctx_id);
+	return rc;
+}
+
+static int cam_icp_device_timer_start(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < ICP_CLK_HW_MAX; i++)  {
+		if (!hw_mgr->clk_info[i].watch_dog) {
+			rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
+				ICP_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info[i],
+				&cam_icp_device_timer_cb);
+
+			if (rc)
+				CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+
+			hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_icp_ctx_timer_stop(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	if (ctx_data->watch_dog) {
+		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		ctx_data->watch_dog_reset_counter = 0;
+		crm_timer_exit(&ctx_data->watch_dog);
+		ctx_data->watch_dog = NULL;
+	}
+
+	return 0;
+}
+
+static void cam_icp_device_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+	if (!hw_mgr->bps_ctxt_cnt &&
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog) {
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog_reset_counter = 0;
+		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog = NULL;
+	}
+
+	if (!hw_mgr->ipe_ctxt_cnt &&
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog) {
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog_reset_counter = 0;
+		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog = NULL;
+	}
+}
+
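+/*
+ * Worked example (illustrative figures, assuming budget is expressed in
+ * nanoseconds as the 1000000000 multiplier suggests, and is non-zero): a
+ * request needing frame_cycles = 20000000 within a budget of 33333333 ns
+ * (~30 fps) yields base_clk = 20000000 * 1000000000 / 33333333, i.e.
+ * roughly 600 MHz.
+ */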
+static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
+	uint64_t budget)
+{
+	uint64_t base_clk;
+	uint64_t mul = 1000000000;
+
+	base_clk = (frame_cycles * mul) / budget;
+
+	CAM_DBG(CAM_ICP, "budget = %lld fc = %d ib = %lld base_clk = %lld",
+		budget, frame_cycles,
+		(long long)(frame_cycles * mul), base_clk);
+
+	return base_clk;
+}
+
+static bool cam_icp_busy_prev_reqs(struct hfi_frame_process_info *frm_process,
+	uint64_t req_id)
+{
+	int i;
+	int cnt;
+
+	for (i = 0, cnt = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (frm_process->request_id[i]) {
+			if (frm_process->fw_process_flag[i]) {
+				CAM_DBG(CAM_ICP, "r id = %lld busy = %d",
+					frm_process->request_id[i],
+					frm_process->fw_process_flag[i]);
+				cnt++;
+			}
+		}
+	}
+	if (cnt > 1)
+		return true;
+
+	return false;
+}
+
+static int cam_icp_calc_total_clk(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_clk_info *hw_mgr_clk_info, uint32_t dev_type)
+{
+	int i;
+	struct cam_icp_hw_ctx_data *ctx_data;
+
+	hw_mgr_clk_info->base_clk = 0;
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type) ==
+			ICP_DEV_TYPE_TO_CLK_TYPE(dev_type))
+			hw_mgr_clk_info->base_clk +=
+				ctx_data->clk_info.base_clk;
+	}
+
+	return 0;
+}
+
+static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	uint32_t next_clk_level;
+	uint32_t actual_clk;
+	bool rc = false;
+
+	/*
+	 * 1. If the current request's frame cycles (fc) exceed the previous
+	 *    frame's fc:
+	 *      Calculate the new base clock.
+	 *      If the sum of base clocks exceeds the next available clk
+	 *      level, update the clock rate, set curr_clk_rate to the sum of
+	 *      base clock rates and reset the over-clock count to zero.
+	 *      Otherwise bump the clock rate to the next level, update
+	 *      curr_clk_rate and reset the over-clock count to zero.
+	 * 2. If the current fc is less than or equal to the previous frame's
+	 *    fc, still bump the clock to the next available level if one
+	 *    exists; update the clock and reset the over-clock count to
+	 *    zero. If the clock is already at the highest rate, no update
+	 *    is needed.
+	 */
+	ctx_data->clk_info.base_clk = base_clk;
+	hw_mgr_clk_info->over_clked = 0;
+	if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
+		cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+			ctx_data->icp_dev_acquire_info->dev_type);
+		actual_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+			ctx_data, base_clk);
+		if (hw_mgr_clk_info->base_clk > actual_clk) {
+			hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk;
+		} else {
+			next_clk_level = cam_icp_get_next_clk_rate(hw_mgr,
+				ctx_data, hw_mgr_clk_info->curr_clk);
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+		}
+		rc = true;
+	} else {
+		next_clk_level =
+			cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
+			hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk < next_clk_level) {
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+			rc = true;
+		}
+	}
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+
+	return rc;
+}
+
+static bool cam_icp_update_clk_overclk_free(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	bool rc = false;
+
+	/*
+	 * When there are no pending packets:
+	 *    1. If the over-clock count is below the threshold, increase the
+	 *       count and leave the clock rate unchanged.
+	 *    2. If the over-clock count has reached the threshold, lower the
+	 *       clock rate by one level and update the hw_mgr current clock
+	 *       value:
+	 *        a. if the new clock rate is greater than the sum of base
+	 *           clock rates, reset the over-clock count to zero only if
+	 *           it is still over-clocked;
+	 *        b. if it is less than the sum of base clocks, move to the
+	 *           next clock level and reset the over-clock count to zero;
+	 *        c. if it equals the sum of base clock rates, reset the
+	 *           over-clock count to zero.
+	 */
+	if (hw_mgr_clk_info->over_clked < hw_mgr_clk_info->threshold) {
+		hw_mgr_clk_info->over_clked++;
+		rc = false;
+	} else {
+		hw_mgr_clk_info->curr_clk =
+			cam_icp_get_lower_clk_rate(hw_mgr, ctx_data,
+			hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+			if (cam_icp_is_over_clk(hw_mgr, ctx_data,
+				hw_mgr_clk_info))
+				hw_mgr_clk_info->over_clked = 0;
+		} else if (hw_mgr_clk_info->curr_clk <
+			hw_mgr_clk_info->base_clk) {
+			hw_mgr_clk_info->curr_clk =
+				cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
+				hw_mgr_clk_info->curr_clk);
+			hw_mgr_clk_info->over_clked = 0;
+		} else if (hw_mgr_clk_info->curr_clk ==
+			hw_mgr_clk_info->base_clk) {
+			hw_mgr_clk_info->over_clked = 0;
+		}
+		rc = true;
+	}
+
+	return rc;
+}
+
+static bool cam_icp_update_clk_free(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	bool rc = false;
+	bool over_clocked = false;
+
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+	ctx_data->clk_info.base_clk = base_clk;
+	cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+		ctx_data->icp_dev_acquire_info->dev_type);
+
+	/*
+	 * The current clock is not always the sum of the base clocks:
+	 * because clock scaling moves to the next higher or lower level,
+	 * it always equals one of the discrete clock values supported by
+	 * the hardware. So even when the current clock is higher than the
+	 * sum of base clocks, it is not necessarily over-clocked; it is
+	 * considered over-clocked only when it exceeds the discrete clock
+	 * level the base clock maps to.
+	 * 1. Handle the over-clock case.
+	 * 2. If the current clock is less than the sum of base clocks,
+	 *    update the current clock.
+	 * 3. If the current clock equals the sum of base clocks, no action.
+	 */
+
+	over_clocked = cam_icp_is_over_clk(hw_mgr, ctx_data,
+		hw_mgr_clk_info);
+
+	if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk &&
+		over_clocked) {
+		rc = cam_icp_update_clk_overclk_free(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+	} else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+		hw_mgr_clk_info->over_clked = 0;
+		rc = false;
+	} else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
+		hw_mgr_clk_info->curr_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+			ctx_data, hw_mgr_clk_info->base_clk);
+		rc = true;
+	}
+
+	return rc;
+}
+
+static bool cam_icp_debug_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+	if (icp_hw_mgr.icp_debug_clk < ICP_CLK_TURBO_HZ &&
+		icp_hw_mgr.icp_debug_clk &&
+		icp_hw_mgr.icp_debug_clk != hw_mgr_clk_info->curr_clk) {
+		hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_debug_clk;
+		hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_debug_clk;
+		hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_debug_clk;
+		hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_debug_clk;
+		CAM_DBG(CAM_ICP, "bc = %d cc = %d",
+			hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
+		return true;
+	}
+
+	return false;
+}
+
+static bool cam_icp_default_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+	if (icp_hw_mgr.icp_default_clk != hw_mgr_clk_info->curr_clk) {
+		hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_default_clk;
+		hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_default_clk;
+		hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_default_clk;
+		hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_default_clk;
+		CAM_DBG(CAM_ICP, "bc = %d cc = %d",
+			hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
+		return true;
+	}
+
+	return false;
+}
+
+static bool cam_icp_update_bw_v2(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_req_internal_v2 *clk_info,
+	bool busy)
+{
+	int i, path_index;
+	bool update_required = true;
+
+	/*
+	 * If current request bandwidth is different from previous frames, then
+	 * recalculate bandwidth of all contexts of same hardware and update
+	 * voting of bandwidth
+	 */
+
+	for (i = 0; i < clk_info->num_paths; i++)
+		CAM_DBG(CAM_ICP, "clk_info camnoc = %lld busy = %d",
+			clk_info->axi_path[i].camnoc_bw, busy);
+
+	if (clk_info->num_paths == ctx_data->clk_info.num_paths) {
+		update_required = false;
+		for (i = 0; i < clk_info->num_paths; i++) {
+			if ((clk_info->axi_path[i].transac_type ==
+			ctx_data->clk_info.axi_path[i].transac_type) &&
+			(clk_info->axi_path[i].path_data_type ==
+			ctx_data->clk_info.axi_path[i].path_data_type) &&
+			(clk_info->axi_path[i].camnoc_bw ==
+			ctx_data->clk_info.axi_path[i].camnoc_bw) &&
+			(clk_info->axi_path[i].mnoc_ab_bw ==
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw)) {
+				continue;
+			} else {
+				update_required = true;
+				break;
+			}
+		}
+	}
+
+	if (!update_required) {
+		CAM_DBG(CAM_ICP,
+			"Incoming BW hasn't changed, no update required");
+		return false;
+	}
+
+	if (busy) {
+		for (i = 0; i < clk_info->num_paths; i++) {
+			if (ctx_data->clk_info.axi_path[i].camnoc_bw >
+				clk_info->axi_path[i].camnoc_bw)
+				return false;
+		}
+	}
+
+	/*
+	 * Remove previous vote of this context from hw mgr first.
+	 * hw_mgr_clk_info has all valid paths, with each path in its own index
+	 */
+	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+		if (ctx_data->icp_dev_acquire_info->dev_type ==
+			CAM_ICP_RES_TYPE_BPS) {
+			/* By assuming BPS has Read-All, Write-All votes only */
+			path_index =
+				ctx_data->clk_info.axi_path[i].transac_type -
+				CAM_AXI_TRANSACTION_READ;
+		} else {
+			path_index =
+				ctx_data->clk_info.axi_path[i].path_data_type -
+				CAM_AXI_PATH_DATA_IPE_START_OFFSET;
+		}
+
+		if (path_index >= CAM_ICP_MAX_PER_PATH_VOTES) {
+			CAM_WARN(CAM_ICP,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_IPE_START_OFFSET,
+				CAM_ICP_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		hw_mgr_clk_info->axi_path[path_index].camnoc_bw -=
+			ctx_data->clk_info.axi_path[i].camnoc_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -=
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -=
+			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -=
+			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -=
+			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+	}
+
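+	/* Cache the incoming per-path votes for the next frame's comparison */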
+	ctx_data->clk_info.num_paths = clk_info->num_paths;
+
+	memcpy(&ctx_data->clk_info.axi_path[0],
+		&clk_info->axi_path[0],
+		clk_info->num_paths * sizeof(struct cam_axi_per_path_bw_vote));
+
+	/*
+	 * Add new vote of this context in hw mgr.
+	 * hw_mgr_clk_info has all paths, with each path in its own index
+	 */
+	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
+		if (ctx_data->icp_dev_acquire_info->dev_type ==
+			CAM_ICP_RES_TYPE_BPS) {
+			/* Assume BPS casts only Read-All and Write-All votes */
+			path_index =
+				ctx_data->clk_info.axi_path[i].transac_type -
+				CAM_AXI_TRANSACTION_READ;
+		} else {
+			path_index =
+				ctx_data->clk_info.axi_path[i].path_data_type -
+				CAM_AXI_PATH_DATA_IPE_START_OFFSET;
+		}
+
+		if (path_index >= CAM_ICP_MAX_PER_PATH_VOTES) {
+			CAM_WARN(CAM_ICP,
+				"Invalid path %d, start offset=%d, max=%d",
+				ctx_data->clk_info.axi_path[i].path_data_type,
+				CAM_AXI_PATH_DATA_IPE_START_OFFSET,
+				CAM_ICP_MAX_PER_PATH_VOTES);
+			continue;
+		}
+
+		hw_mgr_clk_info->axi_path[path_index].path_data_type =
+			ctx_data->clk_info.axi_path[i].path_data_type;
+		hw_mgr_clk_info->axi_path[path_index].transac_type =
+			ctx_data->clk_info.axi_path[i].transac_type;
+		hw_mgr_clk_info->axi_path[path_index].camnoc_bw +=
+			ctx_data->clk_info.axi_path[i].camnoc_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw +=
+			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw +=
+			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw +=
+			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
+		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw +=
+			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
+
+		CAM_DBG(CAM_ICP,
+			"Consolidate Path Vote : Dev[%s] i[%d] path_idx[%d] : [%s %s] [%lld %lld]",
+			cam_icp_dev_type_to_name(
+			ctx_data->icp_dev_acquire_info->dev_type),
+			i, path_index,
+			cam_cpas_axi_util_trans_type_to_string(
+			hw_mgr_clk_info->axi_path[path_index].transac_type),
+			cam_cpas_axi_util_path_type_to_string(
+			hw_mgr_clk_info->axi_path[path_index].path_data_type),
+			hw_mgr_clk_info->axi_path[path_index].camnoc_bw,
+			hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw);
+	}
+
+	if (hw_mgr_clk_info->num_paths < ctx_data->clk_info.num_paths)
+		hw_mgr_clk_info->num_paths = ctx_data->clk_info.num_paths;
+
+	return true;
+}
+
+static bool cam_icp_update_bw(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	bool busy)
+{
+	int i;
+	struct cam_icp_hw_ctx_data *ctx;
+
+	/*
+	 * If current request bandwidth is different from previous frames, then
+	 * recalculate bandwidth of all contexts of same hardware and update
+	 * voting of bandwidth
+	 */
+	CAM_DBG(CAM_ICP, "ubw ctx = %lld clk_info ubw = %lld busy = %d",
+		ctx_data->clk_info.uncompressed_bw,
+		clk_info->uncompressed_bw, busy);
+
+	if ((clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw) &&
+		(ctx_data->clk_info.uncompressed_bw ==
+		hw_mgr_clk_info->uncompressed_bw)) {
+		CAM_DBG(CAM_ICP, "Update not required bw=%lld",
+			ctx_data->clk_info.uncompressed_bw);
+		return false;
+	}
+
+	if (busy &&
+		(ctx_data->clk_info.uncompressed_bw >
+		clk_info->uncompressed_bw)) {
+		CAM_DBG(CAM_ICP, "Busy, Update not req existing=%lld, new=%lld",
+			ctx_data->clk_info.uncompressed_bw,
+			clk_info->uncompressed_bw);
+		return false;
+	}
+
+	ctx_data->clk_info.uncompressed_bw = clk_info->uncompressed_bw;
+	ctx_data->clk_info.compressed_bw = clk_info->compressed_bw;
+	hw_mgr_clk_info->uncompressed_bw = 0;
+	hw_mgr_clk_info->compressed_bw = 0;
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx = &hw_mgr->ctx_data[i];
+		if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx->icp_dev_acquire_info->dev_type) ==
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type)) {
+			hw_mgr_clk_info->uncompressed_bw +=
+				ctx->clk_info.uncompressed_bw;
+			hw_mgr_clk_info->compressed_bw +=
+				ctx->clk_info.compressed_bw;
+			CAM_DBG(CAM_ICP,
+				"Current context=[%lld %lld] Total=[%lld %lld]",
+				ctx->clk_info.uncompressed_bw,
+				ctx->clk_info.compressed_bw,
+				hw_mgr_clk_info->uncompressed_bw,
+				hw_mgr_clk_info->compressed_bw);
+		}
+	}
+
+	return true;
+}
+
+static bool cam_icp_check_clk_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+	bool busy, rc = false;
+	uint32_t base_clk;
+	struct cam_icp_clk_bw_request *clk_info;
+	struct hfi_frame_process_info *frame_info;
+	uint64_t req_id;
+	struct cam_icp_clk_info *hw_mgr_clk_info;
+
+	cam_icp_ctx_timer_reset(ctx_data);
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_BPS);
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		CAM_DBG(CAM_ICP, "Reset bps timer");
+	} else {
+		cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_IPE);
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		CAM_DBG(CAM_ICP, "Reset ipe timer");
+	}
+
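+	/* A non-zero debugfs clock overrides the normal calculation */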
+	if (icp_hw_mgr.icp_debug_clk)
+		return cam_icp_debug_clk_update(hw_mgr_clk_info);
+
+	/* Check whether there are any pending frames in this context */
+	frame_info = &ctx_data->hfi_frame_process;
+	req_id = frame_info->request_id[idx];
+	busy = cam_icp_busy_prev_reqs(frame_info, req_id);
+	CAM_DBG(CAM_ICP, "busy = %d req_id = %lld", busy, req_id);
+
+	clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
+	if (!clk_info->frame_cycles)
+		return cam_icp_default_clk_update(hw_mgr_clk_info);
+
+	/* Calculate base clk rate */
+	base_clk = cam_icp_mgr_calc_base_clk(
+		clk_info->frame_cycles, clk_info->budget_ns);
+	ctx_data->clk_info.rt_flag = clk_info->rt_flag;
+
+	if (busy)
+		rc = cam_icp_update_clk_busy(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+	else
+		rc = cam_icp_update_clk_free(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+
+	CAM_DBG(CAM_ICP, "bc = %d cc = %d busy = %d overclk = %d uc = %d",
+		hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk,
+		busy, hw_mgr_clk_info->over_clked, rc);
+
+	return rc;
+}
+
+static bool cam_icp_check_bw_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+	bool busy, bw_updated = false;
+	int i;
+	struct cam_icp_clk_bw_request *clk_info;
+	struct cam_icp_clk_bw_req_internal_v2 *clk_info_v2;
+	struct cam_icp_clk_info *hw_mgr_clk_info;
+	struct hfi_frame_process_info *frame_info;
+	uint64_t req_id;
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+	else
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+
+	frame_info = &ctx_data->hfi_frame_process;
+	req_id = frame_info->request_id[idx];
+	busy = cam_icp_busy_prev_reqs(frame_info, req_id);
+
+	if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V1) {
+		clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
+
+		CAM_DBG(CAM_ICP,
+			"Ctx[%pK][%d] Req[%lld] Current camnoc=%lld, mnoc=%lld",
+			ctx_data, ctx_data->ctx_id, req_id,
+			hw_mgr_clk_info->uncompressed_bw,
+			hw_mgr_clk_info->compressed_bw);
+
+		bw_updated = cam_icp_update_bw(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, busy);
+	} else if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V2) {
+		clk_info_v2 = &ctx_data->hfi_frame_process.clk_info_v2[idx];
+
+		bw_updated = cam_icp_update_bw_v2(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info_v2, busy);
+
+		for (i = 0; i < hw_mgr_clk_info->num_paths; i++) {
+			CAM_DBG(CAM_ICP,
+				"Final path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld, device: %s",
+				cam_cpas_axi_util_path_type_to_string(
+				hw_mgr_clk_info->axi_path[i].path_data_type),
+				cam_cpas_axi_util_trans_type_to_string(
+				hw_mgr_clk_info->axi_path[i].transac_type),
+				hw_mgr_clk_info->axi_path[i].camnoc_bw,
+				hw_mgr_clk_info->axi_path[i].mnoc_ab_bw,
+				hw_mgr_clk_info->axi_path[i].mnoc_ib_bw,
+				cam_icp_dev_type_to_name(
+				ctx_data->icp_dev_acquire_info->dev_type));
+		}
+	} else {
+		CAM_ERR(CAM_ICP, "Invalid bw config version: %d",
+			ctx_data->bw_config_version);
+		return false;
+	}
+
+	return bw_updated;
+}
+
+static int cam_icp_update_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	uint32_t id;
+	uint32_t curr_clk_rate;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_a5_clk_update_cmd clk_upd_cmd;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		curr_clk_rate = hw_mgr->clk_info[ICP_CLK_HW_BPS].curr_clk;
+		id = CAM_ICP_BPS_CMD_UPDATE_CLK;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		curr_clk_rate = hw_mgr->clk_info[ICP_CLK_HW_IPE].curr_clk;
+		id = CAM_ICP_IPE_CMD_UPDATE_CLK;
+	}
+
+	CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", curr_clk_rate,
+		ctx_data->icp_dev_acquire_info->dev_type);
+	clk_upd_cmd.curr_clk_rate = curr_clk_rate;
+	clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
+
+	if ((ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) && ipe1_dev_intf)
+		ipe1_dev_intf->hw_ops.process_cmd(
+			ipe1_dev_intf->hw_priv, id,
+			&clk_upd_cmd,
+			sizeof(struct cam_a5_clk_update_cmd));
+
+	return 0;
+}
+
+static int cam_icp_update_cpas_vote(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	uint32_t id;
+	int i = 0;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_icp_clk_info *clk_info;
+	struct cam_icp_cpas_vote clk_update = {{0}, {0}, 0, 0};
+	int device_share_ratio = 1;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+	}
+
+	/*
+	 * Since there are 2 devices, we assume the load is evenly shared
+	 * between HWs and corresponding AXI paths. So divide total bw by half
+	 * to vote on each device
+	 */
+	if ((ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) && (ipe1_dev_intf))
+		device_share_ratio = 2;
+
+	clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	clk_update.ahb_vote.vote.freq = 0;
+	clk_update.ahb_vote_valid = false;
+
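+	/*
+	 * BW config v1 carries a single uncompressed/compressed pair, so it
+	 * is mapped onto one default AXI path; v2 already has per-path votes.
+	 */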
+	if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V1) {
+		clk_update.axi_vote.num_paths = 1;
+		if (ctx_data->icp_dev_acquire_info->dev_type ==
+			CAM_ICP_RES_TYPE_BPS) {
+			clk_update.axi_vote.axi_path[0].path_data_type =
+				CAM_BPS_DEFAULT_AXI_PATH;
+			clk_update.axi_vote.axi_path[0].transac_type =
+				CAM_BPS_DEFAULT_AXI_TRANSAC;
+		} else {
+			clk_update.axi_vote.axi_path[0].path_data_type =
+				CAM_IPE_DEFAULT_AXI_PATH;
+			clk_update.axi_vote.axi_path[0].transac_type =
+				CAM_IPE_DEFAULT_AXI_TRANSAC;
+		}
+		clk_update.axi_vote.axi_path[0].camnoc_bw =
+			clk_info->uncompressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].mnoc_ab_bw =
+			clk_info->compressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].mnoc_ib_bw =
+			clk_info->compressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].ddr_ab_bw =
+			clk_info->compressed_bw / device_share_ratio;
+		clk_update.axi_vote.axi_path[0].ddr_ib_bw =
+			clk_info->compressed_bw / device_share_ratio;
+	} else {
+		clk_update.axi_vote.num_paths = clk_info->num_paths;
+		memcpy(&clk_update.axi_vote.axi_path[0],
+			&clk_info->axi_path[0],
+			clk_update.axi_vote.num_paths *
+			sizeof(struct cam_axi_per_path_bw_vote));
+
+		if (device_share_ratio > 1) {
+			for (i = 0; i < clk_update.axi_vote.num_paths; i++) {
+				clk_update.axi_vote.axi_path[i].camnoc_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].mnoc_ab_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].mnoc_ib_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].ddr_ab_bw /=
+					device_share_ratio;
+				clk_update.axi_vote.axi_path[i].ddr_ib_bw /=
+					device_share_ratio;
+			}
+		}
+	}
+
+	clk_update.axi_vote_valid = true;
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_update, sizeof(clk_update));
+
+	/*
+	 * Vote half bandwidth each on both devices.
+	 * Total bw at mnoc - CPAS will take care of adding up.
+	 * camnoc clk calculate is more accurate this way.
+	 */
+	if ((ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) && (ipe1_dev_intf))
+		ipe1_dev_intf->hw_ops.process_cmd(ipe1_dev_intf->hw_priv, id,
+			&clk_update, sizeof(clk_update));
+
+	return 0;
+}
+
+static int cam_icp_mgr_ipe_bps_clk_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+	int rc = 0;
+
+	if (cam_icp_check_clk_update(hw_mgr, ctx_data, idx))
+		rc = cam_icp_update_clk_rate(hw_mgr, ctx_data);
+
+	if (cam_icp_check_bw_update(hw_mgr, ctx_data, idx))
+		rc |= cam_icp_update_cpas_vote(hw_mgr, ctx_data);
+
+	return rc;
+}
+
+static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	uint32_t core_info_mask = 0;
+	int rc = 0;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return -EINVAL;
+	}
+
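+	/*
+	 * Only the first context on a device powers it on; later contexts
+	 * just bump the refcount.
+	 */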
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		if (hw_mgr->bps_ctxt_cnt++)
+			goto end;
+		if (!hw_mgr->bps_clk_state) {
+			bps_dev_intf->hw_ops.init(
+				bps_dev_intf->hw_priv, NULL, 0);
+			hw_mgr->bps_clk_state = true;
+		}
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
+			bps_dev_intf->hw_ops.process_cmd(
+				bps_dev_intf->hw_priv,
+				CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
+		}
+		core_info_mask = ICP_PWR_CLP_BPS;
+	} else {
+		if (hw_mgr->ipe_ctxt_cnt++)
+			goto end;
+		if (!hw_mgr->ipe_clk_state)
+			ipe0_dev_intf->hw_ops.init(
+				ipe0_dev_intf->hw_priv, NULL, 0);
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
+			ipe0_dev_intf->hw_ops.process_cmd(
+				ipe0_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
+		}
+
+		if ((icp_hw_mgr.ipe1_enable) &&
+			(ipe1_dev_intf) &&
+			(!hw_mgr->ipe_clk_state)) {
+			ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+
+			if (icp_hw_mgr.ipe_bps_pc_flag) {
+				ipe1_dev_intf->hw_ops.process_cmd(
+					ipe1_dev_intf->hw_priv,
+					CAM_ICP_IPE_CMD_POWER_RESUME,
+					NULL, 0);
+			}
+		}
+		hw_mgr->ipe_clk_state = true;
+
+		if ((icp_hw_mgr.ipe1_enable) &&
+			(ipe1_dev_intf))
+			core_info_mask = (ICP_PWR_CLP_IPE0 |
+				ICP_PWR_CLP_IPE1);
+		else
+			core_info_mask = ICP_PWR_CLP_IPE0;
+	}
+
+	CAM_DBG(CAM_ICP, "core_info %X", core_info_mask);
+	if (icp_hw_mgr.ipe_bps_pc_flag)
+		rc = hfi_enable_ipe_bps_pc(true, core_info_mask);
+	else
+		rc = hfi_enable_ipe_bps_pc(false, core_info_mask);
+end:
+	return rc;
+}
+
+static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int dev_type)
+{
+	int rc = 0, dev;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return -EINVAL;
+	}
+
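+	/* A NULL ctx_data means a forced collapse for the given dev_type */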
+	if (!ctx_data)
+		dev = dev_type;
+	else
+		dev = ctx_data->icp_dev_acquire_info->dev_type;
+
+	if (dev == CAM_ICP_RES_TYPE_BPS) {
+		CAM_DBG(CAM_ICP, "bps ctx cnt %d", hw_mgr->bps_ctxt_cnt);
+		if (ctx_data)
+			--hw_mgr->bps_ctxt_cnt;
+
+		if (hw_mgr->bps_ctxt_cnt)
+			goto end;
+
+		if (icp_hw_mgr.ipe_bps_pc_flag &&
+			!atomic_read(&hw_mgr->recovery)) {
+			rc = bps_dev_intf->hw_ops.process_cmd(
+				bps_dev_intf->hw_priv,
+				CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+				NULL, 0);
+		}
+
+		if (hw_mgr->bps_clk_state) {
+			bps_dev_intf->hw_ops.deinit(
+				bps_dev_intf->hw_priv, NULL, 0);
+			hw_mgr->bps_clk_state = false;
+		}
+	} else {
+		CAM_DBG(CAM_ICP, "ipe ctx cnt %d", hw_mgr->ipe_ctxt_cnt);
+		if (ctx_data)
+			--hw_mgr->ipe_ctxt_cnt;
+
+		if (hw_mgr->ipe_ctxt_cnt)
+			goto end;
+
+		if (icp_hw_mgr.ipe_bps_pc_flag &&
+			!atomic_read(&hw_mgr->recovery)) {
+			rc = ipe0_dev_intf->hw_ops.process_cmd(
+				ipe0_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+		}
+
+		if (hw_mgr->ipe_clk_state)
+			ipe0_dev_intf->hw_ops.deinit(
+				ipe0_dev_intf->hw_priv, NULL, 0);
+
+		if (ipe1_dev_intf) {
+			if (icp_hw_mgr.ipe_bps_pc_flag &&
+				!atomic_read(&hw_mgr->recovery)) {
+				rc = ipe1_dev_intf->hw_ops.process_cmd(
+					ipe1_dev_intf->hw_priv,
+					CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+					NULL, 0);
+			}
+
+			if (hw_mgr->ipe_clk_state)
+				ipe1_dev_intf->hw_ops.deinit(
+					ipe1_dev_intf->hw_priv, NULL, 0);
+		}
+
+		hw_mgr->ipe_clk_state = false;
+	}
+
+end:
+	return rc;
+}
+
+static int cam_icp_mgr_ipe_bps_get_gdsc_control(
+	struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong");
+		return -EINVAL;
+	}
+
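+	/*
+	 * Note: rc is overwritten by each process_cmd call, so only the
+	 * last power-collapse result is returned to the caller.
+	 */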
+	if (icp_hw_mgr.ipe_bps_pc_flag) {
+		rc = bps_dev_intf->hw_ops.process_cmd(
+			bps_dev_intf->hw_priv,
+			CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+			NULL, 0);
+
+		rc = ipe0_dev_intf->hw_ops.process_cmd(
+			ipe0_dev_intf->hw_priv,
+			CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+
+		if (ipe1_dev_intf) {
+			rc = ipe1_dev_intf->hw_ops.process_cmd(
+				ipe1_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+				NULL, 0);
+		}
+	}
+
+	return rc;
+}
+
+static int cam_icp_set_dbg_default_clk(void *data, u64 val)
+{
+	icp_hw_mgr.icp_debug_clk = val;
+	return 0;
+}
+
+static int cam_icp_get_dbg_default_clk(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.icp_debug_clk;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_default_clk,
+	cam_icp_get_dbg_default_clk,
+	cam_icp_set_dbg_default_clk, "%16llu");
+
+static int cam_icp_set_a5_dbg_lvl(void *data, u64 val)
+{
+	icp_hw_mgr.a5_dbg_lvl = val;
+	return 0;
+}
+
+static int cam_icp_get_a5_dbg_lvl(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.a5_dbg_lvl;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fs, cam_icp_get_a5_dbg_lvl,
+	cam_icp_set_a5_dbg_lvl, "%08llu");
+
+static int cam_icp_set_a5_dbg_type(void *data, u64 val)
+{
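+	/* Note: the bound check is inclusive, unlike the dump-level setter */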
+	if (val <= NUM_HFI_DEBUG_MODE)
+		icp_hw_mgr.a5_debug_type = val;
+	return 0;
+}
+
+static int cam_icp_get_a5_dbg_type(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.a5_debug_type;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_type_fs, cam_icp_get_a5_dbg_type,
+	cam_icp_set_a5_dbg_type, "%08llu");
+
+static int cam_icp_set_a5_fw_dump_lvl(void *data, u64 val)
+{
+	if (val < NUM_HFI_DUMP_LVL)
+		icp_hw_mgr.a5_fw_dump_lvl = val;
+	return 0;
+}
+
+static int cam_icp_get_a5_fw_dump_lvl(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.a5_fw_dump_lvl;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fw_dump, cam_icp_get_a5_fw_dump_lvl,
+	cam_icp_set_a5_fw_dump_lvl, "%08llu");
+
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+	int rc = 0;
+
+	icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+	if (!icp_hw_mgr.dentry)
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("icp_pc",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.icp_pc_flag)) {
+		CAM_ERR(CAM_ICP, "failed to create icp_pc entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_bool("ipe_bps_pc",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.ipe_bps_pc_flag)) {
+		CAM_ERR(CAM_ICP, "failed to create ipe_bps_pc entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("icp_debug_clk",
+		0644,
+		icp_hw_mgr.dentry, NULL,
+		&cam_icp_debug_default_clk)) {
+		CAM_ERR(CAM_ICP, "failed to create icp_debug_clk entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_bool("a5_jtag_debug",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.a5_jtag_debug)) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("a5_debug_type",
+		0644,
+		icp_hw_mgr.dentry,
+		NULL, &cam_icp_debug_type_fs)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_debug_type");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("a5_debug_lvl",
+		0644,
+		icp_hw_mgr.dentry,
+		NULL, &cam_icp_debug_fs)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_dbg_lvl");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("a5_fw_dump_lvl",
+		0644,
+		icp_hw_mgr.dentry,
+		NULL, &cam_icp_debug_fw_dump)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_fw_dump_lvl");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	return rc;
+err:
+	debugfs_remove_recursive(icp_hw_mgr.dentry);
+	icp_hw_mgr.dentry = NULL;
+	return rc;
+}
+
+static int cam_icp_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_mgr *hw_mgr;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid params %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = priv;
+	task_data = (struct hfi_cmd_work_data *)data;
+
+	rc = hfi_write_cmd(task_data->data);
+
+	return rc;
+}
+
+static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data buf_data;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->request_id[i])
+			continue;
+		buf_data.request_id = hfi_frame_process->request_id[i];
+		ctx_data->ctxt_event_cb(ctx_data->context_priv,
+			false, &buf_data);
+		hfi_frame_process->request_id[i] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[i] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[i]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[i]);
+			ctx_data->hfi_frame_process.in_resource[i] = 0;
+		}
+		hfi_frame_process->fw_process_flag[i] = false;
+		clear_bit(i, ctx_data->hfi_frame_process.bitmap);
+	}
+
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->in_free_resource[i])
+			continue;
+
+		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		cam_sync_destroy(
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		ctx_data->hfi_frame_process.in_free_resource[i] = 0;
+	}
+
+	return 0;
+}
+
+static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
+{
+	int i;
+	uint32_t idx;
+	uint64_t request_id;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data buf_data;
+	uint32_t clk_type;
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+	request_id = ioconfig_ack->user_data2;
+	ctx_data = (struct cam_icp_hw_ctx_data *)
+		U64_TO_PTR(ioconfig_ack->user_data1);
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid Context req %llu", request_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	cam_icp_ctx_timer_reset(ctx_data);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_ICP, "ctx %u is in %d state",
+			ctx_data->ctx_id, ctx_data->state);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return 0;
+	}
+
+	CAM_DBG(CAM_REQ,
+		"ctx_id: %u, request_id: %lld dev_type: %d",
+		ctx_data->ctx_id, request_id,
+		ctx_data->icp_dev_acquire_info->dev_type);
+
+	clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type);
+	cam_icp_device_timer_reset(&icp_hw_mgr, clk_type);
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		if (hfi_frame_process->request_id[i] == request_id)
+			break;
+
+	if (i >= CAM_FRAME_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "pkt not found in ctx data for req_id = %lld",
+			request_id);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+	idx = i;
+
+	if (flag == ICP_FRAME_PROCESS_FAILURE)
+		CAM_ERR(CAM_ICP, "Done with error: ctx_id %d req %llu dev %d",
+			ctx_data->ctx_id, request_id,
+			ctx_data->icp_dev_acquire_info->dev_type);
+
+	buf_data.request_id = hfi_frame_process->request_id[idx];
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, flag, &buf_data);
+	hfi_frame_process->request_id[idx] = 0;
+	if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_resource[idx]);
+		cam_sync_destroy(ctx_data->hfi_frame_process.in_resource[idx]);
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
+	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	hfi_frame_process->fw_process_flag[idx] = false;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return 0;
+}
+
+static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
+{
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_frame_process_done *frame_done;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+	if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
+		CAM_ERR(CAM_ICP, "failed with error : %u",
+			ioconfig_ack->err_type);
+		cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_FAILURE);
+		return -EIO;
+	}
+
+	frame_done =
+		(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
+	if (!frame_done) {
+		cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_FAILURE);
+		return -EINVAL;
+	}
+
+	if (frame_done->result)
+		return cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_FAILURE);
+	else
+		return cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_SUCCESS);
+}
+
+static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_ipe_config *ipe_config_ack = NULL;
+	struct hfi_msg_bps_common *bps_config_ack = NULL;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+
+	if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
+		ipe_config_ack =
+			(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
+		if (ipe_config_ack->rc) {
+			CAM_ERR(CAM_ICP, "rc = %d err = %u",
+				ipe_config_ack->rc, ioconfig_ack->err_type);
+			return -EIO;
+		}
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
+			return -EINVAL;
+		}
+		ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
+	} else {
+		bps_config_ack =
+			(struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
+		if (bps_config_ack->rc) {
+			CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
+				bps_config_ack->rc, ioconfig_ack->opcode);
+			return -EIO;
+		}
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
+			return -EINVAL;
+		}
+	}
+	complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
+{
+	struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	int rc = 0;
+
+	create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
+	if (!create_handle_ack) {
+		CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
+		return -EINVAL;
+	}
+
+	ctx_data =
+		(struct cam_icp_hw_ctx_data *)(uintptr_t)
+		create_handle_ack->user_data1;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_data");
+		return -EINVAL;
+	}
+
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
+		ctx_data->fw_handle = create_handle_ack->fw_handle;
+		CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
+	} else {
+		CAM_WARN(CAM_ICP,
+			"This ctx is no longer in use current state: %d",
+			ctx_data->state);
+		ctx_data->fw_handle = 0;
+		rc = -EPERM;
+	}
+	complete(&ctx_data->wait_complete);
+	return rc;
+}
+
+static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
+{
+	struct hfi_msg_ping_ack *ping_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
+	if (!ping_ack) {
+		CAM_ERR(CAM_ICP, "Empty ping ack message");
+		return -EINVAL;
+	}
+
+	ctx_data = (struct cam_icp_hw_ctx_data *)
+		U64_TO_PTR(ping_ack->user_data);
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_data");
+		return -EINVAL;
+	}
+
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)
+		complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
+{
+	int rc;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
+		CAM_DBG(CAM_ICP, "received IPE/BPS CONFIG_IO");
+		rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
+		if (rc)
+			return rc;
+		break;
+
+	case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
+		rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
+		if (rc)
+			return rc;
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	int rc = 0;
+
+	a5_dev_intf = icp_hw_mgr.a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+			complete(&ctx_data->wait_complete);
+		CAM_DBG(CAM_ICP, "received IPE/BPS ABORT: ctx_state = %d",
+			ctx_data->state);
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
+			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
+			complete(&ctx_data->wait_complete);
+		}
+		CAM_DBG(CAM_ICP, "received IPE/BPS DESTROY: ctx_state = %d",
+			ctx_data->state);
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_MEM_MAP:
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+			complete(&ctx_data->wait_complete);
+		CAM_DBG(CAM_ICP,
+			"received IPE/BPS MAP ACK: ctx_state = %d err_status = %u",
+			ctx_data->state, ioconfig_ack->err_type);
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP:
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+			complete(&ctx_data->wait_complete);
+		CAM_DBG(CAM_ICP,
+			"received IPE/BPS UNMAP ACK: ctx_state = %d err_status = %u",
+			ctx_data->state, ioconfig_ack->err_type);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int cam_icp_ipebps_reset(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *ipe0_dev_intf;
+	struct cam_hw_intf *ipe1_dev_intf;
+	struct cam_hw_intf *bps_dev_intf;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
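+	/*
+	 * Reset is best-effort: failures are only logged and 0 is returned
+	 * so that recovery can proceed.
+	 */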
+	if (hw_mgr->bps_ctxt_cnt) {
+		rc = bps_dev_intf->hw_ops.process_cmd(
+			bps_dev_intf->hw_priv,
+			CAM_ICP_BPS_CMD_RESET,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_ICP, "bps reset failed");
+	}
+
+	if (hw_mgr->ipe_ctxt_cnt) {
+		rc = ipe0_dev_intf->hw_ops.process_cmd(
+			ipe0_dev_intf->hw_priv,
+			CAM_ICP_IPE_CMD_RESET,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_ICP, "ipe0 reset failed");
+
+		if (ipe1_dev_intf) {
+			rc = ipe1_dev_intf->hw_ops.process_cmd(
+				ipe1_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_RESET,
+				NULL, 0);
+			if (rc)
+				CAM_ERR(CAM_ICP, "ipe1 reset failed");
+		}
+	}
+
+	return 0;
+}
+
+static int cam_icp_mgr_trigger_recovery(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct sfr_buf *sfr_buffer = NULL;
+
+	CAM_DBG(CAM_ICP, "Enter");
+
+	if (atomic_read(&hw_mgr->recovery)) {
+		CAM_ERR(CAM_ICP, "Recovery is set");
+		return rc;
+	}
+
+	sfr_buffer = (struct sfr_buf *)icp_hw_mgr.hfi_mem.sfr_buf.kva;
+	CAM_WARN(CAM_ICP, "SFR:%s", sfr_buffer->msg);
+
+	cam_icp_mgr_ipe_bps_get_gdsc_control(hw_mgr);
+	cam_icp_ipebps_reset(hw_mgr);
+
+	atomic_set(&hw_mgr->recovery, 1);
+	CAM_DBG(CAM_ICP, "Done");
+	return rc;
+}
+
+static int cam_icp_mgr_process_fatal_error(
+	struct cam_icp_hw_mgr *hw_mgr, uint32_t *msg_ptr)
+{
+	struct hfi_msg_event_notify *event_notify;
+	int rc = 0;
+
+	CAM_DBG(CAM_ICP, "Enter");
+
+	event_notify = (struct hfi_msg_event_notify *)msg_ptr;
+	if (!event_notify) {
+		CAM_ERR(CAM_ICP, "Empty event message");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "evt_id: %u evt_data1: %u evt_data2: %u",
+		event_notify->event_id,
+		event_notify->event_data1,
+		event_notify->event_data2);
+
+	if (event_notify->event_id == HFI_EVENT_SYS_ERROR) {
+		CAM_INFO(CAM_ICP, "received HFI_EVENT_SYS_ERROR");
+		rc = cam_icp_mgr_trigger_recovery(hw_mgr);
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+	}
+
+	return rc;
+}
+
+static void cam_icp_mgr_process_dbg_buf(unsigned int debug_lvl)
+{
+	uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
+	struct hfi_msg_debug *dbg_msg;
+	uint32_t read_len, size_processed = 0;
+	uint64_t timestamp = 0;
+	char *dbg_buf;
+	int rc = 0;
+
+	rc = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG, &read_len);
+	if (rc)
+		return;
+
+	msg_ptr = (uint32_t *)icp_hw_mgr.dbg_buf;
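+	/*
+	 * Walk the packets just read from the debug queue. read_len is in
+	 * words while the per-packet size field is in bytes, hence the
+	 * BYTE_WORD_SHIFT conversions below.
+	 */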
+	while (true) {
+		pkt_ptr = msg_ptr;
+		if (pkt_ptr[ICP_PACKET_TYPE] == HFI_MSG_SYS_DEBUG) {
+			dbg_msg = (struct hfi_msg_debug *)pkt_ptr;
+			dbg_buf = (char *)&dbg_msg->msg_data;
+			timestamp = ((((uint64_t)(dbg_msg->timestamp_hi) << 32)
+				| dbg_msg->timestamp_lo) >> 16);
+			trace_cam_icp_fw_dbg(dbg_buf, timestamp/2);
+			if (!debug_lvl)
+				CAM_INFO(CAM_ICP, "FW_DBG:%s", dbg_buf);
+		}
+		size_processed += (pkt_ptr[ICP_PACKET_SIZE] >>
+			BYTE_WORD_SHIFT);
+		if (size_processed >= read_len)
+			return;
+		msg_ptr += (pkt_ptr[ICP_PACKET_SIZE] >>
+			BYTE_WORD_SHIFT);
+		pkt_ptr = NULL;
+		dbg_msg = NULL;
+		dbg_buf = NULL;
+	}
+}
+
+static int cam_icp_process_msg_pkt_type(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr,
+	uint32_t *msg_processed_len)
+{
+	int rc = 0;
+	int size_processed = 0;
+
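+	/*
+	 * Each message carries its own size so the caller can advance
+	 * msg_ptr through the queue buffer packet by packet.
+	 */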
+	switch (msg_ptr[ICP_PACKET_TYPE]) {
+	case HFI_MSG_SYS_INIT_DONE:
+		CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
+		complete(&hw_mgr->a5_complete);
+		size_processed = (
+			(struct hfi_msg_init_done *)msg_ptr)->size;
+		break;
+
+	case HFI_MSG_SYS_PC_PREP_DONE:
+		CAM_DBG(CAM_ICP, "received SYS_PC_PREP_DONE");
+		complete(&hw_mgr->a5_complete);
+		size_processed = sizeof(struct hfi_msg_pc_prep_done);
+		break;
+
+	case HFI_MSG_SYS_PING_ACK:
+		CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
+		rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
+		size_processed = sizeof(struct hfi_msg_ping_ack);
+		break;
+
+	case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
+		CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
+		rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
+		size_processed = sizeof(struct hfi_msg_create_handle_ack);
+		break;
+
+	case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
+		CAM_DBG(CAM_ICP, "received ASYNC_INDIRECT_ACK");
+		rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
+		size_processed = (
+			(struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
+		break;
+
+	case  HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
+		CAM_DBG(CAM_ICP, "received ASYNC_DIRECT_ACK");
+		rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
+		size_processed = (
+			(struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
+		break;
+
+	case HFI_MSG_EVENT_NOTIFY:
+		CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
+		size_processed = (
+			(struct hfi_msg_event_notify *)msg_ptr)->size;
+		rc = cam_icp_mgr_process_fatal_error(hw_mgr, msg_ptr);
+		if (rc)
+			CAM_ERR(CAM_ICP, "failed in processing evt notify");
+
+		break;
+
+	default:
+		CAM_ERR(CAM_ICP, "invalid msg : %u",
+			msg_ptr[ICP_PACKET_TYPE]);
+		rc = -EINVAL;
+		break;
+	}
+
+	*msg_processed_len = size_processed;
+	return rc;
+}
+
+static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
+{
+	uint32_t read_len, msg_processed_len;
+	uint32_t *msg_ptr = NULL;
+	struct hfi_msg_work_data *task_data;
+	struct cam_icp_hw_mgr *hw_mgr;
+	int rc = 0;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = priv;
+
+	rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
+	if (rc) {
+		CAM_DBG(CAM_ICP, "Unable to read msg q rc %d", rc);
+	} else {
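+		/*
+		 * hfi_read_message() returns the length in words; convert
+		 * to bytes to match the per-packet size fields.
+		 */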
+		read_len = read_len << BYTE_WORD_SHIFT;
+		msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
+		while (true) {
+			cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr,
+				&msg_processed_len);
+
+			if (!msg_processed_len) {
+				CAM_ERR(CAM_ICP, "Invalid packet size 0, stop parsing");
+				rc = -EINVAL;
+				break;
+			}
+
+			read_len -= msg_processed_len;
+			if (read_len > 0) {
+				msg_ptr += (msg_processed_len >>
+					BYTE_WORD_SHIFT);
+				msg_processed_len = 0;
+			} else {
+				break;
+			}
+		}
+	}
+
+	cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+
+	if ((task_data->irq_status & A5_WDT_0) ||
+		(task_data->irq_status & A5_WDT_1)) {
+		CAM_ERR_RATE_LIMIT(CAM_ICP, "watchdog interrupt from A5");
+
+		rc = cam_icp_mgr_trigger_recovery(hw_mgr);
+	}
+
+	return rc;
+}
+
+int32_t cam_icp_hw_mgr_cb(uint32_t irq_status, void *data)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+	struct cam_icp_hw_mgr *hw_mgr = data;
+	struct crm_workq_task *task;
+	struct hfi_msg_work_data *task_data;
+
+	if (!data) {
+		CAM_ERR(CAM_ICP, "irq cb data is NULL");
+		return rc;
+	}
+
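+	/*
+	 * A5 IRQ callback: grab a workq task under the lock and defer the
+	 * actual message parsing to the worker thread.
+	 */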
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no free task available");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_msg_work_data *)task->payload;
+	task_data->data = hw_mgr;
+	task_data->irq_status = irq_status;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_mgr_process_msg;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static void cam_icp_free_hfi_mem(void)
+{
+	int rc;
+
+	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc)
+		CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
+
+	cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+}
+
+static int cam_icp_alloc_secheap_mem(struct cam_mem_mgr_memory_desc *secheap)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+	struct cam_mem_mgr_memory_desc out;
+	struct cam_smmu_region_info secheap_info;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_SMMU_REGION_SECHEAP,
+		&secheap_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get secheap memory info");
+		return rc;
+	}
+
+	alloc.size = secheap_info.iova_len;
+	alloc.align = 0;
+	alloc.flags = 0;
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_reserve_memory_region(&alloc,
+		CAM_SMMU_REGION_SECHEAP,
+		&out);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to reserve secheap memory");
+		return rc;
+	}
+
+	*secheap = out;
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
+
+	return rc;
+}
+
+static int cam_icp_alloc_sfr_mem(struct cam_mem_mgr_memory_desc *sfr)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+	struct cam_mem_mgr_memory_desc out;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+	alloc.size = SZ_8K;
+	alloc.align = 0;
+	alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+		CAM_MEM_FLAG_HW_SHARED_ACCESS;
+
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_request_mem(&alloc, &out);
+	if (rc)
+		return rc;
+
+	*sfr = out;
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
+
+	return rc;
+}
+
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+	struct cam_mem_mgr_memory_desc out;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+	alloc.size = SZ_1M;
+	alloc.align = 0;
+	alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+		CAM_MEM_FLAG_HW_SHARED_ACCESS;
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_request_mem(&alloc, &out);
+	if (rc)
+		return rc;
+
+	*qtbl = out;
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_fw_mem(void)
+{
+	int rc;
+	uintptr_t kvaddr;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
+		&iova, &kvaddr, &len);
+	if (rc)
+		return -ENOMEM;
+
+	icp_hw_mgr.hfi_mem.fw_buf.len = len;
+	icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
+	icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
+	icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+	CAM_DBG(CAM_ICP, "kva: %zX, iova: %llx, len: %zu",
+		kvaddr, iova, len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_qdss_mem(void)
+{
+	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_alloc_qdss(icp_hw_mgr.iommu_hdl,
+		&iova, &len);
+	if (rc)
+		return rc;
+
+	icp_hw_mgr.hfi_mem.qdss_buf.len = len;
+	icp_hw_mgr.hfi_mem.qdss_buf.iova = iova;
+	icp_hw_mgr.hfi_mem.qdss_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+	CAM_DBG(CAM_ICP, "iova: %llx, len: %zu", iova, len);
+
+	return rc;
+}
+
+static int cam_icp_get_io_mem_info(void)
+{
+	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_get_io_region_info(icp_hw_mgr.iommu_hdl,
+		&iova, &len);
+	if (rc)
+		return rc;
+
+	icp_hw_mgr.hfi_mem.io_mem.iova_len = len;
+	icp_hw_mgr.hfi_mem.io_mem.iova_start = iova;
+
+	CAM_DBG(CAM_ICP, "iova: %llx, len: %zu", iova, len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_hfi_mem(void)
+{
+	int rc;
+
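+	/*
+	 * Allocations below are unwound in reverse order through the error
+	 * labels at the bottom of this function.
+	 */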
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_SMMU_REGION_SHARED,
+		&icp_hw_mgr.hfi_mem.shmem);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get shared memory info");
+		return rc;
+	}
+
+	rc = cam_icp_allocate_fw_mem();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate FW memory");
+		return rc;
+	}
+
+	rc = cam_icp_allocate_qdss_mem();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate qdss memory");
+		goto fw_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
+		goto qtbl_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
+		goto cmd_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
+		goto msg_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
+		goto dbg_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_sfr_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate sfr buffer");
+		goto sfr_buf_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_secheap_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate sec heap memory");
+		goto sec_heap_alloc_failed;
+	}
+
+	rc = cam_icp_get_io_mem_info();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get I/O region info");
+		goto get_io_mem_failed;
+	}
+
+	return rc;
+get_io_mem_failed:
+	cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
+sec_heap_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+sfr_buf_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+dbg_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+msg_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+cmd_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+qtbl_alloc_failed:
+	cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
+fw_alloc_failed:
+	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	return rc;
+}
+
+static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int i = 0;
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+		if (hw_mgr->ctx_data[i].state == CAM_ICP_CTX_STATE_FREE) {
+			hw_mgr->ctx_data[i].state = CAM_ICP_CTX_STATE_IN_USE;
+			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+			break;
+		}
+		mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+	}
+
+	return i;
+}
+
+static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	ctx_data->state = CAM_ICP_CTX_STATE_FREE;
+}
+
+static int cam_icp_mgr_send_pc_prep(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+
+	reinit_completion(&hw_mgr->a5_complete);
+	CAM_DBG(CAM_ICP, "Sending PC prep command");
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv, CAM_ICP_A5_CMD_PC_PREP, NULL, 0);
+	if (rc)
+		return rc;
+
+	CAM_DBG(CAM_ICP, "Waiting for PC_PREP_DONE message");
+	rem_jiffies = wait_for_completion_timeout(&hw_mgr->a5_complete,
+		msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "PC_PREP response timed out %d", rc);
+	}
+	CAM_DBG(CAM_ICP, "Done waiting for PC_PREP_DONE message");
+
+	return rc;
+}
+
+static int cam_ipe_bps_deint(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return 0;
+	}
+
+	if (ipe1_dev_intf && hw_mgr->ipe_clk_state) {
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+	}
+
+	if (hw_mgr->ipe_clk_state)
+		ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	if (hw_mgr->bps_clk_state)
+		bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+
+	hw_mgr->bps_clk_state = false;
+	hw_mgr->ipe_clk_state = false;
+
+	return 0;
+}
+
+static int cam_icp_mgr_hw_close_u(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+	int rc = 0;
+
+	CAM_DBG(CAM_ICP, "UMD calls close");
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_icp_mgr_hw_close(hw_mgr, NULL);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_icp_mgr_hw_close_k(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+	CAM_DBG(CAM_ICP, "KMD calls close");
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	return cam_icp_mgr_hw_close(hw_mgr, NULL);
+}
+
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+
+	CAM_DBG(CAM_ICP, "ENTER");
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
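+	/*
+	 * If ICP power collapse is disabled or recovery is in progress, do
+	 * a full close instead of the PC-prep handshake with firmware.
+	 */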
+	if (!hw_mgr->icp_pc_flag || atomic_read(&hw_mgr->recovery)) {
+		cam_hfi_disable_cpu(
+			a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+		rc = cam_icp_mgr_hw_close_k(hw_mgr, NULL);
+	} else {
+		CAM_DBG(CAM_PERF, "Sending PC prep ICP PC enabled");
+		rc = cam_icp_mgr_send_pc_prep(hw_mgr);
+		cam_hfi_disable_cpu(
+			a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+	}
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	CAM_DBG(CAM_ICP, "EXIT");
+
+	return rc;
+}
+
+static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct hfi_mem_info hfi_mem;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
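+	/* Repopulate the HFI memory map from the cached allocations */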
+	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+	CAM_DBG(CAM_ICP, "qtbl kva = %llX IOVA = %X length = %lld",
+		hfi_mem.qtbl.kva, hfi_mem.qtbl.iova, hfi_mem.qtbl.len);
+
+	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+	CAM_DBG(CAM_ICP, "cmd_q kva = %llX IOVA = %X length = %lld",
+		hfi_mem.cmd_q.kva, hfi_mem.cmd_q.iova, hfi_mem.cmd_q.len);
+
+	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+	CAM_DBG(CAM_ICP, "msg_q kva = %llX IOVA = %X length = %lld",
+		hfi_mem.msg_q.kva, hfi_mem.msg_q.iova, hfi_mem.msg_q.len);
+
+	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+	CAM_DBG(CAM_ICP, "dbg_q kva = %llX IOVA = %X length = %lld",
+		hfi_mem.dbg_q.kva, hfi_mem.dbg_q.iova, hfi_mem.dbg_q.len);
+
+	hfi_mem.sfr_buf.kva = icp_hw_mgr.hfi_mem.sfr_buf.kva;
+	hfi_mem.sfr_buf.iova = icp_hw_mgr.hfi_mem.sfr_buf.iova;
+	hfi_mem.sfr_buf.len = icp_hw_mgr.hfi_mem.sfr_buf.len;
+	CAM_DBG(CAM_ICP, "sfr kva = %llX IOVA = %X length = %lld",
+		hfi_mem.sfr_buf.kva, hfi_mem.sfr_buf.iova,
+		hfi_mem.sfr_buf.len);
+
+	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
+	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
+	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+
+	hfi_mem.io_mem.iova = icp_hw_mgr.hfi_mem.io_mem.iova_start;
+	hfi_mem.io_mem.len = icp_hw_mgr.hfi_mem.io_mem.iova_len;
+
+	CAM_DBG(CAM_ICP, "IO region IOVA = %X length = %lld",
+			hfi_mem.io_mem.iova,
+			hfi_mem.io_mem.len);
+
+	return cam_hfi_resume(&hfi_mem,
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+		hw_mgr->a5_jtag_debug);
+}
+
+static int cam_icp_mgr_abort_handle(
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	unsigned long rem_jiffies;
+	size_t packet_size;
+	int timeout = 1000;
+	struct hfi_cmd_ipebps_async *abort_cmd;
+
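+	/*
+	 * Direct command packet: async header plus the abort payload, minus
+	 * the placeholder direct[] member already counted in the header.
+	 */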
+	packet_size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		sizeof(struct hfi_cmd_abort) -
+		sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+	CAM_DBG(CAM_ICP, "abort pkt size = %d", (int)packet_size);
+	abort_cmd = kzalloc(packet_size, GFP_KERNEL);
+	if (!abort_cmd)
+		return -ENOMEM;
+
+	abort_cmd->size = packet_size;
+	abort_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		abort_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
+	else
+		abort_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;
+
+	reinit_completion(&ctx_data->wait_complete);
+	abort_cmd->num_fw_handles = 1;
+	abort_cmd->fw_handles[0] = ctx_data->fw_handle;
+	abort_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	abort_cmd->user_data2 = (uint64_t)0x0;
+
+	rc = hfi_write_cmd(abort_cmd);
+	if (rc) {
+		kfree(abort_cmd);
+		return rc;
+	}
+	CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+		ctx_data->fw_handle, ctx_data);
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW timeout/err in abort handle command");
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+		cam_hfi_queue_dump();
+	}
+
+	kfree(abort_cmd);
+	return rc;
+}
+
+static int cam_icp_mgr_destroy_handle(
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	int timeout = 1000;
+	unsigned long rem_jiffies;
+	size_t packet_size;
+	struct hfi_cmd_ipebps_async *destroy_cmd;
+
+	packet_size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		sizeof(struct hfi_cmd_abort_destroy) -
+		sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+	destroy_cmd = kzalloc(packet_size, GFP_KERNEL);
+	if (!destroy_cmd)
+		return -ENOMEM;
+
+	destroy_cmd->size = packet_size;
+	destroy_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		destroy_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
+	else
+		destroy_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
+
+	reinit_completion(&ctx_data->wait_complete);
+	destroy_cmd->num_fw_handles = 1;
+	destroy_cmd->fw_handles[0] = ctx_data->fw_handle;
+	destroy_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	destroy_cmd->user_data2 = (uint64_t)0x0;
+	memcpy(destroy_cmd->payload.direct, &ctx_data->temp_payload,
+		sizeof(uint64_t));
+
+	rc = hfi_write_cmd(destroy_cmd);
+	if (rc) {
+		kfree(destroy_cmd);
+		return rc;
+	}
+	CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+		ctx_data->fw_handle, ctx_data);
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timeout: %d for %u",
+			rc, ctx_data->ctx_id);
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+		cam_hfi_queue_dump();
+	}
+	kfree(destroy_cmd);
+	return rc;
+}
+
+static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
+{
+	int i = 0;
+
+	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "ctx_id is wrong: %d", ctx_id);
+		return -EINVAL;
+	}
+
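+	/*
+	 * Teardown order: drop the power vote, abort in-flight work, destroy
+	 * the FW handle, then recycle the local bookkeeping.
+	 */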
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx_data[ctx_id].state !=
+		CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		CAM_DBG(CAM_ICP,
+			"ctx with id: %d not in right state to release: %d",
+			ctx_id, hw_mgr->ctx_data[ctx_id].state);
+		return 0;
+	}
+	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
+		&hw_mgr->ctx_data[ctx_id], 0);
+	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
+	CAM_DBG(CAM_ICP, "E: ctx_id = %d recovery = %d",
+		ctx_id, atomic_read(&hw_mgr->recovery));
+	cam_icp_mgr_abort_handle(&hw_mgr->ctx_data[ctx_id]);
+	cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
+	cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);
+
+	hw_mgr->ctx_data[ctx_id].fw_handle = 0;
+	hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap = NULL;
+	cam_icp_hw_mgr_clk_info_update(hw_mgr, &hw_mgr->ctx_data[ctx_id]);
+	hw_mgr->ctx_data[ctx_id].clk_info.curr_fc = 0;
+	hw_mgr->ctx_data[ctx_id].clk_info.base_clk = 0;
+	hw_mgr->ctxt_cnt--;
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
+	cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+	CAM_DBG(CAM_ICP, "X: ctx_id = %d", ctx_id);
+	return 0;
+}
+
+static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return;
+	}
+
+	if (ipe1_dev_intf)
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv, NULL, 0);
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	hw_mgr->bps_clk_state = false;
+	hw_mgr->ipe_clk_state = false;
+}
+
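+/*
+ * Close the hw mgr: nullify the FW buffer registered with the A5,
+ * tear down HFI, deregister the IRQ callback and free HFI memory.
+ * A no-op when firmware was never downloaded.
+ */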
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+	int rc = 0;
+
+	CAM_DBG(CAM_ICP, "E");
+	if (hw_mgr->fw_download == false) {
+		CAM_DBG(CAM_ICP, "hw mgr is already closed");
+		return 0;
+	}
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	fw_buf_info.kva = 0;
+	fw_buf_info.iova = 0;
+	fw_buf_info.len = 0;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_SET_FW_BUF,
+		&fw_buf_info,
+		sizeof(fw_buf_info));
+	if (rc)
+		CAM_ERR(CAM_ICP, "nullify the fw buf failed");
+	cam_hfi_deinit(
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+	irq_cb.icp_hw_mgr_cb = NULL;
+	irq_cb.data = NULL;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc)
+		CAM_ERR(CAM_ICP, "deregister irq call back failed");
+
+	cam_icp_free_hfi_mem();
+	hw_mgr->fw_download = false;
+
+	CAM_DBG(CAM_ICP, "Exit");
+	return rc;
+}
+
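+/*
+ * Bring-up order is A5 -> BPS -> IPE0 -> IPE1 (if present); on failure
+ * the already initialized devices are unwound in reverse.
+ */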
+static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		goto a5_dev_init_failed;
+
+	rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		goto bps_dev_init_failed;
+
+	rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		goto ipe0_dev_init_failed;
+
+	if (ipe1_dev_intf) {
+		rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+						NULL, 0);
+		if (rc)
+			goto ipe1_dev_init_failed;
+	}
+
+	hw_mgr->bps_clk_state = true;
+	hw_mgr->ipe_clk_state = true;
+
+	return rc;
+ipe1_dev_init_failed:
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	hw_mgr->ipe_clk_state = false;
+ipe0_dev_init_failed:
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	hw_mgr->bps_clk_state = false;
+bps_dev_init_failed:
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+	return rc;
+}
+
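+/*
+ * Register the IRQ callback and FW buffer with the A5, bring its CPU
+ * out of reset and trigger the firmware download.
+ */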
+static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
+	irq_cb.data = hw_mgr;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc)
+		goto set_irq_failed;
+
+	fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
+	fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
+	fw_buf_info.len = icp_hw_mgr.hfi_mem.fw_buf.len;
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_SET_FW_BUF,
+		&fw_buf_info, sizeof(fw_buf_info));
+	if (rc)
+		goto set_irq_failed;
+
+	cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_FW_DOWNLOAD,
+		NULL, 0);
+	if (rc)
+		goto fw_download_failed;
+
+	return rc;
+fw_download_failed:
+	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+set_irq_failed:
+	return rc;
+}
+
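+/*
+ * Hand the preallocated HFI memory regions (queue table, cmd/msg/dbg
+ * queues, SFR, secondary heap, shmem, QDSS and IO regions) to the HFI
+ * layer.
+ */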
+static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct hfi_mem_info hfi_mem;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+
+	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+
+	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+
+	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+
+	hfi_mem.sfr_buf.kva = icp_hw_mgr.hfi_mem.sfr_buf.kva;
+	hfi_mem.sfr_buf.iova = icp_hw_mgr.hfi_mem.sfr_buf.iova;
+	hfi_mem.sfr_buf.len = icp_hw_mgr.hfi_mem.sfr_buf.len;
+
+	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
+	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
+	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+
+	hfi_mem.io_mem.iova = icp_hw_mgr.hfi_mem.io_mem.iova_start;
+	hfi_mem.io_mem.len = icp_hw_mgr.hfi_mem.io_mem.iova_len;
+
+	return cam_hfi_init(0, &hfi_mem,
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+		hw_mgr->a5_jtag_debug);
+}
+
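+/*
+ * Send the HFI init command to the A5 and wait up to 5s for the
+ * INIT DONE ack from firmware.
+ */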
+static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+
+	reinit_completion(&hw_mgr->a5_complete);
+	CAM_DBG(CAM_ICP, "Sending HFI init command");
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SEND_INIT,
+		NULL, 0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+		msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+		cam_hfi_queue_dump();
+	}
+	CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
+
+	return rc;
+}
+
+static int cam_icp_mgr_hw_open_u(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_icp_mgr_hw_open(hw_mgr, download_fw_args);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_icp_mgr_hw_open_k(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	return cam_icp_mgr_hw_open(hw_mgr, download_fw_args);
+}
+
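+/*
+ * Resume the ICP: if firmware is not loaded, run the full hw open
+ * (marked as a resume); otherwise re-init the A5 and resume HFI.
+ */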
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	bool download_from_resume = true;
+
+	CAM_DBG(CAM_ICP, "Enter");
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
+		return -EINVAL;
+	}
+
+	if (hw_mgr->fw_download  == false) {
+		CAM_DBG(CAM_ICP, "Downloading FW");
+		rc = cam_icp_mgr_hw_open_k(hw_mgr, &download_from_resume);
+		CAM_DBG(CAM_ICP, "FW Download Done Exit");
+		return rc;
+	}
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		return -EINVAL;
+
+	rc = cam_icp_mgr_hfi_resume(hw_mgr);
+	if (rc)
+		goto hfi_resume_failed;
+
+	CAM_DBG(CAM_ICP, "Exit");
+	return rc;
+hfi_resume_failed:
+	cam_icp_mgr_icp_power_collapse(hw_mgr);
+	return rc;
+}
+
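+/*
+ * Full bring-up: allocate HFI memory, init the devices, download
+ * firmware, init HFI and wait for the firmware INIT DONE ack. IPE/BPS
+ * (and, at boot, the ICP itself) are powered down again afterwards
+ * until a context is acquired.
+ */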
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	bool icp_pc = false;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "hw_mgr is NULL");
+		return -EINVAL;
+	}
+
+	if (hw_mgr->fw_download) {
+		CAM_DBG(CAM_ICP, "FW already downloaded");
+		return rc;
+	}
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	rc = cam_icp_allocate_hfi_mem();
+	if (rc)
+		goto alloc_hfi_mem_failed;
+
+	rc = cam_icp_mgr_device_init(hw_mgr);
+	if (rc)
+		goto dev_init_fail;
+
+	rc = cam_icp_mgr_fw_download(hw_mgr);
+	if (rc)
+		goto fw_download_failed;
+
+	rc = cam_icp_mgr_hfi_init(hw_mgr);
+	if (rc)
+		goto hfi_init_failed;
+
+	rc = cam_icp_mgr_send_fw_init(hw_mgr);
+	if (rc)
+		goto fw_init_failed;
+
+	hw_mgr->ctxt_cnt = 0;
+	hw_mgr->fw_download = true;
+	atomic_set(&hw_mgr->recovery, 0);
+
+	CAM_INFO(CAM_ICP, "FW download done successfully");
+
+	/* IPE/BPS stay powered off until a context acquires them; when
+	 * this open is not a resume from ICP power collapse, the ICP core
+	 * is collapsed again as well.
+	 */
+	rc = cam_ipe_bps_deint(hw_mgr);
+	if (download_fw_args)
+		icp_pc = *((bool *)download_fw_args);
+
+	if (download_fw_args && icp_pc) {
+		if (hw_mgr->icp_pc_flag)
+			CAM_DBG(CAM_ICP, "deinit all clocks");
+		return rc;
+	}
+
+	rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
+	CAM_DBG(CAM_ICP, "deinit all clocks at boot up");
+
+	return rc;
+
+fw_init_failed:
+	cam_hfi_deinit(
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+hfi_init_failed:
+	cam_hfi_disable_cpu(
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+fw_download_failed:
+	cam_icp_mgr_device_deinit(hw_mgr);
+dev_init_fail:
+	cam_icp_free_hfi_mem();
+alloc_hfi_mem_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_handle_config_err(
+	struct cam_hw_config_args *config_args,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	int idx)
+{
+	struct cam_hw_done_event_data buf_data;
+
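+	/* The cast below assumes the request id is the leading field of
+	 * the object behind config_args->priv.
+	 */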
+	buf_data.request_id = *(uint64_t *)config_args->priv;
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
+
+	ctx_data->hfi_frame_process.request_id[idx] = 0;
+	ctx_data->hfi_frame_process.fw_process_flag[idx] = false;
+	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+
+	return 0;
+}
+
+static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_config_args *config_args)
+{
+	int rc = 0;
+	uint64_t request_id = 0;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async *hfi_cmd;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct icp_frame_info *frame_info = NULL;
+
+	frame_info = (struct icp_frame_info *)config_args->priv;
+	request_id = frame_info->request_id;
+	hw_update_entries = config_args->hw_update_entries;
+	CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)hw_update_entries->addr;
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
+	task_data->request_id = request_id;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
+	uint32_t io_buf_addr)
+{
+	int rc = 0;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async ioconfig_cmd;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	uint32_t size_in_words;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
+	ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+	else
+		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+	reinit_completion(&ctx_data->wait_complete);
+
+	ioconfig_cmd.num_fw_handles = 1;
+	ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
+	ioconfig_cmd.payload.indirect = io_buf_addr;
+	ioconfig_cmd.user_data1 = PTR_TO_U64(ctx_data);
+	ioconfig_cmd.user_data2 = (uint64_t)0x0;
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ioconfig_cmd;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	size_in_words = (*(uint32_t *)task_data->data) >> 2;
+	CAM_DBG(CAM_ICP, "size_in_words %u", size_in_words);
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+		msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+		cam_hfi_queue_dump();
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_send_recfg_io(struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *ioconfig_cmd, uint64_t req_id)
+{
+	int rc = 0;
+	struct hfi_cmd_work_data *task_data;
+	struct crm_workq_task *task;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)ioconfig_cmd;
+	task_data->request_id = req_id;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
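+/*
+ * Queue a prepared frame to firmware: update IPE/BPS clocks for the
+ * request, send a reconfig IO command when the frame carries a fresh
+ * IO config, then enqueue the frame process command on the cmd workq.
+ */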
+static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc = 0;
+	int idx;
+	uint64_t req_id;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct icp_frame_info *frame_info = NULL;
+
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_ICP, "No hw update enteries are available");
+		return -EINVAL;
+	}
+
+	ctx_data = config_args->ctxt_to_hw_map;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_ICP, "ctx id :%u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	frame_info = (struct icp_frame_info *)config_args->priv;
+	req_id = frame_info->request_id;
+	idx = cam_icp_clk_idx_from_req_id(ctx_data, req_id);
+
+	cam_icp_mgr_ipe_bps_clk_update(hw_mgr, ctx_data, idx);
+	ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
+
+	CAM_DBG(CAM_ICP, "req_id %llu, io config %llu", req_id,
+		frame_info->io_config);
+
+	if (frame_info->io_config != 0) {
+		CAM_INFO(CAM_ICP, "Send recfg io");
+		rc = cam_icp_mgr_send_recfg_io(ctx_data,
+			&frame_info->hfi_cfg_io_cmd, req_id);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Fail to send reconfig io cmd");
+	}
+
+	rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
+	if (rc)
+		goto config_err;
+	CAM_DBG(CAM_REQ,
+		"req_id = %lld on ctx_id %u for dev %d queued to FW",
+		req_id, ctx_data->ctx_id,
+		ctx_data->icp_dev_acquire_info->dev_type);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return 0;
+config_err:
+	cam_icp_mgr_handle_config_err(config_args, ctx_data, idx);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_prepare_frame_process_cmd(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *hfi_cmd,
+	uint64_t request_id,
+	uint32_t fw_cmd_buf_iova_addr)
+{
+	hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+	hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
+	else
+		hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
+	hfi_cmd->num_fw_handles = 1;
+	hfi_cmd->fw_handles[0] = ctx_data->fw_handle;
+	hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr;
+	hfi_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	hfi_cmd->user_data2 = request_id;
+
+	CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
+		(void *)ctx_data->context_priv, request_id,
+		fw_cmd_buf_iova_addr);
+
+	return 0;
+}
+
+static bool cam_icp_mgr_is_valid_inconfig(struct cam_packet *packet)
+{
+	int i, num_in_map_entries = 0;
+	bool in_config_valid = false;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+					packet->io_configs_offset/4);
+
+	for (i = 0; i < packet->num_io_configs; i++)
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT)
+			num_in_map_entries++;
+
+	if (num_in_map_entries <= CAM_MAX_IN_RES) {
+		in_config_valid = true;
+	} else {
+		CAM_ERR(CAM_ICP, "In config entries(%u) more than allowed(%u)",
+				num_in_map_entries, CAM_MAX_IN_RES);
+	}
+
+	CAM_DBG(CAM_ICP, "number of in_config info: %u %u %u %u",
+			packet->num_io_configs, IPE_IO_IMAGES_MAX,
+			num_in_map_entries, CAM_MAX_IN_RES);
+
+	return in_config_valid;
+}
+
+static bool cam_icp_mgr_is_valid_outconfig(struct cam_packet *packet)
+{
+	int i, num_out_map_entries = 0;
+	bool out_config_valid = false;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+					packet->io_configs_offset/4);
+
+	for (i = 0; i < packet->num_io_configs; i++)
+		if (io_cfg_ptr[i].direction == CAM_BUF_OUTPUT)
+			num_out_map_entries++;
+
+	if (num_out_map_entries <= CAM_MAX_OUT_RES) {
+		out_config_valid = true;
+	} else {
+		CAM_ERR(CAM_ICP, "Out config entries(%u) more than allowed(%u)",
+				num_out_map_entries, CAM_MAX_OUT_RES);
+	}
+
+	CAM_DBG(CAM_ICP, "number of out_config info: %u %u %u %u",
+			packet->num_io_configs, IPE_IO_IMAGES_MAX,
+			num_out_map_entries, CAM_MAX_OUT_RES);
+
+	return out_config_valid;
+}
+
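+/*
+ * Validate an IPE/BPS update packet: opcode, io config and cmd buffer
+ * counts, and per-direction input/output config limits.
+ */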
+static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
+{
+	if (((packet->header.op_code & 0xff) !=
+		CAM_ICP_OPCODE_IPE_UPDATE) &&
+		((packet->header.op_code & 0xff) !=
+		CAM_ICP_OPCODE_BPS_UPDATE)) {
+		CAM_ERR(CAM_ICP, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	if (packet->num_io_configs > IPE_IO_IMAGES_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid number of io configs: %d %d",
+			IPE_IO_IMAGES_MAX, packet->num_io_configs);
+		return -EINVAL;
+	}
+
+	if (packet->num_cmd_buf > CAM_ICP_CTX_MAX_CMD_BUFFERS) {
+		CAM_ERR(CAM_ICP, "Invalid number of cmd buffers: %d %d",
+			CAM_ICP_CTX_MAX_CMD_BUFFERS, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if (!cam_icp_mgr_is_valid_inconfig(packet) ||
+		!cam_icp_mgr_is_valid_outconfig(packet)) {
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "number of cmd/patch info: %u %u %u %u",
+			packet->num_cmd_buf,
+			packet->num_io_configs, IPE_IO_IMAGES_MAX,
+			packet->num_patches);
+	return 0;
+}
+
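+/*
+ * Walk the packet's cmd buffer descriptors, record the iova of the FW
+ * cmd buffer, and zero the address fields of the IPE/BPS frame process
+ * payload before it is handed to firmware.
+ */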
+static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data,
+	uint32_t *fw_cmd_buf_iova_addr)
+{
+	int rc = 0;
+	int i, j, k;
+	int num_cmd_buf = 0;
+	uint64_t addr;
+	size_t len;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uintptr_t cpu_addr = 0;
+	struct ipe_frame_process_data *frame_process_data = NULL;
+	struct bps_frame_process_data *bps_frame_process_data = NULL;
+	struct frame_set *ipe_set = NULL;
+	struct frame_buffer *bps_bufs = NULL;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+	*fw_cmd_buf_iova_addr = 0;
+	for (i = 0; i < packet->num_cmd_buf; i++, num_cmd_buf++) {
+		if (cmd_desc[i].type == CAM_CMD_BUF_FW) {
+			rc = cam_mem_get_io_buf(cmd_desc[i].mem_handle,
+				hw_mgr->iommu_hdl, &addr, &len);
+			if (rc) {
+				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+
+				if (num_cmd_buf > 0)
+					num_cmd_buf--;
+				return rc;
+			}
+			*fw_cmd_buf_iova_addr = addr;
+			*fw_cmd_buf_iova_addr =
+				(*fw_cmd_buf_iova_addr + cmd_desc[i].offset);
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&cpu_addr, &len);
+			if (rc || !cpu_addr) {
+				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+				*fw_cmd_buf_iova_addr = 0;
+
+				if (num_cmd_buf > 0)
+					num_cmd_buf--;
+				return rc;
+			}
+			if ((len <= cmd_desc[i].offset) ||
+				(cmd_desc[i].size < cmd_desc[i].length) ||
+				((len - cmd_desc[i].offset) <
+				cmd_desc[i].length)) {
+				CAM_ERR(CAM_ICP, "Invalid offset or length");
+				return -EINVAL;
+			}
+			cpu_addr = cpu_addr + cmd_desc[i].offset;
+		}
+	}
+
+	if (!cpu_addr) {
+		CAM_ERR(CAM_ICP, "invalid number of cmd buf");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) {
+		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
+		frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
+		CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
+			frame_process_data->target_time,
+			frame_process_data->frames_in_batch);
+		frame_process_data->strip_lib_out_addr = 0;
+		frame_process_data->iq_settings_addr = 0;
+		frame_process_data->scratch_buffer_addr = 0;
+		frame_process_data->ubwc_stats_buffer_addr = 0;
+		frame_process_data->cdm_buffer_addr = 0;
+		frame_process_data->cdm_prog_base = 0;
+		for (i = 0; i < frame_process_data->frames_in_batch; i++) {
+			ipe_set = &frame_process_data->framesets[i];
+			for (j = 0; j < IPE_IO_IMAGES_MAX; j++) {
+				for (k = 0; k < MAX_NUM_OF_IMAGE_PLANES; k++) {
+					ipe_set->buffers[j].buf_ptr[k] = 0;
+					ipe_set->buffers[j].meta_buf_ptr[k] = 0;
+				}
+			}
+		}
+	} else {
+		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
+		bps_frame_process_data =
+			(struct bps_frame_process_data *)cpu_addr;
+		CAM_DBG(CAM_ICP, "%u %u",
+			bps_frame_process_data->max_num_cores,
+			bps_frame_process_data->target_time);
+		bps_frame_process_data->ubwc_stats_buffer_addr = 0;
+		bps_frame_process_data->cdm_buffer_addr = 0;
+		bps_frame_process_data->iq_settings_addr = 0;
+		bps_frame_process_data->strip_lib_out_addr = 0;
+		bps_frame_process_data->cdm_prog_addr = 0;
+		for (i = 0; i < BPS_IO_IMAGES_MAX; i++) {
+			bps_bufs = &bps_frame_process_data->buffers[i];
+			for (j = 0; j < MAX_NUM_OF_IMAGE_PLANES; j++) {
+				bps_bufs->buf_ptr[j] = 0;
+				bps_bufs->meta_buf_ptr[j] = 0;
+			}
+		}
+	}
+
+	return rc;
+}
+
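+/*
+ * Split the packet's IO configs into input fences and output sync ids.
+ * Multiple input fences are deduplicated and merged into a single sync
+ * object recorded as the lone in_map entry.
+ */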
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t index)
+{
+	int i, j, k, rc = 0;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+	int32_t sync_in_obj[CAM_MAX_IN_RES];
+	int32_t merged_sync_in_obj;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+				packet->io_configs_offset/4);
+	prepare_args->num_out_map_entries = 0;
+	prepare_args->num_in_map_entries = 0;
+
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			sync_in_obj[j++] = io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
+		}
+		CAM_DBG(CAM_REQ,
+			"ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x",
+			ctx_data->ctx_id, packet->header.request_id, i,
+			io_cfg_ptr[i].direction, io_cfg_ptr[i].fence,
+			io_cfg_ptr[i].resource_type,
+			io_cfg_ptr[i].mem_handle[0]);
+	}
+
+	if (prepare_args->num_in_map_entries > 1)
+		prepare_args->num_in_map_entries =
+			cam_common_util_remove_duplicate_arr(
+			sync_in_obj, prepare_args->num_in_map_entries);
+
+	if (prepare_args->num_in_map_entries > 1) {
+		rc = cam_sync_merge(&sync_in_obj[0],
+			prepare_args->num_in_map_entries, &merged_sync_in_obj);
+		if (rc) {
+			prepare_args->num_out_map_entries = 0;
+			prepare_args->num_in_map_entries = 0;
+			return rc;
+		}
+
+		ctx_data->hfi_frame_process.in_resource[index] =
+			merged_sync_in_obj;
+		prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prepare_args->num_in_map_entries = 1;
+		CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
+			ctx_data->ctx_id, packet->header.request_id,
+			merged_sync_in_obj);
+	} else if (prepare_args->num_in_map_entries == 1) {
+		prepare_args->in_map_entries[0].sync_id = sync_in_obj[0];
+		prepare_args->num_in_map_entries = 1;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+	} else {
+		CAM_ERR(CAM_ICP, "No input fences");
+		prepare_args->num_in_map_entries = 0;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
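+/*
+ * Build an HFI MEM_MAP/MEM_UNMAP direct command for the given memory
+ * regions and send it synchronously to firmware (5s timeout).
+ */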
+static int cam_icp_process_stream_settings(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_cmd_mem_regions *cmd_mem_regions,
+	bool map_unmap)
+{
+	int rc = 0, i = 0;
+	size_t packet_size, map_cmd_size, len;
+	uint64_t iova;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct hfi_cmd_ipe_bps_map *map_cmd;
+	struct hfi_cmd_ipebps_async *async_direct;
+
+	map_cmd_size =
+		sizeof(struct hfi_cmd_ipe_bps_map) +
+		((cmd_mem_regions->num_regions - 1) *
+		sizeof(struct mem_map_region_data));
+
+	map_cmd = kzalloc(map_cmd_size, GFP_KERNEL);
+	if (!map_cmd)
+		return -ENOMEM;
+
+	for (i = 0; i < cmd_mem_regions->num_regions; i++) {
+		rc = cam_mem_get_io_buf(
+			cmd_mem_regions->map_info_array[i].mem_handle,
+			icp_hw_mgr.iommu_hdl, &iova, &len);
+		if (rc) {
+			CAM_ERR(CAM_ICP,
+				"Failed to get cmd region iova for handle %u",
+				cmd_mem_regions->map_info_array[i].mem_handle);
+			kfree(map_cmd);
+			return -EINVAL;
+		}
+
+		map_cmd->mem_map_region_sets[i].start_addr = (uint32_t)iova +
+			(cmd_mem_regions->map_info_array[i].offset);
+		map_cmd->mem_map_region_sets[i].len = (uint32_t) len;
+
+		CAM_DBG(CAM_ICP, "Region %u mem_handle %d iova %pK len %u",
+			(i+1), cmd_mem_regions->map_info_array[i].mem_handle,
+			(uint32_t)iova, (uint32_t)len);
+	}
+
+	map_cmd->mem_map_request_num = cmd_mem_regions->num_regions;
+	map_cmd->user_data = 0;
+
+	packet_size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		(sizeof(struct hfi_cmd_ipe_bps_map) +
+		((cmd_mem_regions->num_regions - 1) *
+		sizeof(struct mem_map_region_data))) -
+		sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+
+	async_direct = kzalloc(packet_size, GFP_KERNEL);
+	if (!async_direct) {
+		kfree(map_cmd);
+		return -ENOMEM;
+	}
+
+	async_direct->size = packet_size;
+	async_direct->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (map_unmap)
+		async_direct->opcode = HFI_IPEBPS_CMD_OPCODE_MEM_MAP;
+	else
+		async_direct->opcode = HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP;
+	async_direct->num_fw_handles = 1;
+	async_direct->fw_handles[0] = ctx_data->fw_handle;
+	async_direct->user_data1 = (uint64_t)ctx_data;
+	async_direct->user_data2 = (uint64_t)0x0;
+	memcpy(async_direct->payload.direct, map_cmd,
+		map_cmd_size);
+
+	reinit_completion(&ctx_data->wait_complete);
+	rc = hfi_write_cmd(async_direct);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "hfi write failed  rc %d", rc);
+		goto end;
+	}
+
+	CAM_DBG(CAM_ICP, "Sent FW %s cmd",
+		map_unmap ? "Map" : "Unmap");
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+		msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+
+end:
+	kfree(map_cmd);
+	kfree(async_direct);
+	return rc;
+}
+
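+/*
+ * Dispatch a generic cmd blob: clock/bandwidth votes (v1 or v2; the two
+ * versions are mutually exclusive per context), IO config update, and
+ * FW memory map/unmap requests.
+ */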
+static int cam_icp_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_icp_clk_bw_request *soc_req;
+	struct cam_icp_clk_bw_request *clk_info;
+	struct cam_icp_clk_bw_request_v2 *soc_req_v2;
+	struct cam_icp_clk_bw_req_internal_v2 *clk_info_v2;
+	struct cam_cmd_mem_regions *cmd_mem_regions;
+	struct icp_cmd_generic_blob *blob;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	uint32_t index;
+	size_t io_buf_size, clk_update_size;
+	int rc = 0;
+	uintptr_t pResource;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	blob = (struct icp_cmd_generic_blob *)user_data;
+	ctx_data = blob->ctx;
+	index = blob->frame_info_idx;
+
+	switch (blob_type) {
+	case CAM_ICP_CMD_GENERIC_BLOB_CLK:
+		CAM_WARN(CAM_ICP,
+			"Using deprecated blob type GENERIC_BLOB_CLK");
+		if (blob_size != sizeof(struct cam_icp_clk_bw_request)) {
+			CAM_ERR(CAM_ICP, "Mismatch blob size %d expected %lu",
+				blob_size,
+				sizeof(struct cam_icp_clk_bw_request));
+			return -EINVAL;
+		}
+
+		if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_UNKNOWN) {
+			ctx_data->bw_config_version = CAM_ICP_BW_CONFIG_V1;
+		} else if (ctx_data->bw_config_version !=
+			CAM_ICP_BW_CONFIG_V1) {
+			CAM_ERR(CAM_ICP,
+				"Mismatch blob versions %d expected v1 %d, blob_type=%d",
+				ctx_data->bw_config_version,
+				CAM_ICP_BW_CONFIG_V1, blob_type);
+			return -EINVAL;
+		}
+
+		clk_info = &ctx_data->hfi_frame_process.clk_info[index];
+
+		soc_req = (struct cam_icp_clk_bw_request *)blob_data;
+		*clk_info = *soc_req;
+		CAM_DBG(CAM_ICP, "budget:%llu fc: %llu %d BW %lld %lld",
+			clk_info->budget_ns, clk_info->frame_cycles,
+			clk_info->rt_flag, clk_info->uncompressed_bw,
+			clk_info->compressed_bw);
+		break;
+
+	case CAM_ICP_CMD_GENERIC_BLOB_CLK_V2:
+		if (blob_size < sizeof(struct cam_icp_clk_bw_request_v2)) {
+			CAM_ERR(CAM_ICP, "Mismatch blob size %d expected %lu",
+				blob_size,
+				sizeof(struct cam_icp_clk_bw_request_v2));
+			return -EINVAL;
+		}
+
+		if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_UNKNOWN) {
+			ctx_data->bw_config_version = CAM_ICP_BW_CONFIG_V2;
+		} else if (ctx_data->bw_config_version !=
+			CAM_ICP_BW_CONFIG_V2) {
+			CAM_ERR(CAM_ICP,
+				"Mismatch blob versions %d expected v2 %d, blob_type=%d",
+				ctx_data->bw_config_version,
+				CAM_ICP_BW_CONFIG_V2, blob_type);
+			return -EINVAL;
+		}
+
+		soc_req_v2 = (struct cam_icp_clk_bw_request_v2 *)blob_data;
+		if (soc_req_v2->num_paths > CAM_ICP_MAX_PER_PATH_VOTES) {
+			CAM_ERR(CAM_ICP, "Invalid num paths: %d",
+				soc_req_v2->num_paths);
+			return -EINVAL;
+		}
+
+		/* Check for integer overflow */
+		if (soc_req_v2->num_paths != 1) {
+			if (sizeof(struct cam_axi_per_path_bw_vote) >
+				((UINT_MAX -
+				sizeof(struct cam_icp_clk_bw_request_v2)) /
+				(soc_req_v2->num_paths - 1))) {
+				CAM_ERR(CAM_ICP,
+					"Size exceeds limit paths:%u size per path:%lu",
+					soc_req_v2->num_paths - 1,
+					sizeof(
+					struct cam_axi_per_path_bw_vote));
+				return -EINVAL;
+			}
+		}
+
+		clk_update_size = sizeof(struct cam_icp_clk_bw_request_v2) +
+			((soc_req_v2->num_paths - 1) *
+			sizeof(struct cam_axi_per_path_bw_vote));
+		if (blob_size < clk_update_size) {
+			CAM_ERR(CAM_ICP, "Invalid blob size: %u",
+				blob_size);
+			return -EINVAL;
+		}
+
+		clk_info = &ctx_data->hfi_frame_process.clk_info[index];
+		clk_info_v2 = &ctx_data->hfi_frame_process.clk_info_v2[index];
+
+		memcpy(clk_info_v2, soc_req_v2, clk_update_size);
+
+		/* Use v1 structure for clk fields */
+		clk_info->budget_ns = clk_info_v2->budget_ns;
+		clk_info->frame_cycles = clk_info_v2->frame_cycles;
+		clk_info->rt_flag = clk_info_v2->rt_flag;
+
+		CAM_DBG(CAM_ICP, "budget=%llu, frame_cycle=%llu, rt_flag=%d",
+			clk_info_v2->budget_ns, clk_info_v2->frame_cycles,
+			clk_info_v2->rt_flag);
+		break;
+
+	case CAM_ICP_CMD_GENERIC_BLOB_CFG_IO:
+		CAM_DBG(CAM_ICP, "CAM_ICP_CMD_GENERIC_BLOB_CFG_IO");
+		pResource = *((uint32_t *)blob_data);
+		if (copy_from_user(&ctx_data->icp_dev_io_info,
+			(void __user *)pResource,
+			sizeof(struct cam_icp_acquire_dev_info))) {
+			CAM_ERR(CAM_ICP, "Failed in copy from user");
+			return -EFAULT;
+		}
+		CAM_DBG(CAM_ICP, "buf handle %d",
+			ctx_data->icp_dev_io_info.io_config_cmd_handle);
+		rc = cam_mem_get_io_buf(
+			ctx_data->icp_dev_io_info.io_config_cmd_handle,
+			icp_hw_mgr.iommu_hdl,
+			blob->io_buf_addr, &io_buf_size);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed in blob update");
+		else
+			CAM_DBG(CAM_ICP, "io buf addr %llu",
+				*blob->io_buf_addr);
+		break;
+
+	case CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_MAP:
+		cmd_mem_regions =
+			(struct cam_cmd_mem_regions *)blob_data;
+		if (cmd_mem_regions->num_regions <= 0) {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ICP,
+				"Invalid number of regions for FW map %u",
+				cmd_mem_regions->num_regions);
+		} else {
+			CAM_DBG(CAM_ICP,
+				"Processing blob for mapping %u regions",
+				cmd_mem_regions->num_regions);
+			rc = cam_icp_process_stream_settings(ctx_data,
+				cmd_mem_regions, true);
+		}
+		break;
+
+	case CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_UNMAP:
+		cmd_mem_regions =
+			(struct cam_cmd_mem_regions *)blob_data;
+		if (cmd_mem_regions->num_regions <= 0) {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ICP,
+				"Invalid number of regions for FW unmap %u",
+				cmd_mem_regions->num_regions);
+		} else {
+			CAM_DBG(CAM_ICP,
+				"Processing blob for unmapping %u regions",
+				cmd_mem_regions->num_regions);
+			rc = cam_icp_process_stream_settings(ctx_data,
+				cmd_mem_regions, false);
+		}
+		break;
+
+	default:
+		CAM_WARN(CAM_ICP, "Invalid blob type %d", blob_type);
+		break;
+	}
+	return rc;
+}
+
+static int cam_icp_process_generic_cmd_buffer(
+	struct cam_packet *packet,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	int32_t index,
+	uint64_t *io_buf_addr)
+{
+	int i, rc = 0;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct icp_cmd_generic_blob cmd_generic_blob;
+
+	cmd_generic_blob.ctx = ctx_data;
+	cmd_generic_blob.frame_info_idx = index;
+	cmd_generic_blob.io_buf_addr = io_buf_addr;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB)
+			continue;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_icp_packet_generic_blob_handler, &cmd_generic_blob);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed in processing blobs %d", rc);
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_process_cfg_io_cmd(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *ioconfig_cmd,
+	uint64_t request_id,
+	uint64_t io_config)
+{
+	ioconfig_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+	ioconfig_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+	else
+		ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+	ioconfig_cmd->num_fw_handles = 1;
+	ioconfig_cmd->fw_handles[0] = ctx_data->fw_handle;
+	ioconfig_cmd->payload.indirect = io_config;
+	ioconfig_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	ioconfig_cmd->user_data2 = request_id;
+
+	return 0;
+}
+
+static int cam_icp_mgr_update_hfi_frame_process(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t *idx)
+{
+	int32_t index, rc;
+	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+
+	index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+		ctx_data->hfi_frame_process.bits);
+	if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "request idx is wrong: %d", index);
+		return -EINVAL;
+	}
+	set_bit(index, ctx_data->hfi_frame_process.bitmap);
+
+	ctx_data->hfi_frame_process.request_id[index] =
+		packet->header.request_id;
+	ctx_data->hfi_frame_process.frame_info[index].request_id =
+		packet->header.request_id;
+	ctx_data->hfi_frame_process.frame_info[index].io_config = 0;
+	rc = cam_icp_process_generic_cmd_buffer(packet, ctx_data, index,
+		&ctx_data->hfi_frame_process.frame_info[index].io_config);
+	if (rc) {
+		clear_bit(index, ctx_data->hfi_frame_process.bitmap);
+		ctx_data->hfi_frame_process.request_id[index] = -1;
+		return rc;
+	}
+
+	if (ctx_data->hfi_frame_process.frame_info[index].io_config)  {
+		hfi_cmd = (struct hfi_cmd_ipebps_async *)
+		&ctx_data->hfi_frame_process.frame_info[index].hfi_cfg_io_cmd;
+		rc = cam_icp_mgr_process_cfg_io_cmd(ctx_data, hfi_cmd,
+			packet->header.request_id,
+		ctx_data->hfi_frame_process.frame_info[index].io_config);
+	}
+	*idx = index;
+
+	return rc;
+}
+
+static void cam_icp_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	uint64_t   iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ICP,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ICP, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_UTIL, "failed to get src buf address");
+				continue;
+			}
+			if (iova_addr >> 32) {
+				CAM_ERR(CAM_ICP, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_ICP,
+				"pln %d dir %d w %d h %d s %u sh %u sz %d addr 0x%x off 0x%x memh %x",
+				j, io_cfg[i].direction,
+				io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				io_cfg[i].planes[j].plane_stride,
+				io_cfg[i].planes[j].slice_height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+
+			iova_addr += io_cfg[i].offsets[j];
+
+		}
+	}
+}
+
+static int cam_icp_mgr_config_stream_settings(
+	void *hw_mgr_priv, void *hw_stream_settings)
+{
+	int        rc = 0;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct icp_cmd_generic_blob cmd_generic_blob;
+	struct cam_hw_stream_setttings *config_args =
+		hw_stream_settings;
+
+	if ((!hw_stream_settings) ||
+		(!hw_mgr) || (!config_args->packet)) {
+		CAM_ERR(CAM_ICP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	ctx_data = config_args->ctxt_to_hw_map;
+	mutex_lock(&ctx_data->ctx_mutex);
+	packet = config_args->packet;
+
+	cmd_generic_blob.ctx = ctx_data;
+	cmd_generic_blob.frame_info_idx = -1;
+	cmd_generic_blob.io_buf_addr = NULL;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+	if (!cmd_desc[0].length ||
+		cmd_desc[0].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB) {
+		CAM_ERR(CAM_ICP, "Invalid cmd buffer length/metadata");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[0],
+		cam_icp_packet_generic_blob_handler, &cmd_generic_blob);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed in processing cmd mem blob %d", rc);
+
+end:
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return rc;
+}
+
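+/*
+ * Prepare a request for firmware: validate the packet, process cmd
+ * descriptors and generic blobs, patch buffer addresses, reserve a
+ * frame slot and fill in the async frame process command plus the
+ * in/out sync mappings.
+ */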
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int        rc = 0;
+	int32_t    idx;
+	uint32_t   fw_cmd_buf_iova_addr;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+
+	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		return -EINVAL;
+	}
+
+	ctx_data = prepare_args->ctxt_to_hw_map;
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_ICP, "ctx id: %u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	packet = prepare_args->packet;
+
+	if (cam_packet_util_validate_packet(packet,
+		prepare_args->remain_len)) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	rc = cam_icp_mgr_pkt_validation(packet);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
+		ctx_data, &fw_cmd_buf_iova_addr);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	prepare_args->pf_data->packet = packet;
+
+	CAM_DBG(CAM_REQ, "req id = %lld for ctx = %u",
+		packet->header.request_id, ctx_data->ctx_id);
+	/* Update Buffer Address from handles and patch information */
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
+		hw_mgr->iommu_sec_hdl);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
+		prepare_args, &idx);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+		packet, prepare_args, idx);
+	if (rc) {
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0)
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[idx]);
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		ctx_data->hfi_frame_process.request_id[idx] = -1;
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)
+			&ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
+	cam_icp_mgr_prepare_frame_process_cmd(
+		ctx_data, hfi_cmd, packet->header.request_id,
+		fw_cmd_buf_iova_addr);
+
+	prepare_args->num_hw_update_entries = 1;
+	prepare_args->hw_update_entries[0].addr = (uintptr_t)hfi_cmd;
+	prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx];
+
+	CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
+		packet->header.request_id, ctx_data->ctx_id);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
+			&hfi_frame_process->request_id[idx]);
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	}
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return 0;
+}
+
+static int cam_icp_mgr_delete_sync(void *priv, void *data)
+{
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)data;
+	ctx_data = task_data->data;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Null Context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->in_free_resource[idx])
+			continue;
+		/*
+		 * cam_sync_destroy(
+		 *	ctx_data->hfi_frame_process.in_free_resource[idx]);
+		 */
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return 0;
+}
+
+static int cam_icp_mgr_delete_sync_obj(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)ctx_data;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_delete_sync;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
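+/*
+ * Drop every pending frame slot for the context; deletion of any merged
+ * input sync objects is deferred to the cmd workq.
+ */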
+static int cam_icp_mgr_flush_all(struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+	bool clear_in_resource = false;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			ctx_data->hfi_frame_process.in_free_resource[idx] =
+				ctx_data->hfi_frame_process.in_resource[idx];
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		clear_in_resource = true;
+	}
+
+	if (clear_in_resource)
+		cam_icp_mgr_delete_sync_obj(ctx_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	int64_t request_id;
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+	bool clear_in_resource = false;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	request_id = *(int64_t *)flush_args->flush_req_pending[0];
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		if (hfi_frame_process->request_id[idx] != request_id)
+			continue;
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			ctx_data->hfi_frame_process.in_free_resource[idx] =
+				ctx_data->hfi_frame_process.in_resource[idx];
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		clear_in_resource = true;
+	}
+
+	if (clear_in_resource)
+		cam_icp_mgr_delete_sync_obj(ctx_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+	struct cam_hw_flush_args *flush_args = hw_flush_args;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+	if ((!hw_priv) || (!hw_flush_args)) {
+		CAM_ERR(CAM_ICP, "Input params are Null:");
+		return -EINVAL;
+	}
+
+	ctx_data = flush_args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Ctx data is NULL");
+		return -EINVAL;
+	}
+
+	if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+		(flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+		CAM_ERR(CAM_ICP, "Invalid lush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d",
+		ctx_data->ctx_id, flush_args->flush_type);
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
+		if (!atomic_read(&hw_mgr->recovery)
+			&& flush_args->num_req_active) {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
+			cam_icp_mgr_abort_handle(ctx_data);
+		} else {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		}
+		mutex_lock(&ctx_data->ctx_mutex);
+		cam_icp_mgr_flush_all(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (flush_args->num_req_active) {
+			CAM_ERR(CAM_ICP, "Flush request is not supported");
+			mutex_unlock(&ctx_data->ctx_mutex);
+			return -EINVAL;
+		}
+		if (flush_args->num_req_pending)
+			cam_icp_mgr_flush_req(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc = 0;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "Enter recovery set %d",
+		atomic_read(&hw_mgr->recovery));
+	ctx_data = release_hw->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "NULL ctx data");
+		return -EINVAL;
+	}
+
+	ctx_id = ctx_data->ctx_id;
+	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx_data[ctx_id].state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_ICP, "ctx is not in use: %d", ctx_id);
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!atomic_read(&hw_mgr->recovery) && release_hw->active_req) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		cam_icp_mgr_abort_handle(ctx_data);
+		cam_icp_mgr_send_abort_status(ctx_data);
+	} else {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
+	if (!hw_mgr->ctxt_cnt) {
+		CAM_DBG(CAM_ICP, "Last Release");
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
+		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
+		rc = cam_ipe_bps_deint(hw_mgr);
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if ((!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt))
+		cam_icp_device_timer_stop(hw_mgr);
+
+	CAM_DBG(CAM_ICP, "Release done for ctx_id %d", ctx_id);
+	return rc;
+}
+
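+/*
+ * Create a firmware handle for the device type via the cmd workq and
+ * wait up to 5s for the ack that populates ctx_data->fw_handle.
+ */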
+static int cam_icp_mgr_create_handle(uint32_t dev_type,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_cmd_create_handle create_handle;
+	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	int rc = 0;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	create_handle.size = sizeof(struct hfi_cmd_create_handle);
+	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
+	create_handle.handle_type = dev_type;
+	create_handle.user_data1 = PTR_TO_U64(ctx_data);
+	reinit_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&create_handle;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+		cam_hfi_queue_dump();
+	}
+
+	if (ctx_data->fw_handle == 0) {
+		CAM_ERR(CAM_ICP, "Invalid handle created");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_cmd_ping_pkt ping_pkt;
+	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	int rc = 0;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "No free task to send ping command");
+		return -ENOMEM;
+	}
+
+	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
+	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
+	ping_pkt.user_data = PTR_TO_U64(ctx_data);
+	init_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ping_pkt;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+		cam_hfi_queue_dump();
+	}
+
+	return rc;
+}
+
+static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_acquire_args *args,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	int acquire_size;
+	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+	struct cam_icp_res_info *p_icp_out = NULL;
+
+	if (copy_from_user(&icp_dev_acquire_info,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		CAM_ERR(CAM_ICP, "Failed in acquire");
+		return -EFAULT;
+	}
+
+	if (icp_dev_acquire_info.secure_mode > CAM_SECURE_MODE_SECURE) {
+		CAM_ERR(CAM_ICP, "Invalid mode:%d",
+			icp_dev_acquire_info.secure_mode);
+		return -EINVAL;
+	}
+
+	if ((icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) ||
+		(icp_dev_acquire_info.num_out_res <= 0)) {
+		CAM_ERR(CAM_ICP, "Invalid num of out resources: %u",
+			icp_dev_acquire_info.num_out_res);
+		return -EINVAL;
+	}
+
+	if (icp_dev_acquire_info.dev_type >= CAM_ICP_RES_TYPE_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid device type: %d",
+			icp_dev_acquire_info.dev_type);
+		return -EFAULT;
+	}
+
+	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
+		((icp_dev_acquire_info.num_out_res - 1) *
+		sizeof(struct cam_icp_res_info));
+	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
+	if (!ctx_data->icp_dev_acquire_info)
+		return -ENOMEM;
+
+	if (copy_from_user(ctx_data->icp_dev_acquire_info,
+		(void __user *)args->acquire_info, acquire_size)) {
+		CAM_ERR(CAM_ICP, "Failed in acquire: size = %d", acquire_size);
+		kfree(ctx_data->icp_dev_acquire_info);
+		ctx_data->icp_dev_acquire_info = NULL;
+		return -EFAULT;
+	}
+
+	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
+		ctx_data->icp_dev_acquire_info->dev_type,
+		ctx_data->icp_dev_acquire_info->in_res.format,
+		ctx_data->icp_dev_acquire_info->in_res.width,
+		ctx_data->icp_dev_acquire_info->in_res.height,
+		ctx_data->icp_dev_acquire_info->in_res.fps,
+		ctx_data->icp_dev_acquire_info->num_out_res,
+		ctx_data->icp_dev_acquire_info->scratch_mem_size);
+
+	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
+	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
+		CAM_DBG(CAM_ICP, "out[%d] %x %x %x %x", i,
+			p_icp_out[i].format,
+			p_icp_out[i].width,
+			p_icp_out[i].height,
+			p_icp_out[i].fps);
+
+	return 0;
+}
+
+static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc = 0, bitmap_size = 0;
+	uint32_t ctx_id = 0;
+	uint64_t io_buf_addr;
+	size_t io_buf_size;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
+	struct cam_cmd_mem_regions cmd_mem_region;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_ICP, "invalid number of resources: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "ENTER");
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -ENOSPC;
+	}
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	ctx_data->ctx_id = ctx_id;
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
+	if (rc)
+		goto acquire_info_failed;
+
+	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
+
+	CAM_DBG(CAM_ICP, "acquire io buf handle %d",
+		icp_dev_acquire_info->io_config_cmd_handle);
+	rc = cam_mem_get_io_buf(
+		icp_dev_acquire_info->io_config_cmd_handle,
+		hw_mgr->iommu_hdl,
+		&io_buf_addr, &io_buf_size);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
+		goto get_io_buf_failed;
+	}
+
+	CAM_DBG(CAM_ICP, "hdl: %d, addr: %pK, size: %zu",
+		icp_dev_acquire_info->io_config_cmd_handle,
+		(void *)io_buf_addr, io_buf_size);
+
+	if (!hw_mgr->ctxt_cnt) {
+		rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
+		if (rc)
+			goto get_io_buf_failed;
+
+		rc = cam_icp_mgr_icp_resume(hw_mgr);
+		if (rc)
+			goto get_io_buf_failed;
+
+		if (icp_hw_mgr.a5_debug_type)
+			hfi_set_debug_level(icp_hw_mgr.a5_debug_type,
+				icp_hw_mgr.a5_dbg_lvl);
+
+		hfi_set_fw_dump_level(icp_hw_mgr.a5_fw_dump_lvl);
+
+		rc = cam_icp_send_ubwc_cfg(hw_mgr);
+		if (rc)
+			goto ubwc_cfg_failed;
+	}
+
+	rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
+	if (rc)
+		goto ipe_bps_resume_failed;
+
+	rc = cam_icp_mgr_send_ping(ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ping ack not received");
+		goto send_ping_failed;
+	}
+	CAM_DBG(CAM_ICP, "ping ack received");
+
+	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
+		ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "create handle failed");
+		goto create_handle_failed;
+	}
+
+	cmd_mem_region.num_regions = 1;
+	cmd_mem_region.map_info_array[0].mem_handle =
+		icp_dev_acquire_info->io_config_cmd_handle;
+	cmd_mem_region.map_info_array[0].offset = 0;
+	cmd_mem_region.map_info_array[0].size =
+		icp_dev_acquire_info->io_config_cmd_size;
+	cmd_mem_region.map_info_array[0].flags = 0;
+
+	rc = cam_icp_process_stream_settings(ctx_data,
+		&cmd_mem_region, true);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"sending config io mapping failed rc %d", rc);
+		goto send_map_info_failed;
+	}
+
+	rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "IO Config command failed %d", rc);
+		goto ioconfig_failed;
+	}
+
+	rc = cam_icp_process_stream_settings(ctx_data,
+		&cmd_mem_region, false);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"sending config io unmapping failed %d", rc);
+		goto send_map_info_failed;
+	}
+
+	ctx_data->context_priv = args->context_data;
+	args->ctxt_to_hw_map = ctx_data;
+
+	bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
+	ctx_data->hfi_frame_process.bitmap =
+			kzalloc(bitmap_size, GFP_KERNEL);
+	if (!ctx_data->hfi_frame_process.bitmap) {
+		rc = -ENOMEM;
+		goto ioconfig_failed;
+	}
+
+	ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+	icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size;
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		icp_dev_acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+
+	cam_icp_ctx_clk_info_init(ctx_data);
+	ctx_data->state = CAM_ICP_CTX_STATE_ACQUIRED;
+	mutex_unlock(&ctx_data->ctx_mutex);
+	CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
+			(unsigned int)icp_dev_acquire_info->scratch_mem_size,
+			(unsigned int)ctx_data->fw_handle);
+	/* Start device timer */
+	if ((hw_mgr->bps_ctxt_cnt == 1) || (hw_mgr->ipe_ctxt_cnt == 1))
+		cam_icp_device_timer_start(hw_mgr);
+	/* Start context timer */
+	cam_icp_ctx_timer_start(ctx_data);
+	hw_mgr->ctxt_cnt++;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	CAM_DBG(CAM_ICP, "Acquire Done for ctx_id %u dev name %s dev type %d",
+		ctx_data->ctx_id, cam_icp_dev_type_to_name(
+		icp_dev_acquire_info->dev_type),
+		icp_dev_acquire_info->dev_type);
+
+	return 0;
+
+copy_to_user_failed:
+	kfree(ctx_data->hfi_frame_process.bitmap);
+	ctx_data->hfi_frame_process.bitmap = NULL;
+ioconfig_failed:
+	cam_icp_process_stream_settings(ctx_data,
+		&cmd_mem_region, false);
+send_map_info_failed:
+	cam_icp_mgr_destroy_handle(ctx_data);
+create_handle_failed:
+send_ping_failed:
+	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
+ipe_bps_resume_failed:
+ubwc_cfg_failed:
+	if (!hw_mgr->ctxt_cnt)
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
+get_io_buf_failed:
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+acquire_info_failed:
+	cam_icp_mgr_put_ctx(ctx_data);
+	cam_icp_mgr_process_dbg_buf(icp_hw_mgr.a5_dbg_lvl);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc = 0;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if ((!hw_mgr_priv) || (!hw_caps_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (copy_from_user(&icp_hw_mgr.icp_caps,
+		u64_to_user_ptr(query_cap->caps_handle),
+		sizeof(struct cam_icp_query_cap_cmd))) {
+		CAM_ERR(CAM_ICP, "copy_from_user failed");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
+	if (rc)
+		goto end;
+
+	icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
+	icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
+
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+		&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
+		CAM_ERR(CAM_ICP, "copy_to_user failed");
+		rc = -EFAULT;
+	}
+end:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_alloc_devs(struct device_node *of_node)
+{
+	int rc;
+	uint32_t num_dev;
+
+	rc = of_property_read_u32(of_node, "num-a5", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "getting num of a5 failed");
+		goto num_a5_failed;
+	}
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_A5] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_A5]) {
+		rc = -ENOMEM;
+		goto num_a5_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "getting number of ipe dev nodes failed");
+		goto num_ipe_failed;
+	}
+
+	if (!icp_hw_mgr.ipe1_enable)
+		num_dev = 1;
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
+		rc = -ENOMEM;
+		goto num_ipe_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-bps", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "read num bps devices failed");
+		goto num_bps_failed;
+	}
+	icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_BPS]) {
+		rc = -ENOMEM;
+		goto num_bps_failed;
+	}
+
+	icp_hw_mgr.ipe_bps_pc_flag = of_property_read_bool(of_node,
+		"ipe_bps_pc_en");
+
+	icp_hw_mgr.icp_pc_flag = of_property_read_bool(of_node,
+		"icp_pc_en");
+
+	return 0;
+num_bps_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+num_ipe_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+num_a5_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_init_devs(struct device_node *of_node)
+{
+	int rc = 0;
+	int count, i;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+
+	rc = cam_icp_mgr_alloc_devs(of_node);
+	if (rc)
+		return rc;
+
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_ICP, "no compat hw found in dev tree, cnt = %d",
+			count);
+		rc = -EINVAL;
+		goto compat_hw_name_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_ICP, "Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_ICP, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_ICP, "no child device");
+			of_node_put(child_node);
+			if (!icp_hw_mgr.ipe1_enable)
+				continue;
+			goto compat_hw_name_failed;
+		}
+		icp_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		if (!child_dev_intf->hw_ops.process_cmd) {
+			rc = -EINVAL;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		of_node_put(child_node);
+	}
+
+	icp_hw_mgr.a5_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_A5][0];
+	icp_hw_mgr.bps_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_BPS][0];
+	icp_hw_mgr.ipe0_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_IPE][0];
+	if (icp_hw_mgr.ipe1_enable)
+		icp_hw_mgr.ipe1_dev_intf =
+			icp_hw_mgr.devices[CAM_ICP_DEV_IPE][1];
+
+	return 0;
+compat_hw_name_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+	return rc;
+}
+
+static int cam_icp_mgr_create_wq(void)
+{
+	int rc;
+	int i;
+
+	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ,
+		0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a command worker");
+		goto cmd_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a message worker");
+		goto msg_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_timer_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.timer_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a timer worker");
+		goto timer_work_failed;
+	}
+
+	icp_hw_mgr.cmd_work_data =
+		kcalloc(ICP_WORKQ_NUM_TASK, sizeof(struct hfi_cmd_work_data),
+		GFP_KERNEL);
+	if (!icp_hw_mgr.cmd_work_data) {
+		rc = -ENOMEM;
+		goto cmd_work_data_failed;
+	}
+
+	icp_hw_mgr.msg_work_data =
+		kcalloc(ICP_WORKQ_NUM_TASK, sizeof(struct hfi_msg_work_data),
+		GFP_KERNEL);
+	if (!icp_hw_mgr.msg_work_data) {
+		rc = -ENOMEM;
+		goto msg_work_data_failed;
+	}
+
+	icp_hw_mgr.timer_work_data =
+		kcalloc(ICP_WORKQ_NUM_TASK, sizeof(struct hfi_msg_work_data),
+		GFP_KERNEL);
+	if (!icp_hw_mgr.timer_work_data) {
+		rc = -ENOMEM;
+		goto timer_work_data_failed;
+	}
+
+	rc = cam_icp_hw_mgr_create_debugfs_entry();
+	if (rc)
+		goto debugfs_create_failed;
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.msg_work->task.pool[i].payload =
+				&icp_hw_mgr.msg_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.cmd_work->task.pool[i].payload =
+				&icp_hw_mgr.cmd_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.timer_work->task.pool[i].payload =
+				&icp_hw_mgr.timer_work_data[i];
+	return 0;
+
+debugfs_create_failed:
+	kfree(icp_hw_mgr.timer_work_data);
+timer_work_data_failed:
+	kfree(icp_hw_mgr.msg_work_data);
+msg_work_data_failed:
+	kfree(icp_hw_mgr.cmd_work_data);
+cmd_work_data_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.timer_work);
+timer_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.msg_work);
+msg_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.cmd_work);
+cmd_work_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_icp_hw_mgr  *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_icp_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid cmd");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
+{
+	int i, rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_cpas_query_cap query;
+	uint32_t cam_caps, camera_hw_version;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &icp_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_icp_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_icp_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_icp_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config_stream_settings =
+		cam_icp_mgr_config_stream_settings;
+	hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
+	hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u;
+	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u;
+	hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
+	hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd;
+
+	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
+	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&icp_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_init(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	cam_cpas_get_hw_info(&query.camera_family,
+		&query.camera_version, &query.cpas_version, &cam_caps);
+	cam_cpas_get_cpas_hw_version(&camera_hw_version);
+
+	if (camera_hw_version == CAM_CPAS_TITAN_480_V100) {
+		if (cam_caps & CPAS_TITAN_480_IPE0_BIT)
+			icp_hw_mgr.ipe0_enable = true;
+		if (cam_caps & CPAS_BPS_BIT)
+			icp_hw_mgr.bps_enable = true;
+	} else {
+		if (cam_caps & CPAS_IPE0_BIT)
+			icp_hw_mgr.ipe0_enable = true;
+		if (cam_caps & CPAS_IPE1_BIT)
+			icp_hw_mgr.ipe1_enable = true;
+		if (cam_caps & CPAS_BPS_BIT)
+			icp_hw_mgr.bps_enable = true;
+	}
+
+	rc = cam_icp_mgr_init_devs(of_node);
+	if (rc)
+		goto dev_init_failed;
+
+	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get mmu handle failed: %d", rc);
+		goto icp_get_hdl_failed;
+	}
+
+	rc = cam_smmu_get_handle("cam-secure", &icp_hw_mgr.iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
+	rc = cam_icp_mgr_create_wq();
+	if (rc)
+		goto icp_wq_create_failed;
+
+	if (iommu_hdl)
+		*iommu_hdl = icp_hw_mgr.iommu_hdl;
+
+	init_completion(&icp_hw_mgr.a5_complete);
+	return rc;
+
+icp_wq_create_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_sec_hdl);
+	icp_hw_mgr.iommu_sec_hdl = -1;
+secure_hdl_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
+	icp_hw_mgr.iommu_hdl = -1;
+icp_get_hdl_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+dev_init_failed:
+	mutex_destroy(&icp_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_destroy(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}

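For orientation before the headers that follow: cam_icp_hw_mgr_init() above is the entry point for this file, and the ops it installs on cam_hw_mgr_intf drive everything else. A minimal caller sketch under stated assumptions: the probe context, `pdev`, and `user_ptr` are illustrative and not part of this diff, and the acquire_info field type follows the cast used in cam_icp_get_acquire_info().

	struct cam_hw_mgr_intf hw_mgr_intf = {0};
	struct cam_hw_acquire_args acq = {0};
	int iommu_hdl, rc;

	/* Wire up the ICP hw mgr from a probe-time device node. */
	rc = cam_icp_hw_mgr_init(pdev->dev.of_node,
		(uint64_t *)&hw_mgr_intf, &iommu_hdl);
	if (!rc) {
		acq.num_acq = 1;		/* must not exceed 1, see above */
		acq.acquire_info = (uintptr_t)user_ptr;	/* cam_icp_acquire_dev_info */
		rc = hw_mgr_intf.hw_acquire(hw_mgr_intf.hw_mgr_priv, &acq);
	}
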
+ 397 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h

@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_ICP_HW_MGR_H
+#define CAM_ICP_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <cam_icp.h>
+#include "cam_icp_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "hfi_session_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_soc_util.h"
+#include "cam_req_mgr_timer.h"
+
+#define CAM_ICP_ROLE_PARENT     1
+#define CAM_ICP_ROLE_CHILD      2
+
+#define CAM_FRAME_CMD_MAX       20
+
+#define CAM_MAX_OUT_RES         6
+#define CAM_MAX_IN_RES          8
+
+#define ICP_WORKQ_NUM_TASK      100
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_PACKET_SIZE         0
+#define ICP_PACKET_TYPE         1
+#define ICP_PACKET_OPCODE       2
+#define ICP_MAX_OUTPUT_SUPPORTED 6
+
+#define ICP_FRAME_PROCESS_SUCCESS 0
+#define ICP_FRAME_PROCESS_FAILURE 1
+#define ICP_MSG_BUF_SIZE        256
+#define ICP_DBG_BUF_SIZE        102400
+
+#define ICP_CLK_HW_IPE          0x0
+#define ICP_CLK_HW_BPS          0x1
+#define ICP_CLK_HW_MAX          0x2
+
+#define ICP_OVER_CLK_THRESHOLD  5
+
+#define CPAS_IPE0_BIT           0x1000
+#define CPAS_IPE1_BIT           0x2000
+#define CPAS_BPS_BIT            0x400
+#define CPAS_TITAN_480_IPE0_BIT 0x800
+
+#define ICP_PWR_CLP_BPS         0x00000001
+#define ICP_PWR_CLP_IPE0        0x00010000
+#define ICP_PWR_CLP_IPE1        0x00020000
+
+#define CAM_ICP_CTX_STATE_FREE      0x0
+#define CAM_ICP_CTX_STATE_IN_USE    0x1
+#define CAM_ICP_CTX_STATE_ACQUIRED  0x2
+#define CAM_ICP_CTX_STATE_RELEASE   0x3
+
+#define CAM_ICP_CTX_MAX_CMD_BUFFERS 0x2
+
+/* Currently applicable vote paths, based on the number of UAPI definitions */
+#define CAM_ICP_MAX_PER_PATH_VOTES 6
+
+/**
+ * struct icp_hfi_mem_info
+ * @qtbl: Memory info of queue table
+ * @cmd_q: Memory info of command queue
+ * @msg_q: Memory info of message queue
+ * @dbg_q: Memory info of debug queue
+ * @sec_heap: Memory info of secondary heap
+ * @fw_buf: Memory info of firmware
+ * @qdss_buf: Memory info of qdss
+ * @sfr_buf: Memory info for sfr buffer
+ * @shmem: Memory info for shared region
+ * @io_mem: Memory info for io region
+ */
+struct icp_hfi_mem_info {
+	struct cam_mem_mgr_memory_desc qtbl;
+	struct cam_mem_mgr_memory_desc cmd_q;
+	struct cam_mem_mgr_memory_desc msg_q;
+	struct cam_mem_mgr_memory_desc dbg_q;
+	struct cam_mem_mgr_memory_desc sec_heap;
+	struct cam_mem_mgr_memory_desc fw_buf;
+	struct cam_mem_mgr_memory_desc qdss_buf;
+	struct cam_mem_mgr_memory_desc sfr_buf;
+	struct cam_smmu_region_info shmem;
+	struct cam_smmu_region_info io_mem;
+};
+
+/**
+ * struct hfi_cmd_work_data
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct hfi_cmd_work_data {
+	uint32_t type;
+	void *data;
+	int32_t request_id;
+};
+
+/**
+ * struct hfi_msg_work_data
+ * @type: Task type
+ * @data: Pointer to message data
+ * @irq_status: IRQ status
+ */
+struct hfi_msg_work_data {
+	uint32_t type;
+	void *data;
+	uint32_t irq_status;
+};
+
+/**
+ * struct clk_work_data
+ * @type: Task type
+ * @data: Pointer to clock info
+ */
+struct clk_work_data {
+	uint32_t type;
+	void *data;
+};
+
+/**
+ * struct icp_frame_info
+ * @request_id: request id
+ * @io_config: the address of io config
+ * @hfi_cfg_io_cmd: command struct to be sent to hfi
+ */
+struct icp_frame_info {
+	uint64_t request_id;
+	uint64_t io_config;
+	struct hfi_cmd_ipebps_async hfi_cfg_io_cmd;
+};
+
+/**
+ * struct cam_icp_clk_bw_request_v2
+ *
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @num_paths: Number of paths for per path bw vote
+ * @axi_path: Per path vote info for IPE/BPS
+ */
+struct cam_icp_clk_bw_req_internal_v2 {
+	uint64_t budget_ns;
+	uint32_t frame_cycles;
+	uint32_t rt_flag;
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_ICP_MAX_PER_PATH_VOTES];
+};
+
+/**
+ * struct hfi_frame_process_info
+ * @hfi_frame_cmd: Frame process command info
+ * @bitmap: Bitmap for hfi_frame_cmd
+ * @bits: Used in hfi_frame_cmd bitmap
+ * @lock: Lock for hfi_frame_cmd
+ * @request_id: Request id list
+ * @num_out_resources: Number of out syncs
+ * @out_resource: Out sync info
+ * @fw_process_flag: Frame process flag
+ * @clk_info: Clock information for a request
+ * @clk_info_v2: Clock info for AXI bw voting v2
+ * @frame_info: information needed to process request
+ */
+struct hfi_frame_process_info {
+	struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
+	void *bitmap;
+	size_t bits;
+	struct mutex lock;
+	uint64_t request_id[CAM_FRAME_CMD_MAX];
+	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
+	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
+	uint32_t in_resource[CAM_FRAME_CMD_MAX];
+	uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
+	uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
+	struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
+	struct cam_icp_clk_bw_req_internal_v2 clk_info_v2[CAM_FRAME_CMD_MAX];
+	struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX];
+};
+
+/**
+ * struct cam_ctx_clk_info
+ * @curr_fc: Context latest request frame cycles
+ * @rt_flag: Flag to indicate real time request
+ * @base_clk: Base clock to process the request
+ * @reserved: Reserved field
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @clk_rate: Supported clock rates for the context
+ * @num_paths: Number of valid AXI paths
+ * @axi_path: ctx based per path bw vote
+ */
+struct cam_ctx_clk_info {
+	uint32_t curr_fc;
+	uint32_t rt_flag;
+	uint32_t base_clk;
+	uint32_t reserved;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+	int32_t clk_rate[CAM_MAX_VOTE];
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_ICP_MAX_PER_PATH_VOTES];
+};
+
+/**
+ * struct cam_icp_hw_ctx_data
+ * @context_priv: Context private data
+ * @ctx_mutex: Mutex for context
+ * @fw_handle: Firmware handle
+ * @scratch_mem_size: Scratch memory size
+ * @acquire_dev_cmd: Acquire command
+ * @icp_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @state: context state
+ * @role: Role of a context in case of chaining
+ * @chain_ctx: Peer context
+ * @hfi_frame_process: Frame process command
+ * @wait_complete: Completion info
+ * @temp_payload: Payload for destroy handle data
+ * @ctx_id: Context Id
+ * @bw_config_version: BW config version indicator
+ * @clk_info: Current clock info of a context
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ * @icp_dev_io_info: io config resource
+ */
+struct cam_icp_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	uint32_t fw_handle;
+	uint32_t scratch_mem_size;
+	struct cam_acquire_dev_cmd acquire_dev_cmd;
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	uint32_t state;
+	uint32_t role;
+	struct cam_icp_hw_ctx_data *chain_ctx;
+	struct hfi_frame_process_info hfi_frame_process;
+	struct completion wait_complete;
+	struct ipe_bps_destroy temp_payload;
+	uint32_t ctx_id;
+	uint32_t bw_config_version;
+	struct cam_ctx_clk_info clk_info;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
+	struct cam_icp_acquire_dev_info icp_dev_io_info;
+};
+
+/**
+ * struct icp_cmd_generic_blob
+ * @ctx: Current context info
+ * @frame_info_idx: Index used for frame process info
+ * @io_buf_addr: pointer to io buffer address
+ */
+struct icp_cmd_generic_blob {
+	struct cam_icp_hw_ctx_data *ctx;
+	uint32_t frame_info_idx;
+	uint64_t *io_buf_addr;
+};
+
+/**
+ * struct cam_icp_clk_info
+ * @base_clk: Base clock to process request
+ * @curr_clk: Current clock of hardware
+ * @threshold: Threshold for overclk count
+ * @over_clked: Over clock count
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @num_paths: Number of AXI vote paths
+ * @axi_path: Current per path bw vote info
+ * @hw_type: IPE/BPS device type
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ */
+struct cam_icp_clk_info {
+	uint32_t base_clk;
+	uint32_t curr_clk;
+	uint32_t threshold;
+	uint32_t over_clked;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+	uint32_t num_paths;
+	struct cam_axi_per_path_bw_vote axi_path[CAM_ICP_MAX_PER_PATH_VOTES];
+	uint32_t hw_type;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
+};
+
+/**
+ * struct cam_icp_hw_mgr
+ * @hw_mgr_mutex: Mutex for ICP hardware manager
+ * @hw_mgr_lock: Spinlock for ICP hardware manager
+ * @devices: Devices of ICP hardware manager
+ * @ctx_data: Context data
+ * @icp_caps: ICP capabilities
+ * @fw_download: Firmware download state
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @hfi_mem: Memory for hfi
+ * @cmd_work: Work queue for hfi commands
+ * @msg_work: Work queue for hfi messages
+ * @timer_work: Work queue for timer watchdog
+ * @msg_buf: Buffer for message data from firmware
+ * @dbg_buf: Buffer for debug data from firmware
+ * @a5_complete: Completion info
+ * @cmd_work_data: Pointer to command work queue task
+ * @msg_work_data: Pointer to message work queue task
+ * @timer_work_data: Pointer to timer work queue task
+ * @ctxt_cnt: Active context count
+ * @ipe_ctxt_cnt: IPE Active context count
+ * @bps_ctxt_cnt: BPS Active context count
+ * @dentry: Debugfs entry
+ * @a5_debug: A5 debug flag
+ * @icp_pc_flag: Flag to enable/disable power collapse
+ * @ipe_bps_pc_flag: Flag to enable/disable
+ *                   power collapse for ipe & bps
+ * @icp_debug_clk: Set clock based on debug value
+ * @icp_default_clk: Set this clock if the user doesn't supply one
+ * @clk_info: Clock info of hardware
+ * @secure_mode: Flag to enable/disable secure camera
+ * @a5_jtag_debug: entry to enable A5 JTAG debugging
+ * @a5_debug_type: entry to enable FW debug message/qdss
+ * @a5_dbg_lvl: debug level set to FW
+ * @a5_fw_dump_lvl: level set for dumping the FW data
+ * @ipe0_enable: Flag for IPE0
+ * @ipe1_enable: Flag for IPE1
+ * @bps_enable: Flag for BPS
+ * @a5_dev_intf : Device interface for A5
+ * @ipe0_dev_intf: Device interface for IPE0
+ * @ipe1_dev_intf: Device interface for IPE1
+ * @bps_dev_intf: Device interface for BPS
+ * @ipe_clk_state: IPE clock state flag
+ * @bps_clk_state: BPS clock state flag
+ * @recovery: Flag to validate if in previous session FW
+ *            reported a fatal error or wdt. If set FW is
+ *            re-downloaded for new camera session.
+ */
+struct cam_icp_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+
+	struct cam_hw_intf **devices[CAM_ICP_DEV_MAX];
+	struct cam_icp_hw_ctx_data ctx_data[CAM_ICP_CTX_MAX];
+	struct cam_icp_query_cap_cmd icp_caps;
+
+	bool fw_download;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct icp_hfi_mem_info hfi_mem;
+	struct cam_req_mgr_core_workq *cmd_work;
+	struct cam_req_mgr_core_workq *msg_work;
+	struct cam_req_mgr_core_workq *timer_work;
+	uint32_t msg_buf[ICP_MSG_BUF_SIZE];
+	uint32_t dbg_buf[ICP_DBG_BUF_SIZE];
+	struct completion a5_complete;
+	struct hfi_cmd_work_data *cmd_work_data;
+	struct hfi_msg_work_data *msg_work_data;
+	struct hfi_msg_work_data *timer_work_data;
+	uint32_t ctxt_cnt;
+	uint32_t ipe_ctxt_cnt;
+	uint32_t bps_ctxt_cnt;
+	struct dentry *dentry;
+	bool a5_debug;
+	bool icp_pc_flag;
+	bool ipe_bps_pc_flag;
+	uint64_t icp_debug_clk;
+	uint64_t icp_default_clk;
+	struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
+	bool secure_mode;
+	bool a5_jtag_debug;
+	u64 a5_debug_type;
+	u64 a5_dbg_lvl;
+	u64 a5_fw_dump_lvl;
+	bool ipe0_enable;
+	bool ipe1_enable;
+	bool bps_enable;
+	struct cam_hw_intf *a5_dev_intf;
+	struct cam_hw_intf *ipe0_dev_intf;
+	struct cam_hw_intf *ipe1_dev_intf;
+	struct cam_hw_intf *bps_dev_intf;
+	bool ipe_clk_state;
+	bool bps_clk_state;
+	atomic_t recovery;
+};
+
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr);
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr);
+#endif /* CAM_ICP_HW_MGR_H */

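The bitmap/bits pair in hfi_frame_process_info above is sized in cam_icp_mgr_acquire_hw() (BITS_TO_LONGS(CAM_FRAME_CMD_MAX) longs, bits = bytes * BITS_PER_BYTE). A plausible slot-reservation sketch with the standard kernel bitmap helpers; the helper name is hypothetical and the driver's real lookup may differ:

	#include <linux/bitmap.h>

	/* Hypothetical: reserve a free hfi_frame_cmd slot (call under lock). */
	static int icp_frame_slot_get(struct hfi_frame_process_info *fp)
	{
		int idx = find_first_zero_bit(fp->bitmap, fp->bits);

		if (idx >= CAM_FRAME_CMD_MAX)
			return -EAGAIN;		/* all slots in flight */
		set_bit(idx, fp->bitmap);
		return idx;
	}
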
+ 74 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_A5_HW_INTF_H
+#define CAM_A5_HW_INTF_H
+
+#include <linux/timer.h>
+#include <cam_defs.h>
+#include <cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+enum cam_icp_a5_cmd_type {
+	CAM_ICP_A5_CMD_FW_DOWNLOAD,
+	CAM_ICP_A5_CMD_POWER_COLLAPSE,
+	CAM_ICP_A5_CMD_POWER_RESUME,
+	CAM_ICP_A5_CMD_SET_FW_BUF,
+	CAM_ICP_A5_CMD_ACQUIRE,
+	CAM_ICP_A5_SET_IRQ_CB,
+	CAM_ICP_A5_TEST_IRQ,
+	CAM_ICP_A5_SEND_INIT,
+	CAM_ICP_A5_CMD_VOTE_CPAS,
+	CAM_ICP_A5_CMD_CPAS_START,
+	CAM_ICP_A5_CMD_CPAS_STOP,
+	CAM_ICP_A5_CMD_UBWC_CFG,
+	CAM_ICP_A5_CMD_PC_PREP,
+	CAM_ICP_A5_CMD_MAX,
+};
+
+struct cam_icp_a5_set_fw_buf_info {
+	uint32_t iova;
+	uint64_t kva;
+	uint64_t len;
+};
+
+/**
+ * struct cam_icp_a5_query_cap - ICP query device capability payload
+ * @fw_version: firmware version info
+ * @api_version: api version info
+ * @num_ipe: number of ipes
+ * @num_bps: number of bps
+ * @num_dev: number of device capabilities in dev_caps
+ * @reserved: reserved
+ * @dev_ver: returned device capability array for the CAM_QUERY_CAP IOCTL
+ */
+struct cam_icp_a5_query_cap {
+	struct cam_icp_ver fw_version;
+	struct cam_icp_ver api_version;
+	uint32_t num_ipe;
+	uint32_t num_bps;
+	uint32_t num_dev;
+	uint32_t reserved;
+	struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
+};
+
+struct cam_icp_a5_acquire_dev {
+	uint32_t ctx_id;
+	struct cam_icp_acquire_dev_info icp_acquire_info;
+	struct cam_icp_res_info icp_out_acquire_info[2];
+	uint32_t fw_handle;
+};
+
+struct cam_icp_a5_set_irq_cb {
+	int32_t (*icp_hw_mgr_cb)(uint32_t irq_status, void *data);
+	void *data;
+};
+
+struct cam_icp_a5_test_irq {
+	uint32_t test_irq;
+};
+#endif /* CAM_A5_HW_INTF_H */

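Every opcode in cam_icp_a5_cmd_type travels through the generic process_cmd hook on struct cam_hw_intf, the same hook cam_icp_mgr_init_devs() checks for above. A hedged sketch of registering the IRQ callback, assuming the usual (hw_priv, cmd_type, args, arg_size) hook signature from cam_hw_intf.h; the handler name is hypothetical:

	struct cam_hw_intf *a5 = icp_hw_mgr.a5_dev_intf;
	struct cam_icp_a5_set_irq_cb irq_cb = {
		.icp_hw_mgr_cb = cam_icp_hw_mgr_cb,	/* hypothetical handler */
		.data = &icp_hw_mgr,
	};
	int rc;

	/* Hand the hw mgr's IRQ callback down to the A5 core driver. */
	rc = a5->hw_ops.process_cmd(a5->hw_priv, CAM_ICP_A5_SET_IRQ_CB,
		&irq_cb, sizeof(irq_cb));
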
+ 37 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h

@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_BPS_HW_INTF_H
+#define CAM_BPS_HW_INTF_H
+
+#include <cam_defs.h>
+#include <cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+/* BPS register */
+#define BPS_TOP_RST_CMD              0x1008
+#define BPS_CDM_RST_CMD              0x10
+#define BPS_CDM_IRQ_STATUS           0x44
+#define BPS_TOP_IRQ_STATUS           0x100C
+
+/* BPS CDM/TOP status register */
+#define BPS_RST_DONE_IRQ_STATUS_BIT  0x1
+
+enum cam_icp_bps_cmd_type {
+	CAM_ICP_BPS_CMD_FW_DOWNLOAD,
+	CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+	CAM_ICP_BPS_CMD_POWER_RESUME,
+	CAM_ICP_BPS_CMD_SET_FW_BUF,
+	CAM_ICP_BPS_CMD_VOTE_CPAS,
+	CAM_ICP_BPS_CMD_CPAS_START,
+	CAM_ICP_BPS_CMD_CPAS_STOP,
+	CAM_ICP_BPS_CMD_UPDATE_CLK,
+	CAM_ICP_BPS_CMD_DISABLE_CLK,
+	CAM_ICP_BPS_CMD_RESET,
+	CAM_ICP_BPS_CMD_MAX,
+};
+
+#endif /* CAM_BPS_HW_INTF_H */

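The register map above implies the usual write-then-poll reset flow for BPS. A sketch under assumptions: `base` is the mapped BPS register block (void __iomem *), the cam_io_w_mb()/cam_io_r_mb() helpers from cam_utils are available, and the command value written here is illustrative rather than taken from this diff:

	/* Kick the TOP reset, then poll for the done bit (bounded wait). */
	int retries = 100;

	cam_io_w_mb(0x1, base + BPS_TOP_RST_CMD);
	while (retries-- &&
		!(cam_io_r_mb(base + BPS_TOP_IRQ_STATUS) &
		BPS_RST_DONE_IRQ_STATUS_BIT))
		usleep_range(100, 200);
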
+ 33 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h

@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_ICP_HW_INTF_H
+#define CAM_ICP_HW_INTF_H
+
+#define CAM_ICP_CMD_BUF_MAX_SIZE     128
+#define CAM_ICP_MSG_BUF_MAX_SIZE     CAM_ICP_CMD_BUF_MAX_SIZE
+
+#define CAM_ICP_BW_CONFIG_UNKNOWN 0
+#define CAM_ICP_BW_CONFIG_V1      1
+#define CAM_ICP_BW_CONFIG_V2      2
+
+enum cam_a5_hw_type {
+	CAM_ICP_DEV_A5,
+	CAM_ICP_DEV_IPE,
+	CAM_ICP_DEV_BPS,
+	CAM_ICP_DEV_MAX,
+};
+
+/**
+ * struct cam_a5_clk_update_cmd - Payload for hw manager command
+ *
+ * @curr_clk_rate:        clk rate to HW
+ * @ipe_bps_pc_enable:    power collapse enable flag
+ */
+struct cam_a5_clk_update_cmd {
+	uint32_t  curr_clk_rate;
+	bool  ipe_bps_pc_enable;
+};
+#endif

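struct cam_a5_clk_update_cmd above is the payload the hw mgr hands to IPE/BPS when re-clocking (the *_CMD_UPDATE_CLK opcodes in the neighbouring headers). A hedged dispatch sketch, reusing the process_cmd hook assumption from earlier; ICP_CLK_SVS_HZ is defined in cam_icp_hw_mgr_intf.h later in this diff:

	struct cam_a5_clk_update_cmd clk_cmd = {
		.curr_clk_rate = ICP_CLK_SVS_HZ,
		.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag,
	};
	struct cam_hw_intf *bps = icp_hw_mgr.bps_dev_intf;
	int rc;

	/* Ask the BPS core driver to move to the requested rate. */
	rc = bps->hw_ops.process_cmd(bps->hw_priv,
		CAM_ICP_BPS_CMD_UPDATE_CLK, &clk_cmd, sizeof(clk_cmd));
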
+ 37 - 0
drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h

@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_IPE_HW_INTF_H
+#define CAM_IPE_HW_INTF_H
+
+#include <cam_defs.h>
+#include <cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+/* IPE registers */
+#define IPE_TOP_RST_CMD              0x1008
+#define IPE_CDM_RST_CMD              0x10
+#define IPE_CDM_IRQ_STATUS           0x44
+#define IPE_TOP_IRQ_STATUS           0x100C
+
+/* IPE CDM/TOP status register */
+#define IPE_RST_DONE_IRQ_STATUS_BIT  0x1
+
+enum cam_icp_ipe_cmd_type {
+	CAM_ICP_IPE_CMD_FW_DOWNLOAD,
+	CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+	CAM_ICP_IPE_CMD_POWER_RESUME,
+	CAM_ICP_IPE_CMD_SET_FW_BUF,
+	CAM_ICP_IPE_CMD_VOTE_CPAS,
+	CAM_ICP_IPE_CMD_CPAS_START,
+	CAM_ICP_IPE_CMD_CPAS_STOP,
+	CAM_ICP_IPE_CMD_UPDATE_CLK,
+	CAM_ICP_IPE_CMD_DISABLE_CLK,
+	CAM_ICP_IPE_CMD_RESET,
+	CAM_ICP_IPE_CMD_MAX,
+};
+
+#endif /* CAM_IPE_HW_INTF_H */

+ 47 - 0
drivers/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h

@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_ICP_HW_MGR_INTF_H
+#define CAM_ICP_HW_MGR_INTF_H
+
+#include <linux/of.h>
+#include <cam_icp.h>
+#include <cam_defs.h>
+#include "cam_cpas_api.h"
+
+#define ICP_CLK_TURBO_HZ         600000000
+#define ICP_CLK_SVS_HZ           400000000
+
+#define CAM_ICP_A5_BW_BYTES_VOTE 40000000
+
+#define CAM_ICP_CTX_MAX          54
+
+#define CPAS_IPE1_BIT            0x2000
+
+#define CAM_IPE_DEFAULT_AXI_PATH    CAM_AXI_PATH_DATA_IPE_WR_VID
+#define CAM_IPE_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_WRITE
+#define CAM_BPS_DEFAULT_AXI_PATH    CAM_AXI_PATH_DATA_ALL
+#define CAM_BPS_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_WRITE
+#define CAM_ICP_DEFAULT_AXI_PATH    CAM_AXI_PATH_DATA_ALL
+#define CAM_ICP_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_READ
+
+int cam_icp_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl, int *iommu_hdl);
+
+/**
+ * struct cam_icp_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: flag for axi vote data
+ */
+struct cam_icp_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+#endif /* CAM_ICP_HW_MGR_INTF_H */

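cam_icp_cpas_vote above bundles an AHB level vote with an AXI bandwidth vote. A fill-in sketch using the default path/transaction macros defined in this header; the cam_ahb_vote/cam_axi_vote member names follow cam_cpas_api.h and should be treated as assumptions:

	struct cam_icp_cpas_vote vote = {0};

	/* One SVS-level AHB vote... */
	vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
	vote.ahb_vote.vote.level = CAM_SVS_VOTE;
	vote.ahb_vote_valid = true;

	/* ...plus a single default AXI path carrying the A5 byte vote. */
	vote.axi_vote.num_paths = 1;
	vote.axi_vote.axi_path[0].path_data_type = CAM_ICP_DEFAULT_AXI_PATH;
	vote.axi_vote.axi_path[0].transac_type = CAM_ICP_DEFAULT_AXI_TRANSAC;
	vote.axi_vote.axi_path[0].camnoc_bw = CAM_ICP_A5_BW_BYTES_VOTE;
	vote.axi_vote.axi_path[0].mnoc_ab_bw = CAM_ICP_A5_BW_BYTES_VOTE;
	vote.axi_vote_valid = true;
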
+ 14 - 0
drivers/cam_icp/icp_hw/ipe_hw/Makefile

@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/icp_hw/ipe_hw
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_icp/fw_inc
+ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o

Some files were not shown because too many files changed in this diff