Camera: Bring over camera driver changes

Bring over camera driver changes as of msm-4.19
commit 5a5551a7 (Merge "msm: camera: reqmgr: Fix CRM
shift one req issue").

Change-Id: Ic0c2b2d74d1b3470c1c51d98228e312fb13c501a
Signed-off-by: Jigarkumar Zala <jzala@codeaurora.org>
This commit is contained in:
Jigarkumar Zala
2019-05-24 17:56:58 -07:00
parent 9be583aa80
commit 05349feaa2
356 changed files with 134959 additions and 10 deletions

12
drivers/cam_cdm/Makefile Normal file
View File

@@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only

# Header search paths for the CDM (Camera Data Mover) driver; all paths
# are relative to the kernel source tree's camera techpack.
ccflags-y += -I$(srctree)/techpack/camera/include/uapi/media
ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu
ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_utils
ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_cpas/include
ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_core
ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_req_mgr

# CDM objects: utility/command generation, interface manager, SoC glue,
# common core logic, and the virtual (SW) plus real HW backends.
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_util.o cam_cdm_intf.o cam_cdm_soc.o\
	cam_cdm_core_common.o cam_cdm_virtual_core.o \
	cam_cdm_hw_core.o

253
drivers/cam_cdm/cam_cdm.h Normal file
View File

@@ -0,0 +1,253 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_CDM_H_
#define _CAM_CDM_H_

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/random.h>
#include <linux/spinlock_types.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/bug.h>

#include "cam_cdm_intf_api.h"
#include "cam_soc_util.h"
#include "cam_cpas_api.h"
#include "cam_hw_intf.h"
#include "cam_hw.h"
#include "cam_debug_util.h"

/* Highest SW (virtual) CDM version this driver supports */
#define CAM_MAX_SW_CDM_VERSION_SUPPORTED 1
/* Manager-table slot reserved for the virtual (SW) CDM */
#define CAM_SW_CDM_INDEX 0
/* Max in-flight work items on the CDM workqueue */
#define CAM_CDM_INFLIGHT_WORKS 5
/* HW reset wait limit (units not visible here — confirm at the
 * wait_for_completion call site; presumably ms)
 */
#define CAM_CDM_HW_RESET_TIMEOUT 300

/*
 * Client handle layout: bits [4:0] = client index within one CDM,
 * bits [8:5] = CDM HW index.
 */
#define CAM_CDM_HW_ID_MASK 0xF
#define CAM_CDM_HW_ID_SHIFT 0x5
#define CAM_CDM_CLIENTS_ID_MASK 0x1F

/* Extract the CDM HW index from a client handle */
#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
	CAM_CDM_HW_ID_MASK)
/* Compose a client handle from a HW index and a client index */
#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
	((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
/* Extract the client index from a client handle */
#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
/* Table sizes derived from the handle field widths above */
#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
enum cam_cdm_reg_attr {
	CAM_REG_ATTR_READ,
	CAM_REG_ATTR_WRITE,
	CAM_REG_ATTR_READ_WRITE,
};

/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
enum cam_cdm_hw_process_intf_cmd {
	CAM_CDM_HW_INTF_CMD_ACQUIRE,
	CAM_CDM_HW_INTF_CMD_RELEASE,
	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
	CAM_CDM_HW_INTF_CMD_RESET_HW,
	CAM_CDM_HW_INTF_CMD_INVALID,
};

/* enum cam_cdm_regs - CDM driver offset enums.
 * Indexes into struct cam_cdm_reg_offset_table; the numeric comments
 * mark the first enumerator of each register group.
 */
enum cam_cdm_regs {
	/*cfg_offsets 0*/
	CDM_CFG_HW_VERSION,
	CDM_CFG_TITAN_VERSION,
	CDM_CFG_RST_CMD,
	CDM_CFG_CGC_CFG,
	CDM_CFG_CORE_CFG,
	CDM_CFG_CORE_EN,
	CDM_CFG_FE_CFG,
	/*irq_offsets 7*/
	CDM_IRQ_MASK,
	CDM_IRQ_CLEAR,
	CDM_IRQ_CLEAR_CMD,
	CDM_IRQ_SET,
	CDM_IRQ_SET_CMD,
	CDM_IRQ_STATUS,
	CDM_IRQ_USR_DATA,
	/*BL FIFO Registers 14*/
	CDM_BL_FIFO_BASE_REG,
	CDM_BL_FIFO_LEN_REG,
	CDM_BL_FIFO_STORE_REG,
	CDM_BL_FIFO_CFG,
	CDM_BL_FIFO_RB,
	CDM_BL_FIFO_BASE_RB,
	CDM_BL_FIFO_LEN_RB,
	CDM_BL_FIFO_PENDING_REQ_RB,
	/*CDM System Debug Registers 22*/
	CDM_DBG_WAIT_STATUS,
	CDM_DBG_SCRATCH_0_REG,
	CDM_DBG_SCRATCH_1_REG,
	CDM_DBG_SCRATCH_2_REG,
	CDM_DBG_SCRATCH_3_REG,
	CDM_DBG_SCRATCH_4_REG,
	CDM_DBG_SCRATCH_5_REG,
	CDM_DBG_SCRATCH_6_REG,
	CDM_DBG_SCRATCH_7_REG,
	CDM_DBG_LAST_AHB_ADDR,
	CDM_DBG_LAST_AHB_DATA,
	CDM_DBG_CORE_DBUG,
	CDM_DBG_LAST_AHB_ERR_ADDR,
	CDM_DBG_LAST_AHB_ERR_DATA,
	CDM_DBG_CURRENT_BL_BASE,
	CDM_DBG_CURRENT_BL_LEN,
	CDM_DBG_CURRENT_USED_AHB_BASE,
	CDM_DBG_DEBUG_STATUS,
	/*FE Bus Miser Registers 40*/
	CDM_BUS_MISR_CFG_0,
	CDM_BUS_MISR_CFG_1,
	CDM_BUS_MISR_RD_VAL,
	/*Performance Counter registers 43*/
	CDM_PERF_MON_CTRL,
	CDM_PERF_MON_0,
	CDM_PERF_MON_1,
	CDM_PERF_MON_2,
	/*Spare registers 47*/
	CDM_SPARE,
};

/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
struct cam_cdm_reg_offset {
	uint32_t offset;
	enum cam_cdm_reg_attr attribute;
};

/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
struct cam_cdm_reg_offset_table {
	uint32_t first_offset;
	uint32_t last_offset;
	uint32_t reg_count;      /* number of valid entries in @offsets */
	const struct cam_cdm_reg_offset *offsets;
	uint32_t offset_max_size;
};

/* enum cam_cdm_flags - Bit fields for CDM flags used */
enum cam_cdm_flags {
	CAM_CDM_FLAG_SHARED_CDM,
	CAM_CDM_FLAG_PRIVATE_CDM,
};

/* enum cam_cdm_type - Enum for possible CAM CDM types */
enum cam_cdm_type {
	CAM_VIRTUAL_CDM,
	CAM_HW_CDM,
};

/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */
enum cam_cdm_mem_base_index {
	CAM_HW_CDM_BASE_INDEX,
	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
};
/* struct cam_cdm_client - struct for cdm clients data.*/
struct cam_cdm_client {
	struct cam_cdm_acquire_data data;  /* copy of the acquire request */
	void __iomem *changebase_addr;
	uint32_t stream_on;                /* client currently streaming */
	uint32_t refcount;                 /* guarded by @lock */
	struct mutex lock;
	uint32_t handle;                   /* see CAM_CDM_CREATE_CLIENT_HANDLE */
};

/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
struct cam_cdm_work_payload {
	struct cam_hw_info *hw;
	uint32_t irq_status;
	uint32_t irq_data;
	struct work_struct work;
};

/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
enum cam_cdm_bl_cb_type {
	CAM_HW_CDM_BL_CB_CLIENT = 1,
	CAM_HW_CDM_BL_CB_INTERNAL,
};

/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
struct cam_cdm_bl_cb_request_entry {
	uint8_t bl_tag;                    /* tag matched against HW irq data */
	enum cam_cdm_bl_cb_type request_type;
	uint32_t client_hdl;
	void *userdata;
	uint32_t cookie;
	struct list_head entry;            /* linked on cam_cdm.bl_request_list */
};

/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
struct cam_cdm_hw_intf_cmd_submit_bl {
	uint32_t handle;
	struct cam_cdm_bl_request *data;
};

/* struct cam_cdm_hw_mem - CDM hw memory struct */
struct cam_cdm_hw_mem {
	int32_t handle;
	uint32_t vaddr;                    /* device (IOVA) address */
	uintptr_t kmdvaddr;                /* kernel mapping */
	size_t size;
};

/* struct cam_cdm - CDM hw device struct */
struct cam_cdm {
	uint32_t index;
	char name[128];
	enum cam_cdm_id id;
	enum cam_cdm_flags flags;
	struct completion reset_complete;
	struct completion bl_complete;
	struct workqueue_struct *work_queue;
	struct list_head bl_request_list;  /* pending BL callback entries */
	struct cam_hw_version version;
	uint32_t hw_version;
	uint32_t hw_family_version;
	struct cam_iommu_handle iommu_hdl;
	struct cam_cdm_reg_offset_table *offset_tbl;
	struct cam_cdm_utils_ops *ops;
	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
	uint8_t bl_tag;                    /* rolling tag for submitted BLs */
	atomic_t error;
	atomic_t bl_done;
	struct cam_cdm_hw_mem gen_irq;     /* memory used to generate irqs */
	uint32_t cpas_handle;
};

/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
struct cam_cdm_private_dt_data {
	bool dt_cdm_shared;
	uint32_t dt_num_supported_clients;
	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
};

/* struct cam_cdm_intf_devices - CDM mgr interface devices */
struct cam_cdm_intf_devices {
	struct mutex lock;                 /* protects @device and @data */
	uint32_t refcount;
	struct cam_hw_intf *device;
	struct cam_cdm_private_dt_data *data;
};

/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
struct cam_cdm_intf_mgr {
	bool probe_done;
	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
	uint32_t cdm_count;
	uint32_t dt_supported_hw_cdm;
	int32_t refcount;
};

/* Register a CDM HW (or virtual) backend with the interface manager;
 * on success *index is the assigned manager slot.
 */
int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t *index);
/* Remove a previously registered CDM backend from slot @index. */
int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t index);

#endif /* _CAM_CDM_H_ */

View File

@@ -0,0 +1,587 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_soc_util.h"
#include "cam_smmu_api.h"
#include "cam_io_util.h"
#include "cam_cdm_intf_api.h"
#include "cam_cdm.h"
#include "cam_cdm_soc.h"
#include "cam_cdm_core_common.h"
/* Take one reference on a CDM client; the client's own lock serializes
 * refcount updates against concurrent get/put callers.
 */
static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
{
	mutex_lock(&client->lock);
	CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
		client->refcount);
	client->refcount++;
	mutex_unlock(&client->lock);
}
/* Drop one reference on a CDM client. A put on a zero refcount is a
 * driver bug; it is logged and trapped with WARN_ON rather than allowed
 * to underflow.
 */
static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
{
	mutex_lock(&client->lock);
	CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
		client->refcount);
	if (!client->refcount) {
		CAM_ERR(CAM_CDM, "Refcount put when zero");
		WARN_ON(1);
	} else {
		client->refcount--;
	}
	mutex_unlock(&client->lock);
}
/*
 * cam_cdm_set_cam_hw_version() - decode a raw CDM HW version register
 * value into a struct cam_hw_version.
 * @ver:         raw version word (e.g. CAM_CDM170_VERSION = 0x10000000).
 * @cam_version: output; populated only for recognized versions.
 *
 * Returns true if @ver is a supported CDM version, false otherwise.
 *
 * Fix: the fields are now shifted down after masking. The previous code
 * stored e.g. major = (0x10000000 & 0xF0000000) = 0x10000000, which could
 * never satisfy the "major == 1" comparisons in cam_cdm_get_ops().
 */
bool cam_cdm_set_cam_hw_version(
	uint32_t ver, struct cam_hw_version *cam_version)
{
	switch (ver) {
	case CAM_CDM170_VERSION:
	case CAM_CDM175_VERSION:
	case CAM_CDM480_VERSION:
		/* major in bits [31:28], minor in [27:16], incr in [15:0] */
		cam_version->major = (ver & 0xF0000000) >> 28;
		cam_version->minor = (ver & 0xFFF0000) >> 16;
		cam_version->incr = (ver & 0xFFFF);
		cam_version->reserved = 0;
		return true;
	default:
		CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
		break;
	}
	return false;
}
bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
struct cam_cpas_irq_data *irq_data)
{
if (!irq_data)
return false;
CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
return false;
}
/*
 * cam_cdm_get_ops() - look up the util ops table for a CDM.
 * @ver:            raw HW version word (used when !@by_cam_version).
 * @cam_version:    decoded version triple (used when @by_cam_version).
 * @by_cam_version: selects which of the two inputs to match on.
 *
 * Returns the matching ops table, or NULL when the version is unknown
 * (or @cam_version is NULL in by-cam-version mode).
 */
struct cam_cdm_utils_ops *cam_cdm_get_ops(
	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
{
	if (!by_cam_version) {
		switch (ver) {
		case CAM_CDM170_VERSION:
		case CAM_CDM175_VERSION:
		case CAM_CDM480_VERSION:
			return &CDM170_ops;
		default:
			CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
				ver);
		}
		return NULL;
	}

	if (!cam_version)
		return NULL;

	/* Versions 1.0.0, 1.1.0 and 1.2.0 all share the CDM170 util ops. */
	if ((cam_version->major == 1) && (cam_version->incr == 0) &&
		(cam_version->minor <= 2)) {
		CAM_DBG(CAM_CDM,
			"cam_hw_version=%x:%x:%x supported",
			cam_version->major, cam_version->minor,
			cam_version->incr);
		return &CDM170_ops;
	}

	CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
		cam_version->major, cam_version->minor,
		cam_version->incr);
	return NULL;
}
/* Walk @bl_list for the callback entry whose bl_tag matches @tag.
 * Returns the entry, or NULL (with an error log) if no match exists.
 * Caller must hold whatever lock protects @bl_list.
 */
struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
	uint32_t tag, struct list_head *bl_list)
{
	struct cam_cdm_bl_cb_request_entry *entry;

	list_for_each_entry(entry, bl_list, entry) {
		if (entry->bl_tag == tag)
			return entry;
	}

	CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
	return NULL;
}
/*
 * cam_cdm_get_caps() - report this CDM's IOMMU handle as its capability.
 * @get_hw_cap_args must point at a struct cam_iommu_handle and @arg_size
 * must match its size. Returns 0 on success, -EINVAL on any bad argument.
 */
int cam_cdm_get_caps(void *hw_priv,
	void *get_hw_cap_args, uint32_t arg_size)
{
	struct cam_hw_info *cdm_hw = hw_priv;
	struct cam_cdm *cdm_core;

	if (!cdm_hw || !cdm_hw->core_info || !get_hw_cap_args ||
		(arg_size != sizeof(struct cam_iommu_handle)))
		return -EINVAL;

	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
	*((struct cam_iommu_handle *)get_hw_cap_args) = cdm_core->iommu_hdl;
	return 0;
}
/* Return the index of the first unused entry in @hw->clients, or -EBUSY
 * when every slot is occupied. Caller is responsible for locking.
 */
int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
{
	int slot;

	for (slot = 0; slot < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; slot++) {
		if (!hw->clients[slot]) {
			CAM_DBG(CAM_CDM, "Found client slot %d", slot);
			return slot;
		}
	}

	CAM_ERR(CAM_CDM, "No more client slots");
	return -EBUSY;
}
/*
 * cam_cdm_notify_clients() - fan out a CDM event to client callbacks.
 * @cdm_hw: CDM device.
 * @status: event type; BL_SUCCESS targets one client, anything else is
 *          broadcast to all registered clients.
 * @data:   for BL_SUCCESS, a struct cam_cdm_bl_cb_request_entry *;
 *          for PAGEFAULT, the faulting IOVA cast into the pointer.
 */
void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
	enum cam_cdm_cb_status status, void *data)
{
	int i;
	struct cam_cdm *core = NULL;
	struct cam_cdm_client *client = NULL;

	if (!cdm_hw) {
		CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
		return;
	}
	core = (struct cam_cdm *)cdm_hw->core_info;

	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
		/* Targeted delivery: the BL entry names exactly one client. */
		int client_idx;
		struct cam_cdm_bl_cb_request_entry *node =
			(struct cam_cdm_bl_cb_request_entry *)data;

		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
		client = core->clients[client_idx];
		/* Validate the handle still maps to a live client. */
		if ((!client) || (client->handle != node->client_hdl)) {
			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
				node->client_hdl);
			return;
		}
		/* Hold a ref so the client cannot be released mid-callback.
		 * Note the callback itself runs without client->lock held.
		 */
		cam_cdm_get_client_refcount(client);
		if (client->data.cam_cdm_callback) {
			CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
				client->data.identifier, node->cookie);
			client->data.cam_cdm_callback(node->client_hdl,
				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
				node->cookie);
			CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
				node->cookie);
		} else {
			CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
				node->client_hdl);
		}
		cam_cdm_put_client_refcount(client);
		return;
	}

	/* Broadcast path: currently only PAGEFAULT is actually delivered;
	 * other statuses just log missing callbacks.
	 */
	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
		if (core->clients[i] != NULL) {
			client = core->clients[i];
			/* Callback is invoked with client->lock held here,
			 * unlike the BL_SUCCESS path above.
			 */
			mutex_lock(&client->lock);
			CAM_DBG(CAM_CDM, "Found client slot %d", i);
			if (client->data.cam_cdm_callback) {
				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
					unsigned long iova =
						(unsigned long)data;

					/* Only the low 32 bits of the fault
					 * address are passed through.
					 */
					client->data.cam_cdm_callback(
						client->handle,
						client->data.userdata,
						CAM_CDM_CB_STATUS_PAGEFAULT,
						(iova & 0xFFFFFFFF));
				}
			} else {
				CAM_ERR(CAM_CDM,
					"No cb registered for client hdl=%x",
					client->handle);
			}
			mutex_unlock(&client->lock);
		}
	}
}
/*
 * cam_cdm_stream_ops_internal() - common stream ON/OFF handler.
 * @hw_priv:    struct cam_hw_info * of the CDM.
 * @start_args: pointer to the client handle (uint32_t).
 * @operation:  true = stream ON, false = stream OFF.
 *
 * open_count tracks streaming clients: the first ON votes CPAS bandwidth
 * and initializes the HW (plus gen-irq memory); the last OFF deinits the
 * HW and stops CPAS. Returns 0 on success or a negative errno.
 */
int cam_cdm_stream_ops_internal(void *hw_priv,
	void *start_args, bool operation)
{
	struct cam_hw_info *cdm_hw = hw_priv;
	struct cam_cdm *core = NULL;
	int rc = -EPERM;
	int client_idx;
	struct cam_cdm_client *client;
	uint32_t *handle = start_args;

	if (!hw_priv)
		return -EINVAL;

	core = (struct cam_cdm *)cdm_hw->core_info;
	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
	client = core->clients[client_idx];
	if (!client) {
		CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
		return -EINVAL;
	}
	/* Pin the client for the duration of the transition. */
	cam_cdm_get_client_refcount(client);
	if (*handle != client->handle) {
		CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
		cam_cdm_put_client_refcount(client);
		return -EINVAL;
	}
	/* NOTE(review): stream_on is checked here before hw_mutex is taken
	 * and updated later under hw_mutex — confirm callers serialize
	 * per-client stream transitions.
	 */
	if (operation == true) {
		if (true == client->stream_on) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM client is already streamed ON");
			cam_cdm_put_client_refcount(client);
			return rc;
		}
	} else {
		if (client->stream_on == false) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM client is already streamed Off");
			cam_cdm_put_client_refcount(client);
			return rc;
		}
	}

	mutex_lock(&cdm_hw->hw_mutex);
	if (operation == true) {
		if (!cdm_hw->open_count) {
			/* First streaming client: vote CPAS, then init HW. */
			struct cam_ahb_vote ahb_vote;
			struct cam_axi_vote axi_vote = {0};

			ahb_vote.type = CAM_VOTE_ABSOLUTE;
			ahb_vote.vote.level = CAM_SVS_VOTE;
			axi_vote.num_paths = 1;
			axi_vote.axi_path[0].path_data_type =
				CAM_AXI_PATH_DATA_ALL;
			axi_vote.axi_path[0].transac_type =
				CAM_AXI_TRANSACTION_READ;
			axi_vote.axi_path[0].camnoc_bw =
				CAM_CPAS_DEFAULT_AXI_BW;
			axi_vote.axi_path[0].mnoc_ab_bw =
				CAM_CPAS_DEFAULT_AXI_BW;
			axi_vote.axi_path[0].mnoc_ib_bw =
				CAM_CPAS_DEFAULT_AXI_BW;

			rc = cam_cpas_start(core->cpas_handle,
				&ahb_vote, &axi_vote);
			if (rc != 0) {
				CAM_ERR(CAM_CDM, "CPAS start failed");
				goto end;
			}
			CAM_DBG(CAM_CDM, "CDM init first time");
			if (core->id == CAM_CDM_VIRTUAL) {
				/* Virtual CDM has no HW to initialize. */
				CAM_DBG(CAM_CDM,
					"Virtual CDM HW init first time");
				rc = 0;
			} else {
				CAM_DBG(CAM_CDM, "CDM HW init first time");
				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
				if (rc == 0) {
					rc = cam_hw_cdm_alloc_genirq_mem(
						hw_priv);
					if (rc != 0) {
						CAM_ERR(CAM_CDM,
							"Genirqalloc failed");
						/* Unwind the HW init. */
						cam_hw_cdm_deinit(hw_priv,
							NULL, 0);
					}
				} else {
					CAM_ERR(CAM_CDM, "CDM HW init failed");
				}
			}
			if (rc == 0) {
				cdm_hw->open_count++;
				client->stream_on = true;
			} else {
				/* Init failed: release the CPAS vote. */
				if (cam_cpas_stop(core->cpas_handle))
					CAM_ERR(CAM_CDM, "CPAS stop failed");
			}
		} else {
			/* HW already running; just join the stream. */
			cdm_hw->open_count++;
			CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
				cdm_hw->open_count);
			rc = 0;
			client->stream_on = true;
		}
	} else {
		if (cdm_hw->open_count) {
			cdm_hw->open_count--;
			CAM_DBG(CAM_CDM, "stream OFF CDM %d",
				cdm_hw->open_count);
			if (!cdm_hw->open_count) {
				/* Last streaming client: deinit HW and stop
				 * CPAS.
				 */
				CAM_DBG(CAM_CDM, "CDM Deinit now");
				if (core->id == CAM_CDM_VIRTUAL) {
					CAM_DBG(CAM_CDM,
						"Virtual CDM HW Deinit");
					rc = 0;
				} else {
					CAM_DBG(CAM_CDM, "CDM HW Deinit now");
					rc = cam_hw_cdm_deinit(
						hw_priv, NULL, 0);
					if (cam_hw_cdm_release_genirq_mem(
						hw_priv))
						CAM_ERR(CAM_CDM,
							"Genirq release fail");
				}
				if (rc) {
					CAM_ERR(CAM_CDM,
						"Deinit failed in streamoff");
				} else {
					client->stream_on = false;
					rc = cam_cpas_stop(core->cpas_handle);
					if (rc)
						CAM_ERR(CAM_CDM,
							"CPAS stop failed");
				}
			} else {
				client->stream_on = false;
				rc = 0;
				CAM_DBG(CAM_CDM,
					"Client stream off success =%d",
					cdm_hw->open_count);
			}
		} else {
			/* OFF requested but nothing is streaming. */
			CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
				cdm_hw->open_count);
			rc = -ENXIO;
		}
	}
end:
	cam_cdm_put_client_refcount(client);
	mutex_unlock(&cdm_hw->hw_mutex);
	return rc;
}
/* Stream-ON entry point: thin wrapper that forwards to the common
 * handler with operation = true. @size is unused.
 */
int cam_cdm_stream_start(void *hw_priv,
	void *start_args, uint32_t size)
{
	if (!hw_priv)
		return -EINVAL;

	return cam_cdm_stream_ops_internal(hw_priv, start_args, true);
}
/* Stream-OFF entry point: thin wrapper that forwards to the common
 * handler with operation = false. @size is unused.
 */
int cam_cdm_stream_stop(void *hw_priv,
	void *start_args, uint32_t size)
{
	if (!hw_priv)
		return -EINVAL;

	return cam_cdm_stream_ops_internal(hw_priv, start_args, false);
}
/*
 * cam_cdm_process_cmd() - dispatcher for CDM interface commands.
 * @hw_priv:  struct cam_hw_info * of the CDM.
 * @cmd:      one of enum cam_cdm_hw_process_intf_cmd.
 * @cmd_args: command-specific payload (see each case).
 * @arg_size: size of @cmd_args payload; validated per command.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_process_cmd(void *hw_priv,
	uint32_t cmd, void *cmd_args, uint32_t arg_size)
{
	struct cam_hw_info *cdm_hw = hw_priv;
	struct cam_hw_soc_info *soc_data = NULL;
	struct cam_cdm *core = NULL;
	int rc = -EINVAL;

	if ((!hw_priv) || (!cmd_args) ||
		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
		return rc;

	soc_data = &cdm_hw->soc_info;
	core = (struct cam_cdm *)cdm_hw->core_info;
	switch (cmd) {
	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
		/* Submit a batch of BL (buffer list) commands on behalf of
		 * one client; payload is cam_cdm_hw_intf_cmd_submit_bl.
		 */
		struct cam_cdm_hw_intf_cmd_submit_bl *req;
		int idx;
		struct cam_cdm_client *client;

		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
				arg_size);
			break;
		}
		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
		/* NOTE(review): the < 0 check relies on the enum's underlying
		 * type being signed — confirm cam_cdm_bl_cmd_addr_type.
		 */
		if ((req->data->type < 0) ||
			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
			CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
				req->data->type);
			break;
		}
		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
		client = core->clients[idx];
		if ((!client) || (req->handle != client->handle)) {
			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
				req->handle);
			break;
		}
		/* Pin the client across the submit. */
		cam_cdm_get_client_refcount(client);
		/* A BL that requests a completion callback is rejected if
		 * the client never registered one.
		 */
		if ((req->data->flag == true) &&
			(!client->data.cam_cdm_callback)) {
			CAM_ERR(CAM_CDM,
				"CDM request cb without registering cb");
			cam_cdm_put_client_refcount(client);
			break;
		}
		if (client->stream_on != true) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM needs to be streamed ON first");
			cam_cdm_put_client_refcount(client);
			break;
		}
		if (core->id == CAM_CDM_VIRTUAL)
			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
		else
			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
		cam_cdm_put_client_refcount(client);
		break;
	}
	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
		/* Allocate a client slot and hand back a client handle;
		 * payload is cam_cdm_acquire_data (in/out).
		 */
		struct cam_cdm_acquire_data *data;
		int idx;
		struct cam_cdm_client *client;

		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
				arg_size);
			break;
		}
		mutex_lock(&cdm_hw->hw_mutex);
		data = (struct cam_cdm_acquire_data *)cmd_args;
		CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
			data->identifier, core->index);
		idx = cam_cdm_find_free_client_slot(core);
		if ((idx < 0) || (core->clients[idx])) {
			mutex_unlock(&cdm_hw->hw_mutex);
			CAM_ERR(CAM_CDM,
				"Fail to client slots, client=%s in hw idx=%d",
				data->identifier, core->index);
			break;
		}
		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
			GFP_KERNEL);
		if (!core->clients[idx]) {
			mutex_unlock(&cdm_hw->hw_mutex);
			rc = -ENOMEM;
			break;
		}
		/* NOTE(review): hw_mutex is dropped here while the new
		 * client is still being initialized below — confirm no
		 * concurrent release of this slot is possible.
		 */
		mutex_unlock(&cdm_hw->hw_mutex);
		client = core->clients[idx];
		mutex_init(&client->lock);
		data->ops = core->ops;
		if (core->id == CAM_CDM_VIRTUAL) {
			/* Virtual CDM reports a fixed 1.0.0 version and
			 * resolves ops by that version.
			 */
			data->cdm_version.major = 1;
			data->cdm_version.minor = 0;
			data->cdm_version.incr = 0;
			data->cdm_version.reserved = 0;
			data->ops = cam_cdm_get_ops(0,
				&data->cdm_version, true);
			if (!data->ops) {
				/* Unwind the partially created client. */
				mutex_destroy(&client->lock);
				mutex_lock(&cdm_hw->hw_mutex);
				kfree(core->clients[idx]);
				core->clients[idx] = NULL;
				mutex_unlock(
					&cdm_hw->hw_mutex);
				rc = -EPERM;
				CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
				break;
			}
		} else {
			data->cdm_version = core->version;
		}
		/* The acquire-time reference is dropped at RELEASE. */
		cam_cdm_get_client_refcount(client);
		mutex_lock(&client->lock);
		memcpy(&client->data, data,
			sizeof(struct cam_cdm_acquire_data));
		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
			core->index,
			idx);
		client->stream_on = false;
		data->handle = client->handle;
		CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
			data->identifier, core->index);
		mutex_unlock(&client->lock);
		rc = 0;
		break;
	}
	case CAM_CDM_HW_INTF_CMD_RELEASE: {
		/* Free a client slot; payload is the uint32_t handle. */
		uint32_t *handle = cmd_args;
		int idx;
		struct cam_cdm_client *client;

		if (sizeof(uint32_t) != arg_size) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM cmd %d size=%x for handle=%x",
				cmd, arg_size, *handle);
			return -EINVAL;
		}
		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
		mutex_lock(&cdm_hw->hw_mutex);
		client = core->clients[idx];
		if ((!client) || (*handle != client->handle)) {
			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
				client, *handle);
			mutex_unlock(&cdm_hw->hw_mutex);
			break;
		}
		/* Drop the reference taken at ACQUIRE; release only
		 * proceeds when no other references remain.
		 */
		cam_cdm_put_client_refcount(client);
		mutex_lock(&client->lock);
		if (client->refcount != 0) {
			CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
				client->refcount);
			rc = -EPERM;
			mutex_unlock(&client->lock);
			mutex_unlock(&cdm_hw->hw_mutex);
			break;
		}
		core->clients[idx] = NULL;
		mutex_unlock(&client->lock);
		mutex_destroy(&client->lock);
		kfree(client);
		mutex_unlock(&cdm_hw->hw_mutex);
		rc = 0;
		break;
	}
	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
		/* Reset is not implemented at this layer. */
		CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
			*((uint32_t *)cmd_args));
		break;
	}
	default:
		CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
		break;
	}
	return rc;
}

View File

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_CDM_CORE_COMMON_H_
#define _CAM_CDM_CORE_COMMON_H_

#include "cam_mem_mgr.h"

/* Raw HW version words for the supported CDM revisions */
#define CAM_CDM170_VERSION 0x10000000
#define CAM_CDM175_VERSION 0x10010000
#define CAM_CDM480_VERSION 0x10020000

/* Shared util ops table used by all supported CDM versions */
extern struct cam_cdm_utils_ops CDM170_ops;

/* HW backend init/deinit (implemented in cam_cdm_hw_core.c) */
int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
/* Allocate/release the memory region used for generating irqs */
int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
int cam_hw_cdm_release_genirq_mem(void *hw_priv);

/* Common core ops shared by virtual and HW CDM backends */
int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
	bool operation);
int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
	uint32_t arg_size);
/* Decode a raw version word into a cam_hw_version */
bool cam_cdm_set_cam_hw_version(
	uint32_t ver, struct cam_hw_version *cam_version);
/* CPAS error-irq callback (log only) */
bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data);
/* Resolve the util ops table by raw version or decoded cam_version */
struct cam_cdm_utils_ops *cam_cdm_get_ops(
	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
/* BL submission entry points for the two backends */
int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *req,
	struct cam_cdm_client *client);
int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *req,
	struct cam_cdm_client *client);
/* Find the pending callback entry matching a BL tag */
struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
	uint32_t tag, struct list_head *bl_list);
/* Deliver a CDM event (BL done / pagefault) to registered clients */
void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
	enum cam_cdm_cb_status status, void *data);

#endif /* _CAM_CDM_CORE_COMMON_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,573 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_cdm_intf_api.h"
#include "cam_cdm.h"
#include "cam_cdm_virtual.h"
#include "cam_soc_util.h"
#include "cam_cdm_soc.h"
/* Singleton CDM interface manager and its global lock; populated at
 * probe time and consumed by all cam_cdm_* entry points below.
 */
static struct cam_cdm_intf_mgr cdm_mgr;
static DEFINE_MUTEX(cam_cdm_mgr_lock);

/* DT match table for the CDM interface platform device */
static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
	{ .compatible = "qcom,cam-cdm-intf", },
	{}
};
/* Take a reference on the CDM interface manager. Fails with -EPERM if
 * the manager has not finished probing yet.
 */
static int get_cdm_mgr_refcount(void)
{
	int rc = 0;

	mutex_lock(&cam_cdm_mgr_lock);
	if (!cdm_mgr.probe_done) {
		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
		rc = -EPERM;
	} else {
		CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
			cdm_mgr.refcount);
		cdm_mgr.refcount++;
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	return rc;
}
/* Drop a reference on the CDM interface manager. Puts before probe or
 * on a zero refcount indicate a driver bug and are logged/trapped.
 */
static void put_cdm_mgr_refcount(void)
{
	mutex_lock(&cam_cdm_mgr_lock);
	if (!cdm_mgr.probe_done) {
		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
		goto out;
	}
	CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
		cdm_mgr.refcount);
	if (cdm_mgr.refcount > 0) {
		cdm_mgr.refcount--;
	} else {
		CAM_ERR(CAM_CDM, "Refcount put when zero");
		WARN_ON(1);
	}
out:
	mutex_unlock(&cam_cdm_mgr_lock);
}
/* Query the IOMMU handle of the CDM registered at @hw_idx via its
 * get_hw_caps op. Returns -EPERM when the op is not provided.
 */
static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
	uint32_t hw_idx)
{
	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;

	if (!hw->hw_ops.get_hw_caps)
		return -EPERM;

	return hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
		sizeof(struct cam_iommu_handle));
}
/*
 * get_cdm_index_by_id() - map a client identifier to its CDM HW index.
 * @identifier: client name to look up (compared against DT client names;
 *              note it is first copied into a 128-byte buffer, so names
 *              longer than 127 chars are truncated before comparison).
 * @cell_index: logged only; not used in the lookup.
 * @hw_index:   out param, set only when a match is found.
 *
 * Returns 0 on a match, -EPERM otherwise.
 */
static int get_cdm_index_by_id(char *identifier,
	uint32_t cell_index, uint32_t *hw_index)
{
	int rc = -EPERM, i, j;
	char client_name[128];

	CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
		identifier, cell_index);
	snprintf(client_name, sizeof(client_name), "%s", identifier);
	CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
		cdm_mgr.cdm_count);
	mutex_lock(&cam_cdm_mgr_lock);
	for (i = 0; i < cdm_mgr.cdm_count; i++) {
		/* Per-node lock protects the node's DT data. */
		mutex_lock(&cdm_mgr.nodes[i].lock);
		CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
			cdm_mgr.nodes[i].data->dt_num_supported_clients);
		for (j = 0; j <
			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
			CAM_DBG(CAM_CDM, "client name:%s",
				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
			if (!strcmp(
				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
				client_name)) {
				rc = 0;
				*hw_index = i;
				break;
			}
		}
		mutex_unlock(&cdm_mgr.nodes[i].lock);
		if (rc == 0)
			break;
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	return rc;
}
/*
 * cam_cdm_get_iommu_handle() - fetch the IOMMU handle for the CDM that
 * serves client @identifier.
 * @identifier:  client name as listed in a CDM node's DT client names.
 * @cdm_handles: out param, filled by the matching CDM's get_hw_caps op.
 *
 * Returns 0 on success, -EINVAL on bad args, -EPERM when no CDM lists
 * the client (or the manager is not probed).
 */
int cam_cdm_get_iommu_handle(char *identifier,
	struct cam_iommu_handle *cdm_handles)
{
	int i, j, rc = -EPERM;

	if ((!identifier) || (!cdm_handles))
		return -EINVAL;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}
	CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
	for (i = 0; i < cdm_mgr.cdm_count; i++) {
		mutex_lock(&cdm_mgr.nodes[i].lock);
		/* Node may have been deregistered; skip empty slots. */
		if (!cdm_mgr.nodes[i].data) {
			mutex_unlock(&cdm_mgr.nodes[i].lock);
			continue;
		}
		for (j = 0; j <
			cdm_mgr.nodes[i].data->dt_num_supported_clients;
			j++) {
			if (!strcmp(
				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
				identifier)) {
				/* Query while still holding the node lock. */
				rc = get_cdm_iommu_handle(cdm_handles, i);
				break;
			}
		}
		mutex_unlock(&cdm_mgr.nodes[i].lock);
		if (rc == 0)
			break;
	}
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
/*
 * cam_cdm_acquire() - acquire a CDM client slot for @data->identifier.
 * On success the manager reference taken here is deliberately retained;
 * it is dropped by the extra put in cam_cdm_release().
 *
 * Fix: the original guard `(rc < 0) && (hw_index < MAX)` would fall into
 * the node-dereferencing branch if rc < 0 with an out-of-range hw_index;
 * now any lookup failure OR out-of-range index errors out before the
 * nodes[] access.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
{
	int rc = -EPERM;
	struct cam_hw_intf *hw;
	uint32_t hw_index = 0;

	if ((!data) || (!data->base_array_cnt))
		return -EINVAL;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}

	if (data->id > CAM_CDM_HW_ANY) {
		CAM_ERR(CAM_CDM,
			"only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
		rc = -EPERM;
		goto end;
	}

	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
		&hw_index);
	if ((rc < 0) || (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
		rc = (rc < 0) ? rc : -EPERM;
		goto end;
	}

	CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
	hw = cdm_mgr.nodes[hw_index].device;
	if (hw && hw->hw_ops.process_cmd) {
		rc = hw->hw_ops.process_cmd(hw->hw_priv,
			CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
			sizeof(struct cam_cdm_acquire_data));
		if (rc < 0) {
			CAM_ERR(CAM_CDM, "CDM hw acquire failed");
			goto end;
		}
	} else {
		CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
			hw_index);
		rc = -EPERM;
	}
end:
	if (rc < 0) {
		CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
			data->id, data->identifier, data->cell_index);
		/* Failure path drops the manager reference; the success
		 * path keeps it until cam_cdm_release().
		 */
		put_cdm_mgr_refcount();
	}
	return rc;
}
EXPORT_SYMBOL(cam_cdm_acquire);
EXPORT_SYMBOL(cam_cdm_acquire);
/*
 * cam_cdm_release() - release the client slot identified by @handle.
 *
 * On success the manager refcount is put TWICE: once to balance the get
 * taken at the top of this function, and once more to drop the reference
 * that cam_cdm_acquire() intentionally retained on its success path.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_release(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EPERM;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}
	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		hw = cdm_mgr.nodes[hw_index].device;
		if (hw && hw->hw_ops.process_cmd) {
			rc = hw->hw_ops.process_cmd(hw->hw_priv,
				CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
				sizeof(handle));
			if (rc < 0)
				CAM_ERR(CAM_CDM,
					"hw release failed for handle=%x",
					handle);
		} else
			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
				hw_index);
	}
	put_cdm_mgr_refcount();
	/* Second put balances the reference held since cam_cdm_acquire(). */
	if (rc == 0)
		put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_release);
EXPORT_SYMBOL(cam_cdm_release);
/*
 * cam_cdm_submit_bls() - forward a BL request to the owning CDM's
 * process_cmd op as a SUBMIT_BL command.
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (!data)
		return rc;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return -EPERM;
	}

	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		hw = cdm_mgr.nodes[hw_index].device;
		if (hw && hw->hw_ops.process_cmd) {
			struct cam_cdm_hw_intf_cmd_submit_bl req = {
				.handle = handle,
				.data = data,
			};

			rc = hw->hw_ops.process_cmd(hw->hw_priv,
				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
			if (rc < 0)
				CAM_ERR(CAM_CDM,
					"hw submit bl failed for handle=%x",
					handle);
		} else {
			CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
				hw_index);
		}
	}
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_submit_bls);
EXPORT_SYMBOL(cam_cdm_submit_bls);
/* Stream the client's CDM ON via the device's start op.
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_stream_on(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return -EPERM;
	}

	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)
		goto put;

	hw = cdm_mgr.nodes[hw_index].device;
	if (!hw || !hw->hw_ops.start) {
		CAM_ERR(CAM_CDM,
			"hw idx %d doesn't have start ops",
			hw_index);
		goto put;
	}
	rc = hw->hw_ops.start(hw->hw_priv, &handle, sizeof(uint32_t));
	if (rc < 0)
		CAM_ERR(CAM_CDM,
			"hw start failed handle=%x",
			handle);
put:
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_stream_on);
EXPORT_SYMBOL(cam_cdm_stream_on);
/* Stream the client's CDM OFF via the device's stop op.
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_stream_off(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return -EPERM;
	}

	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)
		goto put;

	hw = cdm_mgr.nodes[hw_index].device;
	if (!hw || !hw->hw_ops.stop) {
		CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
			hw_index);
		goto put;
	}
	rc = hw->hw_ops.stop(hw->hw_priv, &handle, sizeof(uint32_t));
	if (rc < 0)
		CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
			handle);
put:
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_stream_off);
EXPORT_SYMBOL(cam_cdm_stream_off);
/* Request a HW reset of the client's CDM via a RESET_HW process_cmd.
 * Returns 0 on success, negative errno otherwise.
 */
int cam_cdm_reset_hw(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return -EPERM;
	}

	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)
		goto put;

	hw = cdm_mgr.nodes[hw_index].device;
	if (!hw || !hw->hw_ops.process_cmd) {
		CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
			hw_index);
		goto put;
	}
	rc = hw->hw_ops.process_cmd(hw->hw_priv,
		CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
		sizeof(handle));
	if (rc < 0)
		CAM_ERR(CAM_CDM,
			"CDM hw release failed for handle=%x",
			handle);
put:
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_reset_hw);
EXPORT_SYMBOL(cam_cdm_reset_hw);
/*
 * cam_cdm_intf_register_hw_cdm() - add a CDM backend to the manager.
 * @hw:    the backend's hw interface.
 * @data:  its private DT data (client names, etc.).
 * @type:  CAM_VIRTUAL_CDM goes into the reserved SW slot (index 0);
 *         CAM_HW_CDM is appended after the virtual one.
 * @index: out param, assigned manager slot.
 *
 * Returns 0 on success, -EINVAL otherwise. A HW CDM can only register
 * after the virtual CDM (cdm_count > 0).
 */
int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t *index)
{
	int rc = -EINVAL;

	if ((!hw) || (!data) || (!index))
		return rc;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}

	mutex_lock(&cam_cdm_mgr_lock);
	if ((type == CAM_VIRTUAL_CDM) &&
		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
		/* NOTE(review): *index is cdm_count rather than
		 * CAM_SW_CDM_INDEX; equal only when the virtual CDM is the
		 * first registration — confirm probe ordering guarantees it.
		 */
		*index = cdm_mgr.cdm_count;
		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
		cdm_mgr.cdm_count++;
		rc = 0;
	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) {
		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
		*index = cdm_mgr.cdm_count;
		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
		cdm_mgr.cdm_count++;
		rc = 0;
	} else {
		CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
			type, cdm_mgr.cdm_count);
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	put_cdm_mgr_refcount();
	return rc;
}
/**
 * cam_cdm_intf_deregister_hw_cdm() - Remove a previously registered CDM
 * from the interface manager's node table.
 * @hw:    HW interface that was registered.
 * @data:  Private DT data that was registered.
 * @type:  CAM_VIRTUAL_CDM or CAM_HW_CDM.
 * @index: Slot index returned at registration time.
 *
 * Fix: the virtual-CDM branch previously locked
 * cdm_mgr.nodes[cdm_mgr.cdm_count].lock — a different slot's mutex —
 * while clearing nodes[CAM_SW_CDM_INDEX]; it now locks the slot it
 * actually modifies.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t index)
{
	int rc = -EINVAL;

	if ((!hw) || (!data))
		return rc;
	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		rc = -EPERM;
		return rc;
	}
	mutex_lock(&cam_cdm_mgr_lock);
	if ((type == CAM_VIRTUAL_CDM) &&
		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
		(index == CAM_SW_CDM_INDEX)) {
		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
		rc = 0;
	} else if ((type == CAM_HW_CDM) &&
		(hw == cdm_mgr.nodes[index].device)) {
		mutex_lock(&cdm_mgr.nodes[index].lock);
		cdm_mgr.nodes[index].device = NULL;
		cdm_mgr.nodes[index].data = NULL;
		mutex_unlock(&cdm_mgr.nodes[index].lock);
		cdm_mgr.cdm_count--;
		rc = 0;
	} else {
		CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
			type, index);
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	put_cdm_mgr_refcount();
	return rc;
}
/**
 * cam_cdm_intf_probe() - Platform probe for the CDM interface manager.
 *
 * Parses DT properties, initialises every node slot under the manager
 * lock, then probes the virtual (SW) CDM. On virtual-CDM probe failure
 * all slots are torn back down and probe_done is cleared so later intf
 * calls fail fast.
 */
static int cam_cdm_intf_probe(struct platform_device *pdev)
{
	int i, rc;

	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
	if (rc) {
		CAM_ERR(CAM_CDM, "Failed to get dt properties");
		return rc;
	}
	mutex_lock(&cam_cdm_mgr_lock);
	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
		mutex_init(&cdm_mgr.nodes[i].lock);
		cdm_mgr.nodes[i].device = NULL;
		cdm_mgr.nodes[i].data = NULL;
		cdm_mgr.nodes[i].refcount = 0;
	}
	cdm_mgr.probe_done = true;
	cdm_mgr.refcount = 0;
	mutex_unlock(&cam_cdm_mgr_lock);
	rc = cam_virtual_cdm_probe(pdev);
	if (rc) {
		/* roll back: the virtual CDM is mandatory for the manager */
		mutex_lock(&cam_cdm_mgr_lock);
		cdm_mgr.probe_done = false;
		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
				(cdm_mgr.nodes[i].refcount != 0))
				CAM_ERR(CAM_CDM,
					"Valid node present in index=%d", i);
			mutex_destroy(&cdm_mgr.nodes[i].lock);
			cdm_mgr.nodes[i].device = NULL;
			cdm_mgr.nodes[i].data = NULL;
			cdm_mgr.nodes[i].refcount = 0;
		}
		mutex_unlock(&cam_cdm_mgr_lock);
	}
	CAM_DBG(CAM_CDM, "CDM Intf probe done");
	return rc;
}
/**
 * cam_cdm_intf_remove() - Platform remove; tears down the virtual CDM and
 * the interface manager state when no clients remain.
 *
 * Fixes in this version:
 * - the virtual-CDM-remove failure path previously jumped to `end` and
 *   unlocked cam_cdm_mgr_lock WITHOUT holding it (and leaked the mgr
 *   refcount); it now drops the refcount and returns directly;
 * - the "valid node present" path previously unlocked the mutex inline
 *   and then unlocked it AGAIN at the `end` label (double unlock); the
 *   single unlock now lives only at `end`.
 *
 * Returns 0 on success, -EBUSY if any client or node is still active.
 */
static int cam_cdm_intf_remove(struct platform_device *pdev)
{
	int i, rc = -EBUSY;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}
	if (cam_virtual_cdm_remove(pdev)) {
		CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
		put_cdm_mgr_refcount();
		return rc;
	}
	put_cdm_mgr_refcount();

	mutex_lock(&cam_cdm_mgr_lock);
	if (cdm_mgr.refcount != 0) {
		CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d",
			cdm_mgr.refcount);
		goto end;
	}
	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
			(cdm_mgr.nodes[i].refcount != 0)) {
			CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
			goto end;
		}
		mutex_destroy(&cdm_mgr.nodes[i].lock);
		cdm_mgr.nodes[i].device = NULL;
		cdm_mgr.nodes[i].data = NULL;
		cdm_mgr.nodes[i].refcount = 0;
	}
	cdm_mgr.probe_done = false;
	rc = 0;
end:
	mutex_unlock(&cam_cdm_mgr_lock);
	return rc;
}
/*
 * Platform driver for the CDM interface manager. Bind attrs are
 * suppressed so userspace cannot unbind the device from under active
 * CDM clients.
 */
static struct platform_driver cam_cdm_intf_driver = {
	.probe = cam_cdm_intf_probe,
	.remove = cam_cdm_intf_remove,
	.driver = {
		.name = "msm_cam_cdm_intf",
		.owner = THIS_MODULE,
		.of_match_table = msm_cam_cdm_intf_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module entry: register the CDM interface platform driver. */
static int __init cam_cdm_intf_init_module(void)
{
	return platform_driver_register(&cam_cdm_intf_driver);
}

/* Module exit: unregister the CDM interface platform driver. */
static void __exit cam_cdm_intf_exit_module(void)
{
	platform_driver_unregister(&cam_cdm_intf_driver);
}

module_init(cam_cdm_intf_init_module);
module_exit(cam_cdm_intf_exit_module);
MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,202 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_API_H_
#define _CAM_CDM_API_H_
#include <media/cam_defs.h>
#include "cam_cdm_util.h"
#include "cam_soc_util.h"
/* enum cam_cdm_id - Possible CAM CDM hardware instances a client may ask
 * for; CAM_CDM_HW_ANY lets the driver pick any available HW CDM.
 */
enum cam_cdm_id {
	CAM_CDM_VIRTUAL,
	CAM_CDM_HW_ANY,
	CAM_CDM_CPAS_0,
	CAM_CDM_IPE0,
	CAM_CDM_IPE1,
	CAM_CDM_BPS,
	CAM_CDM_VFE,
	CAM_CDM_MAX
};

/* enum cam_cdm_cb_status - Status codes delivered to the client callback */
enum cam_cdm_cb_status {
	CAM_CDM_CB_STATUS_BL_SUCCESS,
	CAM_CDM_CB_STATUS_INVALID_BL_CMD,
	CAM_CDM_CB_STATUS_PAGEFAULT,
	CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
	CAM_CDM_CB_STATUS_HW_RESET_DONE,
	CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
};

/* enum cam_cdm_bl_cmd_addr_type - Address types for submitted BL commands */
enum cam_cdm_bl_cmd_addr_type {
	CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
	CAM_CDM_BL_CMD_TYPE_HW_IOVA,
	CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
};
/**
 * struct cam_cdm_acquire_data - Cam CDM acquire data structure
 *
 * @identifier : Input identifier string which is the device label from dt
 *    like vfe, ife, jpeg etc
 * @cell_index : Input integer identifier pointing to the cell index from dt
 *    of the device. This can be used to form a unique string
 *    with @identifier like vfe0, ife1, jpeg0 etc
 * @id : ID of a specific or any CDM HW which needs to be acquired.
 * @userdata : Input private data which will be returned as part
 *    of callback.
 * @cam_cdm_callback : Input callback pointer for triggering the
 *    callbacks from CDM driver; invoked with the client handle, the
 *    @userdata given here, the callback status, and a cookie (set when
 *    the callback reports a gen-irq status).
 * @base_array_cnt : Input number of ioremapped address pairs pointing
 *    in base_array, needed only if selected cdm is a virtual.
 * @base_array : Input pointer to ioremapped address pair array,
 *    needed only if selected cdm is a virtual.
 * @cdm_version : CDM version is output while acquiring HW cdm and
 *    it is Input while acquiring virtual cdm, Currently fixing it
 *    to one version below acquire API.
 * @ops : Output pointer updated by cdm driver to the CDM
 *    util ops for this HW version of CDM acquired.
 * @handle : Output Unique handle generated for this acquire
 *
 */
struct cam_cdm_acquire_data {
	char identifier[128];
	uint32_t cell_index;
	enum cam_cdm_id id;
	void *userdata;
	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
		enum cam_cdm_cb_status status, uint64_t cookie);
	uint32_t base_array_cnt;
	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
	struct cam_hw_version cdm_version;
	struct cam_cdm_utils_ops *ops;
	uint32_t handle;
};
/**
 * struct cam_cdm_bl_cmd - Cam CDM HW bl command
 *
 * @bl_addr : Union of all three types for CDM BL commands
 * @mem_handle : Input mem handle of bl cmd
 * @offset : Input offset of the actual bl cmd in the memory pointed
 *    by mem_handle
 * @len : Input length of the BL command. Cannot be more than 1MB and
 *    will be validated against offset+size of the memory pointed
 *    by mem_handle
 *
 */
struct cam_cdm_bl_cmd {
	union {
		int32_t mem_handle;
		uint32_t *hw_iova;
		uintptr_t kernel_iova;
	} bl_addr;
	uint32_t offset;
	uint32_t len;
};

/**
 * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
 *
 * @flag : 1 for callback needed and 0 for no callback when this BL
 *    request is done
 * @userdata : Input private data which will be returned as part
 *    of callback if requested for this bl request in flags.
 * @cookie : Cookie if the callback is gen irq status
 * @type : type of the submitted bl cmd address.
 * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
 * @cmd : Input payload holding the BL cmd array to be submitted
 *    (flexible tail; callers allocate cmd_arrary_count entries).
 *
 */
struct cam_cdm_bl_request {
	int flag;
	void *userdata;
	uint64_t cookie;
	enum cam_cdm_bl_cmd_addr_type type;
	uint32_t cmd_arrary_count;
	struct cam_cdm_bl_cmd cmd[1];
};
/**
 * @brief : API to get the CDM capabilities for a camera device type
 *
 * @identifier : Input pointer to a string which is the device label from dt
 *    like vfe, ife, jpeg etc, We do not need cell index
 *    assuming all devices of a single type maps to one SMMU
 *    client
 * @cdm_handles : Input iommu handle memory pointer to update handles
 *
 * @return 0 on success
 */
int cam_cdm_get_iommu_handle(char *identifier,
	struct cam_iommu_handle *cdm_handles);

/**
 * @brief : API to acquire a CDM
 *
 * @data : Input data for the CDM to be acquired
 *
 * @return 0 on success
 */
int cam_cdm_acquire(struct cam_cdm_acquire_data *data);

/**
 * @brief : API to release a previously acquired CDM
 *
 * @handle : Input handle for the CDM to be released
 *
 * @return 0 on success
 */
int cam_cdm_release(uint32_t handle);

/**
 * @brief : API to submit the base & length (BL's) for acquired CDM
 *
 * @handle : Input cdm handle to which the BL's needs to be submitted.
 * @data : Input pointer to the BL's to be submitted
 *
 * @return 0 on success
 */
int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);

/**
 * @brief : API to stream ON a previously acquired CDM;
 * during this we turn on/off clocks/power based on active clients.
 *
 * @handle : Input handle for the CDM to be streamed ON
 *
 * @return 0 on success
 */
int cam_cdm_stream_on(uint32_t handle);

/**
 * @brief : API to stream OFF a previously acquired CDM;
 * during this we turn on/off clocks/power based on active clients.
 *
 * @handle : Input handle for the CDM to be streamed OFF
 *
 * @return 0 on success
 */
int cam_cdm_stream_off(uint32_t handle);

/**
 * @brief : API to reset a previously acquired CDM;
 * this can only be performed when the CDM is private.
 *
 * @handle : Input handle of the CDM to reset
 *
 * @return 0 on success
 */
int cam_cdm_reset_hw(uint32_t handle);
#endif /* _CAM_CDM_API_H_ */

View File

@@ -0,0 +1,199 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_soc_util.h"
#include "cam_smmu_api.h"
#include "cam_cdm.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
/**
 * cam_cdm_read_hw_reg() - Read a CDM register given its logical enum id.
 * @cdm_hw: CDM HW info with the ioremapped register block.
 * @reg:    Logical register id; translated via the per-SoC offset table.
 * @value:  Out param; register contents, 0 on failure.
 *
 * Fixes: removed a duplicated entry CAM_DBG (the same line was logged
 * twice), and tightened the map bound from `>` to `>=` so a computed
 * address equal to base+mem_len (one past the map) is rejected.
 *
 * Returns false on success, true on an invalid/out-of-range register
 * (note the inverted bool convention used by the callers).
 */
bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
	enum cam_cdm_regs reg, uint32_t *value)
{
	void __iomem *reg_addr;
	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
	void __iomem *base =
		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
	resource_size_t mem_len =
		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;

	CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
		reg)));
	if ((reg > cdm->offset_tbl->offset_max_size) ||
		(reg > cdm->offset_tbl->last_offset)) {
		CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
		goto permission_error;
	} else {
		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
				cdm->offset_tbl, reg)));
		if (reg_addr >= (base + mem_len)) {
			CAM_ERR_RATE_LIMIT(CAM_CDM,
				"Invalid mapped region %d", reg);
			goto permission_error;
		}
		*value = cam_io_r_mb(reg_addr);
		CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
			(void __iomem *)base, reg,
			(CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
			*value);
		return false;
	}
permission_error:
	*value = 0;
	return true;
}
/**
 * cam_cdm_write_hw_reg() - Write a CDM register given its logical enum id.
 * @cdm_hw: CDM HW info with the ioremapped register block.
 * @reg:    Logical register id; translated via the per-SoC offset table.
 * @value:  Value to write (with memory barrier).
 *
 * Fix: tightened the map bound from `>` to `>=` so a computed address
 * equal to base+mem_len (one past the map) is rejected, mirroring the
 * read path.
 *
 * Returns false on success, true on an invalid/out-of-range register.
 */
bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
	enum cam_cdm_regs reg, uint32_t value)
{
	void __iomem *reg_addr;
	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
	void __iomem *base =
		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
	resource_size_t mem_len =
		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;

	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
	if ((reg > cdm->offset_tbl->offset_max_size) ||
		(reg > cdm->offset_tbl->last_offset)) {
		CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
			reg);
		goto permission_error;
	} else {
		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
				cdm->offset_tbl, reg));
		if (reg_addr >= (base + mem_len)) {
			CAM_ERR_RATE_LIMIT(CAM_CDM,
				"Accessing invalid region %d:%d\n",
				reg, (CAM_CDM_OFFSET_FROM_REG(
				cdm->offset_tbl, reg)));
			goto permission_error;
		}
		cam_io_w_mb(value, reg_addr);
		return false;
	}
permission_error:
	return true;
}
/**
 * cam_cdm_soc_load_dt_private() - Parse the "cdm-client-names" DT property
 * into the CDM private data.
 * @pdev: Platform device whose of_node is parsed.
 * @ptr:  Private DT data to fill (client count, shared flag, names).
 *
 * A missing property (negative count) is treated as "no shared clients":
 * count is forced to 0 and dt_cdm_shared cleared.
 */
int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
	struct cam_cdm_private_dt_data *ptr)
{
	int i, rc = -EINVAL;

	/* negative on missing property, else the number of strings */
	ptr->dt_num_supported_clients = of_property_count_strings(
					pdev->dev.of_node,
					"cdm-client-names");
	CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
		ptr->dt_num_supported_clients);
	if (ptr->dt_num_supported_clients >
		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
		CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
			ptr->dt_num_supported_clients);
		rc = -EINVAL;
		return rc;
	}
	if (ptr->dt_num_supported_clients < 0) {
		CAM_DBG(CAM_CDM, "No cdm client names found");
		ptr->dt_num_supported_clients = 0;
		ptr->dt_cdm_shared = false;
	} else {
		ptr->dt_cdm_shared = true;
	}
	/*
	 * NOTE(review): if the client count ends up 0 the loop never runs
	 * and rc stays -EINVAL — confirm callers rely on that as failure.
	 */
	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
		rc = of_property_read_string_index(pdev->dev.of_node,
			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
		CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
			ptr->dt_cdm_client_name[i]);
		if (rc < 0) {
			CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
			break;
		}
	}
	return rc;
}
/**
 * cam_hw_cdm_soc_get_dt_properties() - Load SoC + private DT data for a HW
 * CDM and bind the register offset table matched from @table.
 * @cdm_hw: CDM HW info; soc_private must be NULL on entry.
 * @table:  of_device_id table whose .data points to the matching
 *          cam_cdm_reg_offset_table for this compatible string.
 *
 * On any failure after allocation, soc_private is freed and reset to
 * NULL. Returns 0 on success, -EINVAL/-ENOMEM on failure.
 */
int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
	const struct of_device_id *table)
{
	int rc;
	struct cam_hw_soc_info *soc_ptr;
	const struct of_device_id *id;

	if (!cdm_hw || (cdm_hw->soc_info.soc_private)
		|| !(cdm_hw->soc_info.pdev))
		return -EINVAL;
	soc_ptr = &cdm_hw->soc_info;
	rc = cam_soc_util_get_dt_properties(soc_ptr);
	if (rc != 0) {
		CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
	} else {
		soc_ptr->soc_private = kzalloc(
				sizeof(struct cam_cdm_private_dt_data),
				GFP_KERNEL);
		if (!soc_ptr->soc_private)
			return -ENOMEM;
		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
			soc_ptr->soc_private);
		if (rc != 0) {
			CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
			goto error;
		}
		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
		if ((!id) || !(id->data)) {
			CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
			goto error;
		}
		CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
		/* bind the per-SoC register offset table and name */
		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
			(struct cam_cdm_reg_offset_table *)id->data;
		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
			id->compatible,
			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
	}
	return rc;

error:
	rc = -EINVAL;
	kfree(soc_ptr->soc_private);
	soc_ptr->soc_private = NULL;
	return rc;
}
/* Read the manager-level "num-hw-cdm" DT property into @mgr. Returns the
 * of_property_read_u32() result (0 on success, negative errno otherwise).
 */
int cam_cdm_intf_mgr_soc_get_dt_properties(
	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
{
	int ret = of_property_read_u32(pdev->dev.of_node,
			"num-hw-cdm", &mgr->dt_supported_hw_cdm);

	CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
		mgr->dt_supported_hw_cdm);

	return ret;
}

View File

@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_CDM_SOC_H_
#define _CAM_CDM_SOC_H_

/* Parse DT properties for a HW CDM and bind its register offset table. */
int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
	const struct of_device_id *table);
/* Read a CDM register by logical id; returns true on failure. */
bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
	enum cam_cdm_regs reg, uint32_t *value);
/* Write a CDM register by logical id; returns true on failure. */
bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
	enum cam_cdm_regs reg, uint32_t value);
/* Read the interface-manager DT properties (num-hw-cdm). */
int cam_cdm_intf_mgr_soc_get_dt_properties(
	struct platform_device *pdev,
	struct cam_cdm_intf_mgr *mgr);
/* Parse the "cdm-client-names" private DT data for one CDM. */
int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
	struct cam_cdm_private_dt_data *ptr);
#endif /* _CAM_CDM_SOC_H_ */

View File

@@ -0,0 +1,717 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "cam_cdm_intf_api.h"
#include "cam_cdm_util.h"
#include "cam_cdm.h"
#include "cam_io_util.h"
#define CAM_CDM_DWORD 4
#define CAM_CDM_SW_CMD_COUNT 2
#define CAM_CMD_LENGTH_MASK 0xFFFF
#define CAM_CDM_COMMAND_OFFSET 24
#define CAM_CDM_REG_OFFSET_MASK 0x00FFFFFF
#define CAM_CDM_DMI_DATA_HI_OFFSET 8
#define CAM_CDM_DMI_DATA_OFFSET 8
#define CAM_CDM_DMI_DATA_LO_OFFSET 12

/*
 * Header size in DWORDs of each CDM command, indexed by
 * enum cam_cdm_command. Unlisted entries default to 0.
 */
static unsigned int CDMCmdHeaderSizes[
	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
	0, /* UNUSED */
	3, /* DMI */
	0, /* UNUSED */
	2, /* RegContinuous */
	1, /* RegRandom */
	2, /* BUFFER_INDIRECT */
	2, /* GenerateIRQ */
	3, /* WaitForEvent */
	1, /* ChangeBase */
	1, /* PERF_CONTROL */
	3, /* DMI32 */
	3, /* DMI64 */
};
/**
 * struct cdm_regrandom_cmd - Definition for CDM random register command.
 * @count: Number of register/value pairs that follow the header
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 */
struct cdm_regrandom_cmd {
	unsigned int count    : 16;
	unsigned int reserved : 8;
	unsigned int cmd      : 8;
} __attribute__((__packed__));

/**
 * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
 * @count: Number of register writes
 * @reserved0: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @offset: Start address of the range of registers
 * @reserved1: reserved bits
 */
struct cdm_regcontinuous_cmd {
	unsigned int count     : 16;
	unsigned int reserved0 : 8;
	unsigned int cmd       : 8;
	unsigned int offset    : 24;
	unsigned int reserved1 : 8;
} __attribute__((__packed__));

/**
 * struct cdm_dmi_cmd - Definition for a CDM DMI command.
 * @length: Number of bytes in LUT - 1
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @addr: Address of the LUT in memory
 * @DMIAddr: Address of the target DMI config register
 * @DMISel: DMI identifier
 */
struct cdm_dmi_cmd {
	unsigned int length   : 16;
	unsigned int reserved : 8;
	unsigned int cmd      : 8;
	unsigned int addr;
	unsigned int DMIAddr  : 24;
	unsigned int DMISel   : 8;
} __attribute__((__packed__));
/**
 * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
 * @length: Number of bytes in buffer - 1
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @addr: Device address of the indirect buffer
 */
struct cdm_indirect_cmd {
	unsigned int length   : 16;
	unsigned int reserved : 8;
	unsigned int cmd      : 8;
	unsigned int addr;
} __attribute__((__packed__));

/**
 * struct cdm_changebase_cmd - Definition for CDM base address change command.
 * @base: Base address to be changed to
 * @cmd: Command ID (CDMCmd)
 */
struct cdm_changebase_cmd {
	unsigned int base : 24;
	unsigned int cmd  : 8;
} __attribute__((__packed__));

/**
 * struct cdm_wait_event_cmd - Definition for a CDM wait-event command.
 * (Note: the original header mislabelled this as "Gen IRQ".)
 * @mask: Mask for the events
 * @id: ID to read back for debug
 * @iw_reserved: reserved bits
 * @iw: iw AHB write bit
 * @cmd: Command ID (CDMCmd)
 * @offset: Offset to where data is written
 * @offset_reserved: reserved bits
 * @data: data returned in IRQ_USR_DATA
 */
struct cdm_wait_event_cmd {
	unsigned int mask            : 8;
	unsigned int id              : 8;
	unsigned int iw_reserved     : 7;
	unsigned int iw              : 1;
	unsigned int cmd             : 8;
	unsigned int offset          : 24;
	unsigned int offset_reserved : 8;
	unsigned int data;
} __attribute__((__packed__));

/**
 * struct cdm_genirq_cmd - Definition for a CDM Gen IRQ command.
 * (Note: the original header mislabelled this as "Wait event".)
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @userdata: userdata returned in IRQ_USR_DATA
 */
struct cdm_genirq_cmd {
	unsigned int reserved : 24;
	unsigned int cmd      : 8;
	unsigned int userdata;
} __attribute__((__packed__));

/**
 * struct cdm_perf_ctrl_cmd - Definition for CDM perf control command.
 * @perf: perf command
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 */
struct cdm_perf_ctrl_cmd {
	unsigned int perf     : 2;
	unsigned int reserved : 22;
	unsigned int cmd      : 8;
} __attribute__((__packed__));
uint32_t cdm_get_cmd_header_size(unsigned int command)
{
return CDMCmdHeaderSizes[command];
}
/* Dwords needed for a reg-continuous command with @numVals payload values. */
uint32_t cdm_required_size_reg_continuous(uint32_t numVals)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
}

/* Dwords needed for a reg-random command: 2 dwords per offset/value pair. */
uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
		(2 * numRegVals);
}

/* Header-only sizes (dwords) for the remaining command types. */
uint32_t cdm_required_size_dmi(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
}

uint32_t cdm_required_size_genirq(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
}

uint32_t cdm_required_size_indirect(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
}

uint32_t cdm_required_size_changebase(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
}

/* Byte offset of the patchable addr field inside each command header. */
uint32_t cdm_offsetof_dmi_addr(void)
{
	return offsetof(struct cdm_dmi_cmd, addr);
}

uint32_t cdm_offsetof_indirect_addr(void)
{
	return offsetof(struct cdm_indirect_cmd, addr);
}
/* Emit a reg-continuous command header followed by @numVals payload
 * dwords; returns the cursor just past the written command.
 */
uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
	uint32_t numVals, uint32_t *pVals)
{
	struct cdm_regcontinuous_cmd *hdr =
		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
	uint32_t *payload;
	uint32_t idx;

	hdr->count = numVals;
	hdr->cmd = CAM_CDM_CMD_REG_CONT;
	hdr->reserved0 = 0;
	hdr->reserved1 = 0;
	hdr->offset = reg;

	payload = pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
	for (idx = 0; idx < numVals; idx++)
		payload[idx] = pVals[idx];

	return payload + numVals;
}
/* Emit a reg-random command header followed by @numRegVals offset/value
 * pairs; returns the cursor just past the written command.
 */
uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
	uint32_t *pRegVals)
{
	struct cdm_regrandom_cmd *hdr =
		(struct cdm_regrandom_cmd *)pCmdBuffer;
	uint32_t *out;
	uint32_t pair;

	hdr->count = numRegVals;
	hdr->cmd = CAM_CDM_CMD_REG_RANDOM;
	hdr->reserved = 0;

	out = pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
	for (pair = 0; pair < numRegVals; pair++) {
		/* offset dword, then value dword */
		*out++ = *pRegVals++;
		*out++ = *pRegVals++;
	}

	return out;
}
/* Build a DMI command header; @length is the LUT size in bytes (stored
 * as length - 1 per the HW format). Returns the advanced cursor.
 */
uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
	uint32_t length)
{
	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;

	pHeader->cmd = dmiCmd;
	pHeader->addr = dmiBufferAddr;
	pHeader->length = length - 1;
	pHeader->DMIAddr = DMIAddr;
	pHeader->DMISel = DMISel;
	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
	return pCmdBuffer;
}

/* Build an indirect-buffer command; @length in bytes (stored as N-1). */
uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
	uint32_t length)
{
	struct cdm_indirect_cmd *pHeader =
		(struct cdm_indirect_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
	pHeader->addr = indirectBufAddr;
	pHeader->length = length - 1;
	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
	return pCmdBuffer;
}

/* Build a change-base command switching subsequent writes to @base. */
uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
{
	struct cdm_changebase_cmd *pHeader =
		(struct cdm_changebase_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE;
	pHeader->base = base;
	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
	return pCmdBuffer;
}

/* Build a gen-IRQ command; @userdata is echoed back in IRQ_USR_DATA. */
void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
{
	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
	pHeader->userdata = userdata;
}
/*
 * Util ops table for CDM v1.7.0. Initializers are POSITIONAL and must
 * stay in the exact field order declared in struct cam_cdm_utils_ops.
 */
struct cam_cdm_utils_ops CDM170_ops = {
	cdm_get_cmd_header_size,
	cdm_required_size_reg_continuous,
	cdm_required_size_reg_random,
	cdm_required_size_dmi,
	cdm_required_size_genirq,
	cdm_required_size_indirect,
	cdm_required_size_changebase,
	cdm_offsetof_dmi_addr,
	cdm_offsetof_indirect_addr,
	cdm_write_regcontinuous,
	cdm_write_regrandom,
	cdm_write_dmi,
	cdm_write_indirect,
	cdm_write_changebase,
	cdm_write_genirq,
};
/* Look up the ioremapped kernel VA matching a HW base address in the
 * client-supplied base table; writes it to @device_base on a hit.
 * Returns 0 on success, -EINVAL when no entry matches.
 */
int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
	uint32_t base_array_size,
	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
	void __iomem **device_base)
{
	int rc = -EINVAL;
	int idx;

	for (idx = 0; idx < base_array_size; idx++) {
		struct cam_soc_reg_map *entry = base_table[idx];

		if (entry)
			CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
				idx, entry->mem_cam_base, hw_base);
		if (entry && (entry->mem_cam_base == hw_base)) {
			*device_base = entry->mem_base;
			rc = 0;
			break;
		}
	}

	return rc;
}
/* Apply a reg-continuous command directly via MMIO (SW CDM path).
 * @used_bytes is returned in BYTES: payload bytes + 4 * header dwords.
 * Returns 0 on success, -EINVAL on bad buffer size or count.
 */
static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
	int ret = 0;
	uint32_t *data;
	struct cdm_regcontinuous_cmd *reg_cont;

	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
		(!base_addr)) {
		CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
			cmd_buf_size, base_addr);
		return -EINVAL;
	}
	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
		(((reg_cont->count * sizeof(uint32_t)) +
			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
			cmd_buf_size)) {
		CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
			cmd_buf_size, reg_cont->count);
		return -EINVAL;
	}
	/* payload follows the header; pointer arithmetic is in dwords */
	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
	cam_io_memcpy(base_addr + reg_cont->offset, data,
		reg_cont->count * sizeof(uint32_t));
	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
	return ret;
}
/* Apply a reg-random command via MMIO: each payload pair is
 * (register offset, value). @used_bytes is in BYTES:
 * 8 * count + 4 * header dwords. Returns 0 or -EINVAL.
 */
static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
	uint32_t i;
	struct cdm_regrandom_cmd *reg_random;
	uint32_t *data;

	if (!base_addr) {
		CAM_ERR(CAM_CDM, "invalid base address");
		return -EINVAL;
	}
	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
		(((reg_random->count * (sizeof(uint32_t) * 2)) +
			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
			cmd_buf_size)) {
		CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
			reg_random->count, cmd_buf_size);
		return -EINVAL;
	}
	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
	for (i = 0; i < reg_random->count; i++) {
		CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
			((void __iomem *)(base_addr + data[0])),
			data[1]);
		cam_io_w(data[1], base_addr + data[0]);
		data += 2;
	}
	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
	return 0;
}
/* Apply a DMI command via MMIO for the SW CDM: push LUT payload words
 * into the target DMI data registers (lo/hi pairs for 64-bit DMI).
 * @used_bytes is in BYTES: 4 * header dwords + LUT length.
 * Returns 0 on success, -EINVAL on short buffer.
 */
static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
	uint32_t *used_bytes)
{
	uint32_t i;
	struct cdm_dmi_cmd *swd_dmi;
	uint32_t *data;

	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;

	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
		CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
			swd_dmi->length + 1);
		return -EINVAL;
	}
	data = cmd_buf + cdm_required_size_dmi();

	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
		/* 8 bytes per entry: low dword first, then high dword */
		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
			cam_io_w_mb(data[0], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
			cam_io_w_mb(data[1], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
			data += 2;
		}
	} else if (cdm_cmd_type == CAM_CDM_CMD_DMI) {
		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
			cam_io_w_mb(data[0], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_OFFSET);
			data += 1;
		}
	} else {
		/* SWD_DMI_32: one dword per entry via the LO data register */
		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
			cam_io_w_mb(data[0], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
			data += 1;
		}
	}
	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
	return 0;
}
/**
 * cam_cdm_util_cmd_buf_write() - Execute a CDM command buffer in software
 * by applying each command directly through MMIO (virtual/SW CDM path).
 * @current_device_base: In/out; current ioremapped device base, updated
 *                       by CHANGE_BASE commands.
 * @cmd_buf:             Command buffer (dword stream).
 * @cmd_buf_size:        Remaining buffer size in bytes.
 * @base_table:          Client-provided HW-base -> ioremap table.
 * @base_array_size:     Number of valid entries in @base_table.
 * @bl_tag:              BL tag (not used by this writer).
 *
 * Returns 0 on success, negative errno on a malformed or unsupported
 * command (processing stops at the first failure).
 */
int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
	uint32_t *cmd_buf, uint32_t cmd_buf_size,
	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
	uint32_t base_array_size, uint8_t bl_tag)
{
	int ret = 0;
	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
	uint32_t used_bytes = 0;

	total_cmd_buf_size = cmd_buf_size;

	while (cmd_buf_size > 0) {
		CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
		/* opcode lives in the top byte of the first dword */
		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
		switch (cdm_cmd_type) {
		case CAM_CDM_CMD_REG_CONT: {
			ret = cam_cdm_util_reg_cont_write(*current_device_base,
				cmd_buf, cmd_buf_size, &used_bytes);
			if (ret)
				break;

			/* used_bytes is bytes; the cursor steps in dwords */
			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes/4;
			}
		}
			break;
		case CAM_CDM_CMD_REG_RANDOM: {
			ret = cam_cdm_util_reg_random_write(
				*current_device_base, cmd_buf, cmd_buf_size,
				&used_bytes);
			if (ret)
				break;

			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes / 4;
			}
		}
			break;
		case CAM_CDM_CMD_DMI:
		case CAM_CDM_CMD_SWD_DMI_32:
		case CAM_CDM_CMD_SWD_DMI_64: {
			if (*current_device_base == 0) {
				CAM_ERR(CAM_CDM,
					"Got SWI DMI cmd =%d for invalid hw",
					cdm_cmd_type);
				ret = -EINVAL;
				break;
			}
			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
				*current_device_base, cmd_buf, cmd_buf_size,
				&used_bytes);
			if (ret)
				break;

			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes / 4;
			}
		}
			break;
		case CAM_CDM_CMD_CHANGE_BASE: {
			struct cdm_changebase_cmd *change_base_cmd =
				(struct cdm_changebase_cmd *)cmd_buf;

			/* re-point the MMIO base for subsequent commands */
			ret = cam_cdm_get_ioremap_from_base(
				change_base_cmd->base, base_array_size,
				base_table, current_device_base);
			if (ret != 0) {
				CAM_ERR(CAM_CDM,
					"Get ioremap change base failed %x",
					change_base_cmd->base);
				break;
			}
			CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
				change_base_cmd->base,
				current_device_base);
			cmd_buf_size -= (4 *
				cdm_required_size_changebase());
			cmd_buf += cdm_required_size_changebase();
		}
			break;
		default:
			CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
				cdm_cmd_type);
			ret = -EINVAL;
			break;
		}
		if (ret < 0)
			break;
	}

	return ret;
}
/* Dump helpers: each prints one decoded command and returns the number
 * of dwords consumed so the caller can advance its cursor.
 */
static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	/* cmd_buf_addr intentionally unused: payload is skipped, not decoded */
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
	CAM_INFO(CAM_CDM, "DMI");
	return ret;
}

static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
	CAM_INFO(CAM_CDM, "Buff Indirect");
	return ret;
}

/* Decode and print a reg-continuous command including its payload. */
static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;
	struct cdm_regcontinuous_cmd *p_regcont_cmd;
	uint32_t *temp_ptr = cmd_buf_addr;
	int i = 0;

	p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr;
	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];

	CAM_INFO(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X",
		p_regcont_cmd->count, p_regcont_cmd->offset);
	for (i = 0; i < p_regcont_cmd->count; i++) {
		CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i,
			*temp_ptr);
		temp_ptr++;
		ret++;
	}
	return ret;
}
/* Decode and print a reg-random command and its offset/value pairs. */
static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
{
	struct cdm_regrandom_cmd *p_regrand_cmd;
	uint32_t *temp_ptr = cmd_buf_addr;
	long ret = 0;
	int i = 0;

	p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr;
	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];

	CAM_INFO(CAM_CDM, "REG_RAND: COUNT: %u",
		p_regrand_cmd->count);
	for (i = 0; i < p_regrand_cmd->count; i++) {
		CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X",
			i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i,
			*(temp_ptr + 1));
		temp_ptr += 2;
		ret += 2;
	}
	return ret;
}

/* Header-only dumps: print the command name and skip its header dwords. */
static long cam_cdm_util_dump_gen_irq_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ];
	CAM_INFO(CAM_CDM, "GEN_IRQ");
	return ret;
}

static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT];
	CAM_INFO(CAM_CDM, "WAIT_EVENT");
	return ret;
}

static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;
	struct cdm_changebase_cmd *p_cbase_cmd;
	uint32_t *temp_ptr = cmd_buf_addr;

	p_cbase_cmd = (struct cdm_changebase_cmd *)temp_ptr;
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];

	CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X",
		p_cbase_cmd->base);
	return ret;
}

static long cam_cdm_util_dump_perf_ctrl_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL];
	CAM_INFO(CAM_CDM, "PERF_CTRL");
	return ret;
}
/**
 * cam_cdm_util_dump_cmd_buf() - Decode and log a CDM command buffer.
 * @cmd_buf_start: First dword of the buffer (inclusive).
 * @cmd_buf_end:   Last dword of the buffer (inclusive).
 *
 * Walks the buffer one command at a time, dispatching on the opcode in
 * the top byte of each command's first dword; unknown opcodes advance by
 * a single dword so the walk always terminates.
 */
void cam_cdm_util_dump_cmd_buf(
	uint32_t *cmd_buf_start, uint32_t *cmd_buf_end)
{
	uint32_t *buf_now = cmd_buf_start;
	uint32_t cmd = 0;

	if (!cmd_buf_start || !cmd_buf_end) {
		CAM_INFO(CAM_CDM, "Invalid args");
		return;
	}

	do {
		cmd = *buf_now;
		cmd = cmd >> CAM_CDM_COMMAND_OFFSET;

		switch (cmd) {
		case CAM_CDM_CMD_DMI:
		case CAM_CDM_CMD_DMI_32:
		case CAM_CDM_CMD_DMI_64:
			buf_now += cam_cdm_util_dump_dmi_cmd(buf_now);
			break;
		case CAM_CDM_CMD_REG_CONT:
			buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now);
			break;
		case CAM_CDM_CMD_REG_RANDOM:
			buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now);
			break;
		case CAM_CDM_CMD_BUFF_INDIRECT:
			buf_now += cam_cdm_util_dump_buff_indirect(buf_now);
			break;
		case CAM_CDM_CMD_GEN_IRQ:
			buf_now += cam_cdm_util_dump_gen_irq_cmd(buf_now);
			break;
		case CAM_CDM_CMD_WAIT_EVENT:
			buf_now += cam_cdm_util_dump_wait_event_cmd(buf_now);
			break;
		case CAM_CDM_CMD_CHANGE_BASE:
			buf_now += cam_cdm_util_dump_change_base_cmd(buf_now);
			break;
		case CAM_CDM_CMD_PERF_CTRL:
			buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now);
			break;
		default:
			CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x",
				cmd, *buf_now);
			buf_now++;
			break;
		}
	} while (buf_now <= cmd_buf_end);
}

View File

@@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_UTIL_H_
#define _CAM_CDM_UTIL_H_
/*
 * CDM command opcodes. The opcode is the value carried in the command
 * header's command field (extracted with CAM_CDM_COMMAND_OFFSET by the
 * dump/parse code). Values from CAM_CDM_CMD_PRIVATE_BASE upward are
 * software-defined private commands, not HW opcodes.
 */
enum cam_cdm_command {
	CAM_CDM_CMD_UNUSED = 0x0,
	CAM_CDM_CMD_DMI = 0x1,
	CAM_CDM_CMD_NOT_DEFINED = 0x2,
	CAM_CDM_CMD_REG_CONT = 0x3,
	CAM_CDM_CMD_REG_RANDOM = 0x4,
	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
	CAM_CDM_CMD_GEN_IRQ = 0x6,
	CAM_CDM_CMD_WAIT_EVENT = 0x7,
	CAM_CDM_CMD_CHANGE_BASE = 0x8,
	CAM_CDM_CMD_PERF_CTRL = 0x9,
	CAM_CDM_CMD_DMI_32 = 0xa,
	CAM_CDM_CMD_DMI_64 = 0xb,
	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
	/* SWD = software DMI variants used by the virtual/SW CDM path */
	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
};
/**
* struct cam_cdm_utils_ops - Camera CDM util ops
*
* @cdm_get_cmd_header_size: Returns the size of the given command header
* in DWORDs.
* @command Command ID
* @return Size of the command in DWORDs
*
* @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
* command in dwords.
* @numVals Number of continuous values
* @return Size in dwords
*
* @cdm_required_size_reg_random: Calculates the size of a reg-random command
* in dwords.
* @numRegVals Number of register/value pairs
* @return Size in dwords
*
* @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
* @return Size in dwords
*
* @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
* @return Size in dwords
*
* @cdm_required_size_indirect: Calculates the size of an indirect command
* in dwords.
* @return Size in dwords
*
* @cdm_required_size_changebase: Calculates the size of a change-base command
* in dwords.
* @return Size in dwords
*
* @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
* command header.
* @return Offset of addr field
*
* @cdm_offsetof_indirect_addr: Returns the offset of address field in the
* indirect command header.
* @return Offset of addr field
*
* @cdm_write_regcontinuous: Writes a command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @reg: Beginning of the register address range where
* values will be written.
* @numVals: Number of values (registers) that will be written
* @pVals : An array of values that will be written
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_regrandom: Writes a command into the command buffer in
* register/value pairs.
* @pCmdBuffer: Pointer to command buffer
* @numRegVals: Number of register/value pairs that will be written
* @pRegVals: An array of register/value pairs that will be written
* The even indices are registers and the odd indices
 *                             are values, e.g., {reg1, val1, reg2, val2, ...}.
* @return Pointer in command buffer pointing past the written commands
*
 * @cdm_write_dmi:          Writes a DMI command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @dmiCmd: DMI command
* @DMIAddr: Address of the DMI
* @DMISel: Selected bank that the DMI will write to
* @length: Size of data in bytes
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_indirect: Writes a indirect command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @indirectBufferAddr: Device address of the indirect cmd buffer.
* @length: Size of data in bytes
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_changebase: Writes a changing CDM (address) base command into
* the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @base: New base (device) address
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_genirq: Writes a gen irq command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @userdata: userdata or cookie return by hardware during irq.
*/
/* Per-CDM-version util vtable; full contracts in the block comment above. */
struct cam_cdm_utils_ops {
	/* Size queries: all return sizes in dwords. */
	uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
	uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals);
	uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
	uint32_t (*cdm_required_size_dmi)(void);
	uint32_t (*cdm_required_size_genirq)(void);
	uint32_t (*cdm_required_size_indirect)(void);
	uint32_t (*cdm_required_size_changebase)(void);
	/* Offset of the address field within the respective command header. */
	uint32_t (*cdm_offsetof_dmi_addr)(void);
	uint32_t (*cdm_offsetof_indirect_addr)(void);
	/*
	 * Writers: emit a command at pCmdBuffer; all except cdm_write_genirq
	 * return a pointer just past the dwords written.
	 */
	uint32_t* (*cdm_write_regcontinuous)(
		uint32_t *pCmdBuffer,
		uint32_t reg,
		uint32_t numVals,
		uint32_t *pVals);
	uint32_t *(*cdm_write_regrandom)(
		uint32_t *pCmdBuffer,
		uint32_t numRegVals,
		uint32_t *pRegVals);
	uint32_t *(*cdm_write_dmi)(
		uint32_t *pCmdBuffer,
		uint8_t dmiCmd,
		uint32_t DMIAddr,
		uint8_t DMISel,
		uint32_t dmiBufferAddr,
		uint32_t length);
	uint32_t *(*cdm_write_indirect)(
		uint32_t *pCmdBuffer,
		uint32_t indirectBufferAddr,
		uint32_t length);
	uint32_t *(*cdm_write_changebase)(
		uint32_t *pCmdBuffer,
		uint32_t base);
	void (*cdm_write_genirq)(
		uint32_t *pCmdBuffer,
		uint32_t userdata);
};
/**
* cam_cdm_util_log_cmd_bufs()
*
* @brief: Util function to log cdm command buffers
*
* @cmd_buffer_start: Pointer to start of cmd buffer
* @cmd_buffer_end: Pointer to end of cmd buffer
*
*/
void cam_cdm_util_dump_cmd_buf(
uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end);
#endif /* _CAM_CDM_UTIL_H_ */

View File

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_VIRTUAL_H_
#define _CAM_CDM_VIRTUAL_H_
#include "cam_cdm_intf_api.h"
int cam_virtual_cdm_probe(struct platform_device *pdev);
int cam_virtual_cdm_remove(struct platform_device *pdev);
int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
uint32_t *cmd_buf, uint32_t cmd_buf_size,
struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
uint32_t base_array_size, uint8_t bl_tag);
#endif /* _CAM_CDM_VIRTUAL_H_ */

View File

@@ -0,0 +1,382 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_soc_util.h"
#include "cam_smmu_api.h"
#include "cam_cdm_intf_api.h"
#include "cam_cdm.h"
#include "cam_cdm_util.h"
#include "cam_cdm_virtual.h"
#include "cam_cdm_core_common.h"
#include "cam_cdm_soc.h"
#include "cam_io_util.h"
#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
/*
 * Deferred IRQ handler for the virtual CDM.
 * Bit 0x2 of irq_status: inline/gen IRQ — look up the BL request by tag,
 * notify the client on success, then drop the node.
 * Bit 0x1: reset done — wake the waiter on reset_complete.
 * The payload is owned by this work item and freed here.
 */
static void cam_virtual_cdm_work(struct work_struct *work)
{
	struct cam_cdm_work_payload *payload =
		container_of(work, struct cam_cdm_work_payload, work);
	struct cam_hw_info *cdm_hw;
	struct cam_cdm *core;

	if (!payload)
		return;

	cdm_hw = payload->hw;
	core = (struct cam_cdm *)cdm_hw->core_info;

	if (payload->irq_status & 0x2) {
		struct cam_cdm_bl_cb_request_entry *node;

		CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
			payload->irq_data);
		mutex_lock(&cdm_hw->hw_mutex);
		node = cam_cdm_find_request_by_bl_tag(payload->irq_data,
			&core->bl_request_list);
		if (!node) {
			CAM_ERR(CAM_CDM, "Invalid node for inline irq");
		} else {
			if (node->request_type == CAM_HW_CDM_BL_CB_CLIENT)
				cam_cdm_notify_clients(cdm_hw,
					CAM_CDM_CB_STATUS_BL_SUCCESS,
					(void *)node);
			else if (node->request_type ==
					CAM_HW_CDM_BL_CB_INTERNAL)
				CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
					node, node->request_type);
			list_del_init(&node->entry);
			kfree(node);
		}
		mutex_unlock(&cdm_hw->hw_mutex);
	}

	if (payload->irq_status & 0x1) {
		CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
		complete(&core->reset_complete);
	}

	kfree(payload);
}
int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
struct cam_cdm_hw_intf_cmd_submit_bl *req,
struct cam_cdm_client *client)
{
int i, rc = -EINVAL;
struct cam_cdm_bl_request *cdm_cmd = req->data;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
mutex_lock(&client->lock);
for (i = 0; i < req->data->cmd_arrary_count ; i++) {
uintptr_t vaddr_ptr = 0;
size_t len = 0;
if ((!cdm_cmd->cmd[i].len) &&
(cdm_cmd->cmd[i].len > 0x100000)) {
CAM_ERR(CAM_CDM,
"len(%d) is invalid count=%d total cnt=%d",
cdm_cmd->cmd[i].len, i,
req->data->cmd_arrary_count);
rc = -EINVAL;
break;
}
if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
rc = cam_mem_get_cpu_buf(
cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
&len);
} else if (req->data->type ==
CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
rc = 0;
vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
} else {
CAM_ERR(CAM_CDM,
"Only mem hdl/Kernel va type is supported %d",
req->data->type);
rc = -EINVAL;
break;
}
if ((!rc) && (vaddr_ptr) && (len) &&
(len >= cdm_cmd->cmd[i].offset)) {
if ((len - cdm_cmd->cmd[i].offset) <
cdm_cmd->cmd[i].len) {
CAM_ERR(CAM_CDM, "Not enough buffer");
rc = -EINVAL;
break;
}
CAM_DBG(CAM_CDM,
"hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
cdm_cmd->cmd[i].bl_addr.mem_handle,
(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
cdm_cmd->cmd[i].len, len);
rc = cam_cdm_util_cmd_buf_write(
&client->changebase_addr,
((uint32_t *)vaddr_ptr +
((cdm_cmd->cmd[i].offset)/4)),
cdm_cmd->cmd[i].len, client->data.base_array,
client->data.base_array_cnt, core->bl_tag);
if (rc) {
CAM_ERR(CAM_CDM,
"write failed for cnt=%d:%d len %u",
i, req->data->cmd_arrary_count,
cdm_cmd->cmd[i].len);
break;
}
} else {
CAM_ERR(CAM_CDM,
"Sanity check failed for hdl=%x len=%zu:%d",
cdm_cmd->cmd[i].bl_addr.mem_handle, len,
cdm_cmd->cmd[i].offset);
CAM_ERR(CAM_CDM,
"Sanity check failed for cmd_count=%d cnt=%d",
i, req->data->cmd_arrary_count);
rc = -EINVAL;
break;
}
if (!rc) {
struct cam_cdm_work_payload *payload;
CAM_DBG(CAM_CDM,
"write BL success for cnt=%d with tag=%d",
i, core->bl_tag);
if ((true == req->data->flag) &&
(i == req->data->cmd_arrary_count)) {
struct cam_cdm_bl_cb_request_entry *node;
node = kzalloc(sizeof(
struct cam_cdm_bl_cb_request_entry),
GFP_KERNEL);
if (!node) {
rc = -ENOMEM;
break;
}
node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
node->client_hdl = req->handle;
node->cookie = req->data->cookie;
node->bl_tag = core->bl_tag;
node->userdata = req->data->userdata;
mutex_lock(&cdm_hw->hw_mutex);
list_add_tail(&node->entry,
&core->bl_request_list);
mutex_unlock(&cdm_hw->hw_mutex);
payload = kzalloc(sizeof(
struct cam_cdm_work_payload),
GFP_ATOMIC);
if (payload) {
payload->irq_status = 0x2;
payload->irq_data = core->bl_tag;
payload->hw = cdm_hw;
INIT_WORK((struct work_struct *)
&payload->work,
cam_virtual_cdm_work);
queue_work(core->work_queue,
&payload->work);
}
}
core->bl_tag++;
CAM_DBG(CAM_CDM,
"Now commit the BL nothing for virtual");
if (!rc && (core->bl_tag == 63))
core->bl_tag = 0;
}
}
mutex_unlock(&client->lock);
return rc;
}
int cam_virtual_cdm_probe(struct platform_device *pdev)
{
struct cam_hw_info *cdm_hw = NULL;
struct cam_hw_intf *cdm_hw_intf = NULL;
struct cam_cdm *cdm_core = NULL;
struct cam_cdm_private_dt_data *soc_private = NULL;
int rc;
struct cam_cpas_register_params cpas_parms;
cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!cdm_hw_intf)
return -ENOMEM;
cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
if (!cdm_hw) {
kfree(cdm_hw_intf);
return -ENOMEM;
}
cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
if (!cdm_hw->core_info) {
kfree(cdm_hw);
kfree(cdm_hw_intf);
return -ENOMEM;
}
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
cdm_hw->soc_info.pdev = pdev;
cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
cdm_hw->soc_info.soc_private = kzalloc(
sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
if (!cdm_hw->soc_info.soc_private) {
rc = -ENOMEM;
goto soc_load_failed;
}
rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
if (rc) {
CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
kfree(cdm_hw->soc_info.soc_private);
cdm_hw->soc_info.soc_private = NULL;
goto soc_load_failed;
}
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
soc_private = (struct cam_cdm_private_dt_data *)
cdm_hw->soc_info.soc_private;
if (soc_private->dt_cdm_shared == true)
cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
else
cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
cdm_core->bl_tag = 0;
INIT_LIST_HEAD(&cdm_core->bl_request_list);
init_completion(&cdm_core->reset_complete);
cdm_hw_intf->hw_priv = cdm_hw;
cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
cdm_hw_intf->hw_ops.init = NULL;
cdm_hw_intf->hw_ops.deinit = NULL;
cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
cdm_hw_intf->hw_ops.read = NULL;
cdm_hw_intf->hw_ops.write = NULL;
cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
cdm_hw_intf->hw_idx);
platform_set_drvdata(pdev, cdm_hw_intf);
cdm_hw->open_count = 0;
cdm_core->iommu_hdl.non_secure = -1;
cdm_core->iommu_hdl.secure = -1;
mutex_init(&cdm_hw->hw_mutex);
spin_lock_init(&cdm_hw->hw_lock);
init_completion(&cdm_hw->hw_complete);
mutex_lock(&cdm_hw->hw_mutex);
cdm_core->id = CAM_CDM_VIRTUAL;
memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
sizeof(CAM_CDM_VIRTUAL_NAME));
cdm_core->work_queue = alloc_workqueue(cdm_core->name,
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
CAM_CDM_INFLIGHT_WORKS);
cdm_core->ops = NULL;
cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
cpas_parms.cell_index = cdm_hw->soc_info.index;
cpas_parms.dev = &pdev->dev;
cpas_parms.userdata = cdm_hw_intf;
strlcpy(cpas_parms.identifier, "cam-cdm-intf",
CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
goto cpas_registration_failed;
}
CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
cdm_core->cpas_handle = cpas_parms.client_handle;
CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
if (rc) {
CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
goto intf_registration_failed;
}
CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
cdm_hw_intf->hw_idx);
mutex_unlock(&cdm_hw->hw_mutex);
return 0;
intf_registration_failed:
cam_cpas_unregister_client(cdm_core->cpas_handle);
cpas_registration_failed:
kfree(cdm_hw->soc_info.soc_private);
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
mutex_unlock(&cdm_hw->hw_mutex);
mutex_destroy(&cdm_hw->hw_mutex);
soc_load_failed:
kfree(cdm_hw->core_info);
kfree(cdm_hw);
kfree(cdm_hw_intf);
return rc;
}
/*
 * Platform remove for the virtual CDM: unregister from CPAS and the CDM
 * interface layer, drain the workqueue, then free all probe allocations.
 * Returns 0 on success, -EBUSY when driver data is missing, or the
 * unregister error code.
 */
int cam_virtual_cdm_remove(struct platform_device *pdev)
{
	struct cam_hw_intf *cdm_hw_intf = platform_get_drvdata(pdev);
	struct cam_hw_info *cdm_hw;
	struct cam_cdm *cdm_core;
	int rc;

	if (!cdm_hw_intf) {
		CAM_ERR(CAM_CDM, "Failed to get dev private data");
		return -EBUSY;
	}

	cdm_hw = cdm_hw_intf->hw_priv;
	if (!cdm_hw) {
		CAM_ERR(CAM_CDM,
			"Failed to get virtual private data for type=%d idx=%d",
			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
		return -EBUSY;
	}

	cdm_core = cdm_hw->core_info;
	if (!cdm_core) {
		CAM_ERR(CAM_CDM,
			"Failed to get virtual core data for type=%d idx=%d",
			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
		return -EBUSY;
	}

	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
	if (rc) {
		CAM_ERR(CAM_CDM, "CPAS unregister failed");
		return rc;
	}

	rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
		cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
		cdm_core->index);
	if (rc) {
		CAM_ERR(CAM_CDM,
			"Virtual CDM Interface de-registration failed");
		return rc;
	}

	flush_workqueue(cdm_core->work_queue);
	destroy_workqueue(cdm_core->work_queue);
	mutex_destroy(&cdm_hw->hw_mutex);
	kfree(cdm_hw->soc_info.soc_private);
	kfree(cdm_hw->core_info);
	kfree(cdm_hw);
	kfree(cdm_hw_intf);

	return 0;
}

View File

@@ -0,0 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_HW_CDM170_REG_H_
#define _CAM_HW_CDM170_REG_H_
#define CAM_CDM_REG_OFFSET_FIRST 0x0
#define CAM_CDM_REG_OFFSET_LAST 0x200
#define CAM_CDM_REGS_COUNT 0x30
#define CAM_CDM_HWFIFO_SIZE 0x40
#define CAM_CDM_OFFSET_HW_VERSION 0x0
#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
#define CAM_CDM_OFFSET_RST_CMD 0x10
#define CAM_CDM_OFFSET_CGC_CFG 0x14
#define CAM_CDM_OFFSET_CORE_CFG 0x18
#define CAM_CDM_OFFSET_CORE_EN 0x1c
#define CAM_CDM_OFFSET_FE_CFG 0x20
#define CAM_CDM_OFFSET_IRQ_MASK 0x30
#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
#define CAM_CDM_OFFSET_IRQ_SET 0x3c
#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
#define CAM_CDM_OFFSET_PERF_MON_0 0x114
#define CAM_CDM_OFFSET_PERF_MON_1 0x118
#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
#define CAM_CDM_OFFSET_SPARE 0x200
/*
* Always make sure below register offsets are aligned with
* enum cam_cdm_regs offsets
*/
struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
	/* Entry order must match enum cam_cdm_regs (see comment above). */
	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
	/* Debug/error capture registers. */
	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
};
struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
.first_offset = 0x0,
.last_offset = 0x200,
.reg_count = 0x30,
.offsets = cam170_cpas_cdm_register_offsets,
.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
sizeof(struct cam_cdm_reg_offset)),
};
#endif /* _CAM_HW_CDM170_REG_H_ */