msm: camera: common: Merge camera-kernel.3.1 changes in camera-kernel.4.0

msm: camera: tfe: Fix variable initialization issues
msm: camera: isp: Dual tfe event check with proper hw idx
msm: camera: smmu: Add support for non-contiguous memory region
msm: camera: smmu: Use iommu best match algo for camera
msm: camera: ope: Optimize allocation of IO configuration
msm: camera: ope: Fix for KW Issues
msm: camera: ope: Add support for stripe level height configuration
msm: camera: tfe: Enable the delay line clc
msm: camera: ope: Fix false alarm for OPE HW timeout
msm: camera: tfe: Support register dump per request
msm: camera: ope: Increase max number of stripes
msm: camera: ope: Change packer and unpacker format in case of NV12
msm: camera: tfe: Add packet code get command for tfe
msm: camera: ope: Trigger recovery in case of violation on write bus
msm: camera: ope: Protect ope hw reset with mutex
msm: camera: ope: Add a check for valid request in cdm callback
msm: camera: ope: Remove the BW & clock vote in release context
msm: camera: ope: Reduce OPE BUS memory
msm: camera: ope: Fix return value for ope acquire
msm: camera: ope: Fix false alarm for OPE request timeout
msm: camera: ope: Avoid deadlock during recovery after HW hang
msm: camera: tfe: tfe debug enhancement
msm: camera: cdm: Fix irq_data value in case of inline irq
msm: camera: flash: Switch off flash on provider crash
msm: camera: ope: Initialize ope hw mutex structure
msm: camera: cdm: Flush all available FIFOs during reset
msm: camera: cpas: Add mandatory bw option for axi ports clocks
msm: camera: ope: Use vzalloc to allocate the write bus ctx structure
msm: camera: ope: Fix handling of init hw failure
msm: camera: tfe: Enable per frame register dump for rdi only context
msm: camera: cdm: Protect cdm core status bits with mutex
msm: camera: cdm: Correct the error check in cmd submit irq
msm: camera: ope: Fix unclocked access during HW reset
msm: camera: ope: Program frame level settings after idle event
msm: camera: ope: Delay releasing of resources for last context
msm: camera: isp: Increase default SOF freeze timeout
msm: camera: smmu: Add map and unmap monitor
msm: camera: isp: Add trace events across ISP
msm: camera: smmu: Profile time taken for map, unmap
msm: camera: ope: Start context timer on receiving new request
msm: camera: tfe: Reduce stack size during set axi bw
msm: camera: cdm: Check for HW state before dumping registers
msm: camera: ope: Reduce stack footprint during acquire
msm: camera: tfe: Disable clock if tfe2 is not supported
msm: camera: cdm: Avoid cdm pause in case of BL submit
msm: camera: tfe: Optimize CSID IRQ logging
msm: camera: ope: Move request id validity check outside of lock
msm: camera: tfe: Correct the tfe hw manager dump logic
msm: camera: ope: Synchronize flush and submit BLs
msm: camera: cdm: Protect cdm reset status
msm: camera: cdm: Handle cdm deinit sequence properly
msm: camera: tfe: Reduce reset timeout to 100ms
msm: camera: ope: Fix hang detection
msm: camera: ope: Make non-fatal logs debug and info logs
msm: camera: tfe: Set overflow pending bit to zero after HW reset
msm: camera: ope: Do not disable CDM during error handling
msm: camera: ope: Add support for OPE Replay
msm: camera: ope: Stop OPE in case of init failure
msm: camera: ope: Synchronize process cmd and flush request
msm: camera: cdm: Fix CDM IRQ handling
msm: camera: tfe: LDAR dump for TFE
msm: camera: ope: Fix the length check for debug buffer
msm: camera: cdm: Fix CDM reset logic
msm: camera: ope: Dump debug registers in case of HW hang
msm: camera: tfe: Support the RDI bus port for line based mode
msm: camera: cdm: Handle out of order reset done events
msm: camera: ope: Consider other contexts during timeout
msm: camera: ope: Put GenIRQ in last stripe BL
msm: camera: tfe: Process the rdi interrupts for rdi only resource
msm: camera: jpeg: Check the HW state before accessing register
msm: camera: csiphy: Update csiphy power-up sequence for lito v2
msm: camera: cdm: Secure freeing of request lists using locks
msm: camera: cpas: Add support for Scuba camnoc
msm: camera: csiphy: Clear secure phy flags on release
msm: camera: tfe: Validate the tfe bw num paths
msm: camera: ope: Reorder the reset order in ope acquire
msm: camera: ope: Dump debug registers in case of reset failure
msm: camera: ope: Add logic to detect hang in CDM
msm: camera: isp: Increase max count of cfg to support more init packets
msm: camera: core: Fix cpas axi clk rate overflow.

CRs-Fixed: 2668666
Change-Id: I882ca4bd117bebc7d1c62bc82299d69d7b5c9388
Signed-off-by: Trishansh Bhardwaj <tbhardwa@codeaurora.org>
Trishansh Bhardwaj
2020-04-21 12:11:13 +05:30
parent 76586bbff2
commit 172d34b6f7
52 changed files with 3504 additions and 822 deletions


@@ -67,6 +67,7 @@
/* BL_FIFO configurations*/ /* BL_FIFO configurations*/
#define CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT 0x40 #define CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT 0x40
#define CAM_CDM_BL_FIFO_LENGTH_CFG_SHIFT 0x10 #define CAM_CDM_BL_FIFO_LENGTH_CFG_SHIFT 0x10
#define CAM_CDM_BL_FIFO_FLUSH_SHIFT 0x3
#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX 0x00 #define CAM_CDM_BL_FIFO_REQ_SIZE_MAX 0x00
#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV2 0x01 #define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV2 0x01
@@ -100,6 +101,7 @@
#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000 #define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000 #define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000 #define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
#define CAM_CDM_IRQ_STATUS_USR_DATA_MASK 0xFF
#define CAM_CDM_IRQ_STATUS_ERRORS \ #define CAM_CDM_IRQ_STATUS_ERRORS \
(CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK | \ (CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK | \
@@ -371,6 +373,7 @@ enum cam_cdm_hw_process_intf_cmd {
CAM_CDM_HW_INTF_CMD_RESET_HW, CAM_CDM_HW_INTF_CMD_RESET_HW,
CAM_CDM_HW_INTF_CMD_FLUSH_HW, CAM_CDM_HW_INTF_CMD_FLUSH_HW,
CAM_CDM_HW_INTF_CMD_HANDLE_ERROR, CAM_CDM_HW_INTF_CMD_HANDLE_ERROR,
CAM_CDM_HW_INTF_CMD_HANG_DETECT,
CAM_CDM_HW_INTF_CMD_INVALID, CAM_CDM_HW_INTF_CMD_INVALID,
}; };
@@ -466,6 +469,8 @@ struct cam_cdm_bl_fifo {
struct mutex fifo_lock; struct mutex fifo_lock;
uint8_t bl_tag; uint8_t bl_tag;
uint32_t bl_depth; uint32_t bl_depth;
uint8_t last_bl_tag_done;
uint32_t work_record;
}; };
/** /**
@@ -493,6 +498,7 @@ struct cam_cdm_bl_fifo {
* @gen_irq: memory region in which gen_irq command will be written * @gen_irq: memory region in which gen_irq command will be written
* @cpas_handle: handle for cpas driver * @cpas_handle: handle for cpas driver
* @arbitration: type of arbitration to be used for the CDM * @arbitration: type of arbitration to be used for the CDM
* @rst_done_cnt: CMD reset done count
*/ */
struct cam_cdm { struct cam_cdm {
uint32_t index; uint32_t index;
@@ -515,6 +521,7 @@ struct cam_cdm {
struct cam_cdm_hw_mem gen_irq[CAM_CDM_BL_FIFO_MAX]; struct cam_cdm_hw_mem gen_irq[CAM_CDM_BL_FIFO_MAX];
uint32_t cpas_handle; uint32_t cpas_handle;
enum cam_cdm_arbitration arbitration; enum cam_cdm_arbitration arbitration;
uint32_t rst_done_cnt;
}; };
/* struct cam_cdm_private_dt_data - CDM hw custom dt data */ /* struct cam_cdm_private_dt_data - CDM hw custom dt data */


@@ -262,6 +262,47 @@ void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
} }
} }
static int cam_cdm_stream_handle_init(void *hw_priv, bool init)
{
struct cam_hw_info *cdm_hw = hw_priv;
struct cam_cdm *core = NULL;
int rc = -EPERM;
core = (struct cam_cdm *)cdm_hw->core_info;
if (init) {
rc = cam_hw_cdm_init(hw_priv, NULL, 0);
if (rc) {
CAM_ERR(CAM_CDM, "CDM HW init failed");
return rc;
}
if (core->arbitration !=
CAM_CDM_ARBITRATION_PRIORITY_BASED) {
rc = cam_hw_cdm_alloc_genirq_mem(
hw_priv);
if (rc) {
CAM_ERR(CAM_CDM,
"Genirqalloc failed");
cam_hw_cdm_deinit(hw_priv,
NULL, 0);
}
}
} else {
rc = cam_hw_cdm_deinit(hw_priv, NULL, 0);
if (rc)
CAM_ERR(CAM_CDM, "Deinit failed in streamoff");
if (core->arbitration !=
CAM_CDM_ARBITRATION_PRIORITY_BASED) {
if (cam_hw_cdm_release_genirq_mem(hw_priv))
CAM_ERR(CAM_CDM, "Genirq release fail");
}
}
return rc;
}
int cam_cdm_stream_ops_internal(void *hw_priv, int cam_cdm_stream_ops_internal(void *hw_priv,
void *start_args, bool operation) void *start_args, bool operation)
{ {
@@ -337,19 +378,7 @@ int cam_cdm_stream_ops_internal(void *hw_priv,
rc = 0; rc = 0;
} else { } else {
CAM_DBG(CAM_CDM, "CDM HW init first time"); CAM_DBG(CAM_CDM, "CDM HW init first time");
rc = cam_hw_cdm_init(hw_priv, NULL, 0); rc = cam_cdm_stream_handle_init(hw_priv, true);
if (rc == 0) {
rc = cam_hw_cdm_alloc_genirq_mem(
hw_priv);
if (rc != 0) {
CAM_ERR(CAM_CDM,
"Genirqalloc failed");
cam_hw_cdm_deinit(hw_priv,
NULL, 0);
}
} else {
CAM_ERR(CAM_CDM, "CDM HW init failed");
}
} }
if (rc == 0) { if (rc == 0) {
cdm_hw->open_count++; cdm_hw->open_count++;
@@ -378,17 +407,10 @@ int cam_cdm_stream_ops_internal(void *hw_priv,
rc = 0; rc = 0;
} else { } else {
CAM_DBG(CAM_CDM, "CDM HW Deinit now"); CAM_DBG(CAM_CDM, "CDM HW Deinit now");
rc = cam_hw_cdm_deinit( rc = cam_cdm_stream_handle_init(hw_priv,
hw_priv, NULL, 0); false);
if (cam_hw_cdm_release_genirq_mem(
hw_priv))
CAM_ERR(CAM_CDM,
"Genirq release fail");
} }
if (rc) { if (rc == 0) {
CAM_ERR(CAM_CDM,
"Deinit failed in streamoff");
} else {
client->stream_on = false; client->stream_on = false;
rc = cam_cpas_stop(core->cpas_handle); rc = cam_cpas_stop(core->cpas_handle);
if (rc) if (rc)
@@ -763,6 +785,41 @@ int cam_cdm_process_cmd(void *hw_priv,
mutex_unlock(&cdm_hw->hw_mutex); mutex_unlock(&cdm_hw->hw_mutex);
break; break;
} }
case CAM_CDM_HW_INTF_CMD_HANG_DETECT: {
uint32_t *handle = cmd_args;
int idx;
struct cam_cdm_client *client;
if (sizeof(uint32_t) != arg_size) {
CAM_ERR(CAM_CDM,
"Invalid CDM cmd %d size=%x for handle=%x",
cmd, arg_size, *handle);
return -EINVAL;
}
idx = CAM_CDM_GET_CLIENT_IDX(*handle);
mutex_lock(&cdm_hw->hw_mutex);
client = core->clients[idx];
if (!client) {
CAM_ERR(CAM_CDM,
"Client not present for handle %d",
*handle);
mutex_unlock(&cdm_hw->hw_mutex);
break;
}
if (*handle != client->handle) {
CAM_ERR(CAM_CDM,
"handle mismatch, client handle %d index %d received handle %d",
client->handle, idx, *handle);
mutex_unlock(&cdm_hw->hw_mutex);
break;
}
rc = cam_hw_cdm_hang_detect(cdm_hw, *handle);
mutex_unlock(&cdm_hw->hw_mutex);
break;
}
default: default:
CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd); CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
break; break;


@@ -50,6 +50,7 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle); int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle);
int cam_hw_cdm_flush_hw(struct cam_hw_info *cdm_hw, uint32_t handle); int cam_hw_cdm_flush_hw(struct cam_hw_info *cdm_hw, uint32_t handle);
int cam_hw_cdm_handle_error(struct cam_hw_info *cdm_hw, uint32_t handle); int cam_hw_cdm_handle_error(struct cam_hw_info *cdm_hw, uint32_t handle);
int cam_hw_cdm_hang_detect(struct cam_hw_info *cdm_hw, uint32_t handle);
struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag( struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
uint32_t tag, struct list_head *bl_list); uint32_t tag, struct list_head *bl_list);
void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,


@@ -129,26 +129,21 @@ static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
return rc; return rc;
} }
static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable) static int cam_hw_cdm_pause_core(struct cam_hw_info *cdm_hw, bool pause)
{ {
int rc = 0; int rc = 0;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
uint32_t val = 0x1;
if (enable == true) { if (pause)
if (cam_cdm_write_hw_reg(cdm_hw, val |= 0x2;
core->offsets->cmn_reg->core_en,
0x01)) { if (cam_cdm_write_hw_reg(cdm_hw,
CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable"); core->offsets->cmn_reg->core_en, val)) {
rc = -EIO; CAM_ERR(CAM_CDM, "Failed to Write CDM HW core_en");
} rc = -EIO;
} else {
if (cam_cdm_write_hw_reg(cdm_hw,
core->offsets->cmn_reg->core_en,
0x02)) {
CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
rc = -EIO;
}
} }
return rc; return rc;
} }
@@ -307,10 +302,13 @@ void cam_hw_cdm_dump_core_debug_registers(
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->core_en, &dump_reg); cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->core_en, &dump_reg);
CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg); CAM_INFO(CAM_CDM, "CDM HW core status=%x", dump_reg);
/* First pause CDM, If it fails still proceed to dump debug info */ cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->usr_data,
cam_hw_cdm_enable_core(cdm_hw, false); &dump_reg);
CAM_INFO(CAM_CDM, "CDM HW core userdata=0x%x", dump_reg);
usleep_range(1000, 1010);
cam_cdm_read_hw_reg(cdm_hw, cam_cdm_read_hw_reg(cdm_hw,
core->offsets->cmn_reg->debug_status, core->offsets->cmn_reg->debug_status,
@@ -379,8 +377,6 @@ void cam_hw_cdm_dump_core_debug_registers(
core->offsets->cmn_reg->current_used_ahb_base, &dump_reg); core->offsets->cmn_reg->current_used_ahb_base, &dump_reg);
CAM_INFO(CAM_CDM, "CDM HW current AHB base=%x", dump_reg); CAM_INFO(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
/* Enable CDM back */
cam_hw_cdm_enable_core(cdm_hw, true);
} }
enum cam_cdm_arbitration cam_cdm_get_arbitration_type( enum cam_cdm_arbitration cam_cdm_get_arbitration_type(
@@ -633,10 +629,13 @@ int cam_hw_cdm_submit_gen_irq(
int rc; int rc;
bool bit_wr_enable = false; bool bit_wr_enable = false;
if (core->bl_fifo[fifo_idx].bl_tag > 63) { if (core->bl_fifo[fifo_idx].bl_tag >
(core->bl_fifo[fifo_idx].bl_depth - 1)) {
CAM_ERR(CAM_CDM, CAM_ERR(CAM_CDM,
"bl_tag invalid =%d", "Invalid bl_tag=%d bl_depth=%d fifo_idx=%d",
core->bl_fifo[fifo_idx].bl_tag); core->bl_fifo[fifo_idx].bl_tag,
core->bl_fifo[fifo_idx].bl_depth,
fifo_idx);
rc = -EINVAL; rc = -EINVAL;
goto end; goto end;
} }
@@ -742,6 +741,78 @@ end:
return rc; return rc;
} }
static int cam_hw_cdm_arb_submit_bl(struct cam_hw_info *cdm_hw,
struct cam_cdm_hw_intf_cmd_submit_bl *req, int i,
uint32_t fifo_idx, dma_addr_t hw_vaddr_ptr)
{
struct cam_cdm_bl_request *cdm_cmd = req->data;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
uintptr_t cpu_addr;
struct cam_cdm_bl_cb_request_entry *node;
int rc = 0;
size_t len = 0;
node = kzalloc(sizeof(
struct cam_cdm_bl_cb_request_entry),
GFP_KERNEL);
if (!node)
return -ENOMEM;
node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
node->client_hdl = req->handle;
node->cookie = req->data->cookie;
node->bl_tag = core->bl_fifo[fifo_idx].bl_tag -
1;
node->userdata = req->data->userdata;
list_add_tail(&node->entry,
&core->bl_fifo[fifo_idx]
.bl_request_list);
cdm_cmd->cmd[i].arbitrate = true;
rc = cam_mem_get_cpu_buf(
cdm_cmd->cmd[i].bl_addr.mem_handle,
&cpu_addr, &len);
if (rc || !cpu_addr) {
CAM_ERR(CAM_OPE, "get cmd buffailed %x",
cdm_cmd->cmd[i].bl_addr
.mem_handle);
return rc;
}
core->ops->cdm_write_genirq(
((uint32_t *)cpu_addr +
cdm_cmd->cmd[i].offset / 4 +
cdm_cmd->cmd[i].len / 4),
core->bl_fifo[fifo_idx].bl_tag - 1,
1, fifo_idx);
rc = cam_hw_cdm_bl_write(cdm_hw,
(uint32_t)hw_vaddr_ptr +
cdm_cmd->cmd[i].offset,
cdm_cmd->cmd[i].len + 7,
core->bl_fifo[fifo_idx].bl_tag - 1,
1, fifo_idx);
if (rc) {
CAM_ERR(CAM_CDM,
"CDM hw bl write failed tag=%d",
core->bl_fifo[fifo_idx].bl_tag -
1);
list_del_init(&node->entry);
kfree(node);
return -EIO;
}
rc = cam_hw_cdm_commit_bl_write(cdm_hw,
fifo_idx);
if (rc) {
CAM_ERR(CAM_CDM,
"CDM hw commit failed tag=%d",
core->bl_fifo[fifo_idx].bl_tag -
1);
list_del_init(&node->entry);
kfree(node);
return -EIO;
}
return 0;
}
int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
struct cam_cdm_hw_intf_cmd_submit_bl *req, struct cam_cdm_hw_intf_cmd_submit_bl *req,
struct cam_cdm_client *client) struct cam_cdm_client *client)
@@ -771,13 +842,17 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
bl_fifo->bl_depth); bl_fifo->bl_depth);
} }
if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) ||
test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status))
return -EAGAIN;
mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock); mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock);
mutex_lock(&client->lock); mutex_lock(&client->lock);
if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) ||
test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status)) {
mutex_unlock(&client->lock);
mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock);
return -EAGAIN;
}
rc = cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo(cdm_hw, rc = cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo(cdm_hw,
fifo_idx, &pending_bl); fifo_idx, &pending_bl);
@@ -867,18 +942,28 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
if (core->bl_fifo[fifo_idx].bl_tag >= if (core->bl_fifo[fifo_idx].bl_tag >=
(bl_fifo->bl_depth - 1)) (bl_fifo->bl_depth - 1))
core->bl_fifo[fifo_idx].bl_tag = 0; core->bl_fifo[fifo_idx].bl_tag = 0;
rc = cam_hw_cdm_bl_write(cdm_hw, if (core->arbitration ==
((uint32_t)hw_vaddr_ptr + CAM_CDM_ARBITRATION_PRIORITY_BASED &&
cdm_cmd->cmd[i].offset), (req->data->flag == true) &&
(cdm_cmd->cmd[i].len - 1), (i == (req->data->cmd_arrary_count -
core->bl_fifo[fifo_idx].bl_tag, 1))) {
cdm_cmd->cmd[i].arbitrate, CAM_DBG(CAM_CDM,
fifo_idx); "GenIRQ in same bl, will sumbit later");
if (rc) { } else {
CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d", rc = cam_hw_cdm_bl_write(cdm_hw,
i, req->data->cmd_arrary_count); ((uint32_t)hw_vaddr_ptr +
rc = -EIO; cdm_cmd->cmd[i].offset),
break; (cdm_cmd->cmd[i].len - 1),
core->bl_fifo[fifo_idx].bl_tag,
cdm_cmd->cmd[i].arbitrate,
fifo_idx);
if (rc) {
CAM_ERR(CAM_CDM,
"Hw bl write failed %d:%d",
i, req->data->cmd_arrary_count);
rc = -EIO;
break;
}
} }
} else { } else {
CAM_ERR(CAM_CDM, CAM_ERR(CAM_CDM,
@@ -893,20 +978,31 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
if (!rc) { if (!rc) {
CAM_DBG(CAM_CDM, CAM_DBG(CAM_CDM,
"write BL success for cnt=%d with tag=%d total_cnt=%d", "write BL done cnt=%d with tag=%d total_cnt=%d",
i, core->bl_fifo[fifo_idx].bl_tag, i, core->bl_fifo[fifo_idx].bl_tag,
req->data->cmd_arrary_count); req->data->cmd_arrary_count);
CAM_DBG(CAM_CDM, "Now commit the BL"); if (core->arbitration ==
if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { CAM_CDM_ARBITRATION_PRIORITY_BASED &&
CAM_ERR(CAM_CDM, (req->data->flag == true) &&
"Cannot commit the BL %d tag=%d", (i == (req->data->cmd_arrary_count -
1))) {
CAM_DBG(CAM_CDM,
"GenIRQ in same blcommit later");
} else {
CAM_DBG(CAM_CDM, "Now commit the BL");
if (cam_hw_cdm_commit_bl_write(cdm_hw,
fifo_idx)) {
CAM_ERR(CAM_CDM,
"commit failed BL %d tag=%d",
i, core->bl_fifo[fifo_idx]
.bl_tag);
rc = -EIO;
break;
}
CAM_DBG(CAM_CDM, "commit success BL %d tag=%d",
i, core->bl_fifo[fifo_idx].bl_tag); i, core->bl_fifo[fifo_idx].bl_tag);
rc = -EIO;
break;
} }
CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
core->bl_fifo[fifo_idx].bl_tag);
core->bl_fifo[fifo_idx].bl_tag++; core->bl_fifo[fifo_idx].bl_tag++;
if (cdm_cmd->cmd[i].enable_debug_gen_irq) { if (cdm_cmd->cmd[i].enable_debug_gen_irq) {
@@ -923,11 +1019,21 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
if ((req->data->flag == true) && if ((req->data->flag == true) &&
(i == (req->data->cmd_arrary_count - (i == (req->data->cmd_arrary_count -
1))) { 1))) {
rc = cam_hw_cdm_submit_gen_irq( if (core->arbitration !=
cdm_hw, req, fifo_idx, CAM_CDM_ARBITRATION_PRIORITY_BASED) {
cdm_cmd->gen_irq_arb); rc = cam_hw_cdm_submit_gen_irq(
if (rc == 0) cdm_hw, req, fifo_idx,
core->bl_fifo[fifo_idx].bl_tag++; cdm_cmd->gen_irq_arb);
if (rc == 0)
core->bl_fifo[fifo_idx]
.bl_tag++;
break;
}
rc = cam_hw_cdm_arb_submit_bl(cdm_hw, req, i,
fifo_idx, hw_vaddr_ptr);
if (rc)
break;
} }
} }
} }
@@ -971,6 +1077,8 @@ static void cam_hw_cdm_reset_cleanup(
kfree(node); kfree(node);
} }
core->bl_fifo[i].bl_tag = 0; core->bl_fifo[i].bl_tag = 0;
core->bl_fifo[i].last_bl_tag_done = -1;
core->bl_fifo[i].work_record = 0;
} }
} }
@@ -985,14 +1093,22 @@ static void cam_hw_cdm_work(struct work_struct *work)
if (payload) { if (payload) {
cdm_hw = payload->hw; cdm_hw = payload->hw;
core = (struct cam_cdm *)cdm_hw->core_info; core = (struct cam_cdm *)cdm_hw->core_info;
if (payload->fifo_idx >= core->offsets->reg_data->num_bl_fifo) {
CAM_ERR(CAM_CDM, "Invalid fifo idx %d",
payload->fifo_idx);
kfree(payload);
return;
}
CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status); CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status);
if (payload->irq_status & if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) { CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) {
struct cam_cdm_bl_cb_request_entry *node, *tnode; struct cam_cdm_bl_cb_request_entry *node, *tnode;
CAM_DBG(CAM_CDM, "inline IRQ data=0x%x", CAM_DBG(CAM_CDM, "inline IRQ data=0x%x last tag: 0x%x",
payload->irq_data); payload->irq_data,
core->bl_fifo[payload->fifo_idx]
.last_bl_tag_done);
if (payload->irq_data == 0xff) { if (payload->irq_data == 0xff) {
CAM_INFO(CAM_CDM, "Debug genirq received"); CAM_INFO(CAM_CDM, "Debug genirq received");
@@ -1002,37 +1118,47 @@ static void cam_hw_cdm_work(struct work_struct *work)
mutex_lock(&core->bl_fifo[payload->fifo_idx] mutex_lock(&core->bl_fifo[payload->fifo_idx]
.fifo_lock); .fifo_lock);
list_for_each_entry_safe(node, tnode,
if (core->bl_fifo[payload->fifo_idx].work_record)
core->bl_fifo[payload->fifo_idx].work_record--;
if (core->bl_fifo[payload->fifo_idx]
.last_bl_tag_done !=
payload->irq_data) {
core->bl_fifo[payload->fifo_idx]
.last_bl_tag_done =
payload->irq_data;
list_for_each_entry_safe(node, tnode,
&core->bl_fifo[payload->fifo_idx] &core->bl_fifo[payload->fifo_idx]
.bl_request_list, .bl_request_list,
entry) { entry) {
if (node->request_type == if (node->request_type ==
CAM_HW_CDM_BL_CB_CLIENT) { CAM_HW_CDM_BL_CB_CLIENT) {
cam_cdm_notify_clients(cdm_hw, cam_cdm_notify_clients(cdm_hw,
CAM_CDM_CB_STATUS_BL_SUCCESS, CAM_CDM_CB_STATUS_BL_SUCCESS,
(void *)node); (void *)node);
} else if (node->request_type == } else if (node->request_type ==
CAM_HW_CDM_BL_CB_INTERNAL) { CAM_HW_CDM_BL_CB_INTERNAL) {
CAM_ERR(CAM_CDM, CAM_ERR(CAM_CDM,
"Invalid node=%pK %d", node, "Invalid node=%pK %d",
node->request_type); node,
node->request_type);
}
list_del_init(&node->entry);
if (node->bl_tag == payload->irq_data) {
kfree(node);
break;
}
} }
list_del_init(&node->entry); } else {
if (node->bl_tag == payload->irq_data) { CAM_DBG(CAM_CDM,
kfree(node); "Skip GenIRQ, tag 0x%x fifo %d",
break; payload->irq_data, payload->fifo_idx);
}
kfree(node);
} }
mutex_unlock(&core->bl_fifo[payload->fifo_idx] mutex_unlock(&core->bl_fifo[payload->fifo_idx]
.fifo_lock); .fifo_lock);
} }
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_RST_DONE_MASK) {
CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
complete(&core->reset_complete);
}
if (payload->irq_status & if (payload->irq_status &
CAM_CDM_IRQ_STATUS_BL_DONE_MASK) { CAM_CDM_IRQ_STATUS_BL_DONE_MASK) {
if (test_bit(payload->fifo_idx, &core->cdm_status)) { if (test_bit(payload->fifo_idx, &core->cdm_status)) {
@@ -1051,7 +1177,14 @@ static void cam_hw_cdm_work(struct work_struct *work)
for (i = 0; i < core->offsets->reg_data->num_bl_fifo; for (i = 0; i < core->offsets->reg_data->num_bl_fifo;
i++) i++)
mutex_lock(&core->bl_fifo[i].fifo_lock); mutex_lock(&core->bl_fifo[i].fifo_lock);
/*
* First pause CDM, If it fails still proceed
* to dump debug info
*/
cam_hw_cdm_pause_core(cdm_hw, true);
cam_hw_cdm_dump_core_debug_registers(cdm_hw); cam_hw_cdm_dump_core_debug_registers(cdm_hw);
/* Resume CDM back */
cam_hw_cdm_pause_core(cdm_hw, false);
for (i = 0; i < core->offsets->reg_data->num_bl_fifo; for (i = 0; i < core->offsets->reg_data->num_bl_fifo;
i++) i++)
mutex_unlock(&core->bl_fifo[i].fifo_lock); mutex_unlock(&core->bl_fifo[i].fifo_lock);
@@ -1083,7 +1216,17 @@ static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
mutex_lock(&cdm_hw->hw_mutex); mutex_lock(&cdm_hw->hw_mutex);
for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++)
mutex_lock(&core->bl_fifo[i].fifo_lock); mutex_lock(&core->bl_fifo[i].fifo_lock);
cam_hw_cdm_dump_core_debug_registers(cdm_hw); if (cdm_hw->hw_state == CAM_HW_STATE_POWER_UP) {
/*
* First pause CDM, If it fails still proceed
* to dump debug info
*/
cam_hw_cdm_pause_core(cdm_hw, true);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
/* Resume CDM back */
cam_hw_cdm_pause_core(cdm_hw, false);
} else
CAM_INFO(CAM_CDM, "CDM hw is power in off state");
for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++)
mutex_unlock(&core->bl_fifo[i].fifo_lock); mutex_unlock(&core->bl_fifo[i].fifo_lock);
mutex_unlock(&cdm_hw->hw_mutex); mutex_unlock(&cdm_hw->hw_mutex);
@@ -1109,7 +1252,12 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
int i; int i;
CAM_DBG(CAM_CDM, "Got irq"); CAM_DBG(CAM_CDM, "Got irq");
spin_lock(&cdm_hw->hw_lock);
if (cdm_hw->hw_state == CAM_HW_STATE_POWER_DOWN) {
CAM_DBG(CAM_CDM, "CDM is in power down state");
spin_unlock(&cdm_hw->hw_lock);
return IRQ_HANDLED;
}
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) { for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) {
if (cam_cdm_read_hw_reg(cdm_hw, if (cam_cdm_read_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[i]->irq_status, cdm_core->offsets->irq_reg[i]->irq_status,
@@ -1126,35 +1274,46 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
if (cam_cdm_write_hw_reg(cdm_hw, if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[0]->irq_clear_cmd, 0x01)) cdm_core->offsets->irq_reg[0]->irq_clear_cmd, 0x01))
CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd 0"); CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ clr cmd");
if (cam_cdm_read_hw_reg(cdm_hw,
cdm_core->offsets->cmn_reg->usr_data,
&user_data))
CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ data");
spin_unlock(&cdm_hw->hw_lock);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) { for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) {
if (!irq_status[i]) if (!irq_status[i])
continue; continue;
if (irq_status[i] & CAM_CDM_IRQ_STATUS_RST_DONE_MASK) {
cdm_core->rst_done_cnt++;
continue;
}
payload[i] = kzalloc(sizeof(struct cam_cdm_work_payload), payload[i] = kzalloc(sizeof(struct cam_cdm_work_payload),
GFP_ATOMIC); GFP_ATOMIC);
if (!payload[i]) if (!payload[i]) {
CAM_ERR(CAM_CDM,
"failed to allocate memory for fifo %d payload",
i);
continue; continue;
}
if (irq_status[i] & CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) { if (irq_status[i] & CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) {
if (cam_cdm_read_hw_reg(cdm_hw,
cdm_core->offsets->cmn_reg->usr_data,
&user_data)) {
CAM_ERR(CAM_CDM,
"Failed to read CDM HW IRQ data");
kfree(payload[i]);
return IRQ_HANDLED;
}
payload[i]->irq_data = user_data >> (i * 0x8); payload[i]->irq_data = (user_data >> (i * 0x8)) &
CAM_CDM_IRQ_STATUS_USR_DATA_MASK;
if (payload[i]->irq_data == if (payload[i]->irq_data ==
CAM_CDM_DBG_GEN_IRQ_USR_DATA) CAM_CDM_DBG_GEN_IRQ_USR_DATA)
CAM_INFO(CAM_CDM, "Debug gen_irq received"); CAM_INFO(CAM_CDM, "Debug gen_irq received");
} }
CAM_DBG(CAM_CDM,
"Rcvd of fifo %d userdata 0x%x tag 0x%x irq_stat 0x%x",
i, user_data, payload[i]->irq_data, irq_status[i]);
payload[i]->fifo_idx = i; payload[i]->fifo_idx = i;
payload[i]->irq_status = irq_status[i]; payload[i]->irq_status = irq_status[i];
payload[i]->hw = cdm_hw; payload[i]->hw = cdm_hw;
@@ -1162,10 +1321,9 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
INIT_WORK((struct work_struct *)&payload[i]->work, INIT_WORK((struct work_struct *)&payload[i]->work,
cam_hw_cdm_work); cam_hw_cdm_work);
trace_cam_log_event("CDM_DONE", "CDM_DONE_IRQ", trace_cam_log_event("CDM_DONE", "CDM_DONE_IRQ",
payload[i]->irq_status, payload[i]->irq_status,
cdm_hw->soc_info.index); cdm_hw->soc_info.index);
if (cam_cdm_write_hw_reg(cdm_hw, if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[i]->irq_clear, cdm_core->offsets->irq_reg[i]->irq_clear,
payload[i]->irq_status)) { payload[i]->irq_status)) {
@@ -1175,18 +1333,30 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
work_status = queue_work( cdm_core->bl_fifo[i].work_record++;
work_status = queue_work(
cdm_core->bl_fifo[i].work_queue, cdm_core->bl_fifo[i].work_queue,
&payload[i]->work); &payload[i]->work);
if (work_status == false) { if (work_status == false) {
CAM_ERR(CAM_CDM, CAM_ERR(CAM_CDM,
"Failed to queue work for irq=0x%x", "Failed to queue work for FIFO: %d irq=0x%x",
payload[i]->irq_status); i, payload[i]->irq_status);
kfree(payload[i]); kfree(payload[i]);
} }
} }
if (cdm_core->rst_done_cnt ==
cdm_core->offsets->reg_data->num_bl_fifo_irq) {
CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
complete(&cdm_core->reset_complete);
}
if (cdm_core->rst_done_cnt &&
cdm_core->rst_done_cnt !=
cdm_core->offsets->reg_data->num_bl_fifo_irq)
CAM_INFO(CAM_CDM,
"Reset IRQ received for %d fifos instead of %d",
cdm_core->rst_done_cnt,
cdm_core->offsets->reg_data->num_bl_fifo_irq);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@@ -1254,16 +1424,24 @@ int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle)
struct cam_cdm *cdm_core = NULL; struct cam_cdm *cdm_core = NULL;
long time_left; long time_left;
int i, rc = -EIO; int i, rc = -EIO;
int reset_val = 1;
cdm_core = (struct cam_cdm *)cdm_hw->core_info; cdm_core = (struct cam_cdm *)cdm_hw->core_info;
set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
reinit_completion(&cdm_core->reset_complete);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
cdm_core->rst_done_cnt = 0;
reinit_completion(&cdm_core->reset_complete);
/* First pause CDM, If it fails still proceed to reset CDM HW */
cam_hw_cdm_pause_core(cdm_hw, true);
usleep_range(1000, 1010);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
reset_val = reset_val |
(1 << (i + CAM_CDM_BL_FIFO_FLUSH_SHIFT));
if (cam_cdm_write_hw_reg(cdm_hw, if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[i]->irq_mask, cdm_core->offsets->irq_reg[i]->irq_mask,
0x70003)) { 0x70003)) {
@@ -1273,7 +1451,7 @@ int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle)
} }
if (cam_cdm_write_hw_reg(cdm_hw, if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->cmn_reg->rst_cmd, 0x9)) { cdm_core->offsets->cmn_reg->rst_cmd, reset_val)) {
CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
goto end; goto end;
} }
@@ -1318,16 +1496,21 @@ int cam_hw_cdm_handle_error_info(
long time_left; long time_left;
int i, rc = -EIO, reset_hw_hdl = 0x0; int i, rc = -EIO, reset_hw_hdl = 0x0;
uint32_t current_bl_data = 0, current_fifo = 0, current_tag = 0; uint32_t current_bl_data = 0, current_fifo = 0, current_tag = 0;
int reset_val = 1;
cdm_core = (struct cam_cdm *)cdm_hw->core_info; cdm_core = (struct cam_cdm *)cdm_hw->core_info;
set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
set_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status);
reinit_completion(&cdm_core->reset_complete);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
cdm_core->rst_done_cnt = 0;
reinit_completion(&cdm_core->reset_complete);
set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
set_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status);
/* First pause CDM, If it fails still proceed to dump debug info */
cam_hw_cdm_pause_core(cdm_hw, true);
rc = cam_cdm_read_hw_reg(cdm_hw, rc = cam_cdm_read_hw_reg(cdm_hw,
cdm_core->offsets->cmn_reg->current_bl_len, cdm_core->offsets->cmn_reg->current_bl_len,
&current_bl_data); &current_bl_data);
@@ -1349,6 +1532,8 @@ int cam_hw_cdm_handle_error_info(
cam_hw_cdm_dump_core_debug_registers(cdm_hw); cam_hw_cdm_dump_core_debug_registers(cdm_hw);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
reset_val = reset_val |
(1 << (i + CAM_CDM_BL_FIFO_FLUSH_SHIFT));
if (cam_cdm_write_hw_reg(cdm_hw, if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[i]->irq_mask, cdm_core->offsets->irq_reg[i]->irq_mask,
0x70003)) { 0x70003)) {
@@ -1358,7 +1543,7 @@ int cam_hw_cdm_handle_error_info(
} }
if (cam_cdm_write_hw_reg(cdm_hw, if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->cmn_reg->rst_cmd, 0x9)) { cdm_core->offsets->cmn_reg->rst_cmd, reset_val)) {
CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
goto end; goto end;
} }
@@ -1437,14 +1622,38 @@ int cam_hw_cdm_handle_error(
cdm_core = (struct cam_cdm *)cdm_hw->core_info; cdm_core = (struct cam_cdm *)cdm_hw->core_info;
/* First pause CDM, If it fails still proceed to dump debug info */
cam_hw_cdm_enable_core(cdm_hw, false);
rc = cam_hw_cdm_handle_error_info(cdm_hw, handle); rc = cam_hw_cdm_handle_error_info(cdm_hw, handle);
return rc; return rc;
} }
int cam_hw_cdm_hang_detect(
struct cam_hw_info *cdm_hw,
uint32_t handle)
{
struct cam_cdm *cdm_core = NULL;
int i, rc = -1;
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
if (cdm_core->bl_fifo[i].work_record) {
CAM_WARN(CAM_CDM,
"workqueue got delayed, work_record :%u",
cdm_core->bl_fifo[i].work_record);
rc = 0;
break;
}
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock);
return rc;
}
int cam_hw_cdm_get_cdm_config(struct cam_hw_info *cdm_hw) int cam_hw_cdm_get_cdm_config(struct cam_hw_info *cdm_hw)
{ {
struct cam_hw_soc_info *soc_info = NULL; struct cam_hw_soc_info *soc_info = NULL;
@@ -1517,6 +1726,7 @@ int cam_hw_cdm_init(void *hw_priv,
struct cam_hw_soc_info *soc_info = NULL; struct cam_hw_soc_info *soc_info = NULL;
struct cam_cdm *cdm_core = NULL; struct cam_cdm *cdm_core = NULL;
int rc, i, reset_hw_hdl = 0x0; int rc, i, reset_hw_hdl = 0x0;
unsigned long flags;
if (!hw_priv) if (!hw_priv)
return -EINVAL; return -EINVAL;
@@ -1530,6 +1740,9 @@ int cam_hw_cdm_init(void *hw_priv,
CAM_ERR(CAM_CDM, "Enable platform failed"); CAM_ERR(CAM_CDM, "Enable platform failed");
goto end; goto end;
} }
spin_lock_irqsave(&cdm_hw->hw_lock, flags);
cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
spin_unlock_irqrestore(&cdm_hw->hw_lock, flags);
CAM_DBG(CAM_CDM, "Enable soc done"); CAM_DBG(CAM_CDM, "Enable soc done");
@@ -1540,6 +1753,10 @@ int cam_hw_cdm_init(void *hw_priv,
clear_bit(i, &cdm_core->cdm_status); clear_bit(i, &cdm_core->cdm_status);
reinit_completion(&cdm_core->bl_fifo[i].bl_complete); reinit_completion(&cdm_core->bl_fifo[i].bl_complete);
} }
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
cdm_core->bl_fifo[i].last_bl_tag_done = -1;
cdm_core->bl_fifo[i].work_record = 0;
}
rc = cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl); rc = cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl);
@@ -1548,7 +1765,6 @@ int cam_hw_cdm_init(void *hw_priv,
goto disable_return; goto disable_return;
} else { } else {
CAM_DBG(CAM_CDM, "CDM Init success"); CAM_DBG(CAM_CDM, "CDM Init success");
cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
cam_cdm_write_hw_reg(cdm_hw, cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[i]->irq_mask, cdm_core->offsets->irq_reg[i]->irq_mask,
@@ -1559,6 +1775,9 @@ int cam_hw_cdm_init(void *hw_priv,
disable_return: disable_return:
rc = -EIO; rc = -EIO;
spin_lock_irqsave(&cdm_hw->hw_lock, flags);
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
spin_unlock_irqrestore(&cdm_hw->hw_lock, flags);
cam_soc_util_disable_platform_resource(soc_info, true, true); cam_soc_util_disable_platform_resource(soc_info, true, true);
end: end:
return rc; return rc;
@@ -1570,19 +1789,74 @@ int cam_hw_cdm_deinit(void *hw_priv,
struct cam_hw_info *cdm_hw = hw_priv; struct cam_hw_info *cdm_hw = hw_priv;
struct cam_hw_soc_info *soc_info = NULL; struct cam_hw_soc_info *soc_info = NULL;
struct cam_cdm *cdm_core = NULL; struct cam_cdm *cdm_core = NULL;
int rc = 0; struct cam_cdm_bl_cb_request_entry *node, *tnode;
int rc = 0, i;
uint32_t reset_val = 1;
long time_left;
unsigned long flags;
if (!hw_priv) if (!hw_priv)
return -EINVAL; return -EINVAL;
soc_info = &cdm_hw->soc_info; soc_info = &cdm_hw->soc_info;
cdm_core = cdm_hw->core_info; cdm_core = (struct cam_cdm *)cdm_hw->core_info;
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
/*clear bl request */
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
list_for_each_entry_safe(node, tnode,
&cdm_core->bl_fifo[i].bl_request_list, entry) {
list_del_init(&node->entry);
kfree(node);
}
}
set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
cdm_core->rst_done_cnt = 0;
reinit_completion(&cdm_core->reset_complete);
/* First pause CDM, If it fails still proceed to reset CDM HW */
cam_hw_cdm_pause_core(cdm_hw, true);
usleep_range(1000, 1010);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
reset_val = reset_val |
(1 << (i + CAM_CDM_BL_FIFO_FLUSH_SHIFT));
if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->irq_reg[i]->irq_mask,
0x70003)) {
CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
}
}
if (cam_cdm_write_hw_reg(cdm_hw,
cdm_core->offsets->cmn_reg->rst_cmd, reset_val)) {
CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
}
CAM_DBG(CAM_CDM, "Waiting for CDM HW reset done");
time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
if (time_left <= 0) {
rc = -ETIMEDOUT;
CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc);
}
clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock);
spin_lock_irqsave(&cdm_hw->hw_lock, flags);
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
spin_unlock_irqrestore(&cdm_hw->hw_lock, flags);
rc = cam_soc_util_disable_platform_resource(soc_info, true, true); rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc) { if (rc) {
CAM_ERR(CAM_CDM, "disable platform failed"); CAM_ERR(CAM_CDM, "disable platform failed");
} else { } else {
CAM_DBG(CAM_CDM, "CDM Deinit success"); CAM_DBG(CAM_CDM, "CDM Deinit success");
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
} }
return rc; return rc;
@@ -1653,6 +1927,7 @@ static int cam_hw_cdm_component_bind(struct device *dev,
goto release_private_mem; goto release_private_mem;
} }
cdm_core->rst_done_cnt = 0;
init_completion(&cdm_core->reset_complete); init_completion(&cdm_core->reset_complete);
cdm_hw_intf->hw_priv = cdm_hw; cdm_hw_intf->hw_priv = cdm_hw;
cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps; cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;


@@ -480,6 +480,33 @@ int cam_cdm_handle_error(uint32_t handle)
} }
EXPORT_SYMBOL(cam_cdm_handle_error); EXPORT_SYMBOL(cam_cdm_handle_error);
int cam_cdm_detect_hang_error(uint32_t handle)
{
uint32_t hw_index;
int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
rc = -EPERM;
return rc;
}
hw_index = CAM_CDM_GET_HW_IDX(handle);
if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
hw = cdm_mgr.nodes[hw_index].device;
if (hw && hw->hw_ops.process_cmd)
rc = hw->hw_ops.process_cmd(hw->hw_priv,
CAM_CDM_HW_INTF_CMD_HANG_DETECT,
&handle,
sizeof(handle));
}
put_cdm_mgr_refcount();
return rc;
}
EXPORT_SYMBOL(cam_cdm_detect_hang_error);
int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw, int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
struct cam_cdm_private_dt_data *data, enum cam_cdm_type type, struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
uint32_t *index) uint32_t *index)


@@ -280,4 +280,13 @@ int cam_cdm_handle_error(uint32_t handle);
*/ */
struct cam_cdm_utils_ops *cam_cdm_publish_ops(void); struct cam_cdm_utils_ops *cam_cdm_publish_ops(void);
/**
* @brief : API to detect hang in previously acquired CDM,
* this should be only performed only if the CDM is private.
*
* @handle : Input handle of the CDM to detect hang
*
* @return 0 on success
*/
int cam_cdm_detect_hang_error(uint32_t handle);
#endif /* _CAM_CDM_API_H_ */ #endif /* _CAM_CDM_API_H_ */
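
For context on how this new interface is meant to be consumed, below is a minimal, illustrative sketch (not part of this commit) of a client-side request-timeout path calling cam_cdm_detect_hang_error(); the function and variable names are hypothetical.

/*
 * Illustrative sketch only: a client (e.g. an OPE-style hw manager that
 * privately owns its CDM) checking for a stalled CDM from its request
 * timeout handler. Names below are hypothetical.
 */
static void example_cdm_timeout_check(uint32_t cdm_handle)
{
	int rc;

	/*
	 * cam_cdm_detect_hang_error() returns 0 when pending CDM work is
	 * found to be merely delayed in the workqueue (work_record != 0),
	 * i.e. not a real HW hang; a non-zero return means no delayed
	 * work was found for any BL FIFO.
	 */
	rc = cam_cdm_detect_hang_error(cdm_handle);
	if (rc == 0)
		CAM_WARN(CAM_OPE, "CDM work delayed, skip recovery");
	else
		CAM_ERR(CAM_OPE, "No delayed CDM work, possible HW hang");
}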


@@ -462,7 +462,7 @@ static int cam_cpas_util_set_camnoc_axi_clk_rate(
if (soc_private->control_camnoc_axi_clk) { if (soc_private->control_camnoc_axi_clk) {
struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
uint64_t required_camnoc_bw = 0, intermediate_result = 0; uint64_t required_camnoc_bw = 0, intermediate_result = 0;
int32_t clk_rate = 0; int64_t clk_rate = 0;
for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) { for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) {
tree_node = soc_private->tree_node[i]; tree_node = soc_private->tree_node[i];
@@ -511,7 +511,7 @@ static int cam_cpas_util_set_camnoc_axi_clk_rate(
do_div(intermediate_result, soc_private->camnoc_bus_width); do_div(intermediate_result, soc_private->camnoc_bus_width);
clk_rate = intermediate_result; clk_rate = intermediate_result;
CAM_DBG(CAM_CPAS, "Setting camnoc axi clk rate : %llu %d", CAM_DBG(CAM_CPAS, "Setting camnoc axi clk rate : %llu %lld",
required_camnoc_bw, clk_rate); required_camnoc_bw, clk_rate);
/* /*
@@ -524,7 +524,7 @@ static int cam_cpas_util_set_camnoc_axi_clk_rate(
rc = cam_soc_util_set_src_clk_rate(soc_info, clk_rate); rc = cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
if (rc) if (rc)
CAM_ERR(CAM_CPAS, CAM_ERR(CAM_CPAS,
"Failed in setting camnoc axi clk %llu %d %d", "Failed in setting camnoc axi clk %llu %lld %d",
required_camnoc_bw, clk_rate, rc); required_camnoc_bw, clk_rate, rc);
cpas_core->applied_camnoc_axi_rate = clk_rate; cpas_core->applied_camnoc_axi_rate = clk_rate;


@@ -24,6 +24,7 @@
#include "cpastop_v480_100.h" #include "cpastop_v480_100.h"
#include "cpastop_v580_100.h" #include "cpastop_v580_100.h"
#include "cpastop_v540_100.h" #include "cpastop_v540_100.h"
#include "cpastop_v520_100.h"
struct cam_camnoc_info *camnoc_info; struct cam_camnoc_info *camnoc_info;
@@ -86,6 +87,25 @@ static const uint32_t cam_cpas_hw_version_map
0, 0,
0, 0,
}, },
/* for camera_520 */
{
CAM_CPAS_TITAN_520_V100,
0,
0,
0,
0,
0,
},
/* for camera_540 */
{
CAM_CPAS_TITAN_540_V100,
0,
0,
0,
0,
0,
},
}; };
static int cam_cpas_translate_camera_cpas_version_id( static int cam_cpas_translate_camera_cpas_version_id(
@@ -113,6 +133,14 @@ static int cam_cpas_translate_camera_cpas_version_id(
*cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_480; *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_480;
break; break;
case CAM_CPAS_CAMERA_VERSION_520:
*cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_520;
break;
case CAM_CPAS_CAMERA_VERSION_540:
*cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_540;
break;
case CAM_CPAS_CAMERA_VERSION_580: case CAM_CPAS_CAMERA_VERSION_580:
*cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_580; *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_580;
break; break;
@@ -716,6 +744,9 @@ static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
case CAM_CPAS_TITAN_540_V100: case CAM_CPAS_TITAN_540_V100:
camnoc_info = &cam540_cpas100_camnoc_info; camnoc_info = &cam540_cpas100_camnoc_info;
break; break;
case CAM_CPAS_TITAN_520_V100:
camnoc_info = &cam520_cpas100_camnoc_info;
break;
default: default:
CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d", CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
hw_caps->camera_version.major, hw_caps->camera_version.major,


@@ -0,0 +1,240 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#ifndef _CPASTOP_V520_100_H_
#define _CPASTOP_V520_100_H_
#define TEST_IRQ_ENABLE 0
static struct cam_camnoc_irq_sbm cam_cpas_v520_100_irq_sbm = {
.sbm_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0xA40, /* SBM_FAULTINEN0_LOW */
.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
(TEST_IRQ_ENABLE ?
0x2 : /* SBM_FAULTINEN0_LOW_PORT6_MASK */
0x0) /* SBM_FAULTINEN0_LOW_PORT1_MASK */,
},
.sbm_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0xA48, /* SBM_FAULTINSTATUS0_LOW */
},
.sbm_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0xA80, /* SBM_FLAGOUTCLR0_LOW */
.value = TEST_IRQ_ENABLE ? 0x3 : 0x1,
}
};
static struct cam_camnoc_irq_err
cam_cpas_v520_100_irq_err[] = {
{
.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
.enable = true,
.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0xD08, /* ERRORLOGGER_MAINCTL_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0xD10, /* ERRORLOGGER_ERRVLD_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0xD18, /* ERRORLOGGER_ERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
.enable = TEST_IRQ_ENABLE ? true : false,
.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0xA88, /* SBM_FLAGOUTSET0_LOW */
.value = 0x1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0xA90, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
};
static struct cam_camnoc_specific
cam_cpas_v520_100_camnoc_specific[] = {
{
.port_type = CAM_CAMNOC_CDM,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xE30, /* CDM_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xE34, /* CDM_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xE38, /* CDM_URGENCY_LOW */
.value = 0x00000003,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xE40, /* CDM_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xE48, /* CDM_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_TFE,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
/* TFE_PRIORITYLUT_LOW */
.offset = 0x30,
.value = 0x44443333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
/* TFE_PRIORITYLUT_HIGH */
.offset = 0x34,
.value = 0x66665555,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x38, /* TFE_URGENCY_LOW */
.value = 0x00001030,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x40, /* TFE_DANGERLUT_LOW */
.value = 0xffff0000,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x48, /* TFE_SAFELUT_LOW */
.value = 0x00000003,
},
.ubwc_ctl = {
/*
* Do not explicitly set ubwc config register.
* Power on default values are taking care of required
* register settings.
*/
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_OPE,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x430, /* OPE_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x434, /* OPE_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x438, /* OPE_URGENCY_LOW */
.value = 0x00000033,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x440, /* OPE_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x448, /* OPE_SAFELUT_LOW */
.value = 0xF,
},
.ubwc_ctl = {
/*
* Do not explicitly set ubwc config register.
* Power on default values are taking care of required
* register settings.
*/
.enable = false,
},
},
};
static struct cam_camnoc_err_logger_info cam520_cpas100_err_logger_offsets = {
.mainctrl = 0xD08, /* ERRLOGGER_MAINCTL_LOW */
.errvld = 0xD10, /* ERRLOGGER_ERRVLD_LOW */
.errlog0_low = 0xD20, /* ERRLOGGER_ERRLOG0_LOW */
.errlog0_high = 0xD24, /* ERRLOGGER_ERRLOG0_HIGH */
.errlog1_low = 0xD28, /* ERRLOGGER_ERRLOG1_LOW */
.errlog1_high = 0xD2C, /* ERRLOGGER_ERRLOG1_HIGH */
.errlog2_low = 0xD30, /* ERRLOGGER_ERRLOG2_LOW */
.errlog2_high = 0xD34, /* ERRLOGGER_ERRLOG2_HIGH */
.errlog3_low = 0xD38, /* ERRLOGGER_ERRLOG3_LOW */
.errlog3_high = 0xD3C, /* ERRLOGGER_ERRLOG3_HIGH */
};
static struct cam_camnoc_info cam520_cpas100_camnoc_info = {
.specific = &cam_cpas_v520_100_camnoc_specific[0],
.specific_size = ARRAY_SIZE(cam_cpas_v520_100_camnoc_specific),
.irq_sbm = &cam_cpas_v520_100_irq_sbm,
.irq_err = &cam_cpas_v520_100_irq_err[0],
.irq_err_size = ARRAY_SIZE(cam_cpas_v520_100_irq_err),
.err_logger = &cam520_cpas100_err_logger_offsets,
.errata_wa_list = NULL,
};
#endif /* _CPASTOP_V520_100_H_ */


@@ -41,6 +41,8 @@ enum cam_cpas_camera_version {
CAM_CPAS_CAMERA_VERSION_170 = 0x00010700, CAM_CPAS_CAMERA_VERSION_170 = 0x00010700,
CAM_CPAS_CAMERA_VERSION_175 = 0x00010705, CAM_CPAS_CAMERA_VERSION_175 = 0x00010705,
CAM_CPAS_CAMERA_VERSION_480 = 0x00040800, CAM_CPAS_CAMERA_VERSION_480 = 0x00040800,
CAM_CPAS_CAMERA_VERSION_520 = 0x00050200,
CAM_CPAS_CAMERA_VERSION_540 = 0x00050400,
CAM_CPAS_CAMERA_VERSION_580 = 0x00050800, CAM_CPAS_CAMERA_VERSION_580 = 0x00050800,
CAM_CPAS_CAMERA_VERSION_MAX CAM_CPAS_CAMERA_VERSION_MAX
}; };
@@ -69,6 +71,8 @@ enum cam_cpas_camera_version_map_id {
CAM_CPAS_CAMERA_VERSION_ID_175 = 0x2, CAM_CPAS_CAMERA_VERSION_ID_175 = 0x2,
CAM_CPAS_CAMERA_VERSION_ID_480 = 0x3, CAM_CPAS_CAMERA_VERSION_ID_480 = 0x3,
CAM_CPAS_CAMERA_VERSION_ID_580 = 0x4, CAM_CPAS_CAMERA_VERSION_ID_580 = 0x4,
CAM_CPAS_CAMERA_VERSION_ID_520 = 0x5,
CAM_CPAS_CAMERA_VERSION_ID_540 = 0x6,
CAM_CPAS_CAMERA_VERSION_ID_MAX CAM_CPAS_CAMERA_VERSION_ID_MAX
}; };
@@ -103,6 +107,7 @@ enum cam_cpas_hw_version {
CAM_CPAS_TITAN_480_V100 = 0x480100, CAM_CPAS_TITAN_480_V100 = 0x480100,
CAM_CPAS_TITAN_580_V100 = 0x580100, CAM_CPAS_TITAN_580_V100 = 0x580100,
CAM_CPAS_TITAN_540_V100 = 0x540100, CAM_CPAS_TITAN_540_V100 = 0x540100,
CAM_CPAS_TITAN_520_V100 = 0x520100,
CAM_CPAS_TITAN_MAX CAM_CPAS_TITAN_MAX
}; };


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/ */
#ifndef _HFI_INTF_H_ #ifndef _HFI_INTF_H_


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/ */
#ifndef _CAM_HFI_REG_H_ #ifndef _CAM_HFI_REG_H_


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/ */
#include <linux/io.h> #include <linux/io.h>


@@ -26,6 +26,12 @@
*/ */
#define CAM_ISP_CTX_RES_MAX 24 #define CAM_ISP_CTX_RES_MAX 24
/*
* Maximum configuration entry size - This is based on the
* worst case DUAL IFE use case plus some margin.
*/
#define CAM_ISP_CTX_CFG_MAX 25
/* /*
* Maximum entries in state monitoring array for error logging * Maximum entries in state monitoring array for error logging
*/ */


@@ -4205,8 +4205,10 @@ static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
vfe_reset_type = CAM_VFE_HW_RESET_HW; vfe_reset_type = CAM_VFE_HW_RESET_HW;
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) { for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if ((!hw_mgr->ife_devices[i]) || if (!hw_mgr->ife_devices[i])
(hw_idx != hw_mgr->ife_devices[i]->hw_idx)) continue;
if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
continue; continue;
CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx); CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
vfe_hw_intf = hw_mgr->ife_devices[i]; vfe_hw_intf = hw_mgr->ife_devices[i];


@@ -54,7 +54,8 @@ static int cam_tfe_mgr_regspace_data_cb(uint32_t reg_base_type,
*soc_info_ptr = NULL; *soc_info_ptr = NULL;
list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
if (hw_mgr_res->res_id != CAM_ISP_HW_TFE_IN_CAMIF) if ((hw_mgr_res->res_id != CAM_ISP_HW_TFE_IN_CAMIF) &&
!ctx->is_rdi_only_context)
continue; continue;
switch (reg_base_type) { switch (reg_base_type) {
@@ -117,9 +118,11 @@ static int cam_tfe_mgr_regspace_data_cb(uint32_t reg_base_type,
static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx, static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx,
struct cam_cmd_buf_desc *reg_dump_buf_desc, uint32_t num_reg_dump_buf, struct cam_cmd_buf_desc *reg_dump_buf_desc, uint32_t num_reg_dump_buf,
uint32_t meta_type) uint32_t meta_type,
void *soc_dump_args,
bool user_triggered_dump)
{ {
int rc = 0, i; int rc = -EINVAL, i;
if (!num_reg_dump_buf || !reg_dump_buf_desc) { if (!num_reg_dump_buf || !reg_dump_buf_desc) {
CAM_DBG(CAM_ISP, CAM_DBG(CAM_ISP,
@@ -141,8 +144,8 @@ static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx,
&reg_dump_buf_desc[i], &reg_dump_buf_desc[i],
ctx->applied_req_id, ctx->applied_req_id,
cam_tfe_mgr_regspace_data_cb, cam_tfe_mgr_regspace_data_cb,
NULL, soc_dump_args,
false); user_triggered_dump);
if (rc) { if (rc) {
CAM_ERR(CAM_ISP, CAM_ERR(CAM_ISP,
"Reg dump failed at idx: %d, rc: %d req_id: %llu meta type: %u", "Reg dump failed at idx: %d, rc: %d req_id: %llu meta type: %u",
@@ -152,7 +155,7 @@ static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx,
} }
} }
return 0; return rc;
} }
static int cam_tfe_mgr_get_hw_caps(void *hw_mgr_priv, static int cam_tfe_mgr_get_hw_caps(void *hw_mgr_priv,
@@ -678,7 +681,7 @@ static void cam_tfe_hw_mgr_dump_all_ctx(void)
list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid,
list) { list) {
for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
if (hw_mgr_res->hw_res[i]) if (!hw_mgr_res->hw_res[i])
continue; continue;
CAM_INFO_RATE_LIMIT(CAM_ISP, CAM_INFO_RATE_LIMIT(CAM_ISP,
@@ -693,7 +696,7 @@ static void cam_tfe_hw_mgr_dump_all_ctx(void)
list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in,
list) { list) {
for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
if (hw_mgr_res->hw_res[i]) if (!hw_mgr_res->hw_res[i])
continue; continue;
CAM_INFO_RATE_LIMIT(CAM_ISP, CAM_INFO_RATE_LIMIT(CAM_ISP,
@@ -1232,7 +1235,7 @@ static int cam_tfe_hw_mgr_acquire_res_tfe_csid_pxl(
if (i == CAM_TFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) { if (i == CAM_TFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
CAM_ERR(CAM_ISP, CAM_ERR(CAM_ISP,
"Can not acquire tfe csid path resource %d", "Can not acquire left tfe csid path resource %d",
path_res_id); path_res_id);
goto put_res; goto put_res;
} }
@@ -1330,6 +1333,8 @@ acquire_successful:
goto end; goto end;
} }
csid_res_temp->hw_res[1] = csid_acquire.node_res; csid_res_temp->hw_res[1] = csid_acquire.node_res;
tfe_ctx->slave_hw_idx =
csid_res_temp->hw_res[1]->hw_intf->hw_idx;
CAM_DBG(CAM_ISP, "CSID right acquired success is_dual %d", CAM_DBG(CAM_ISP, "CSID right acquired success is_dual %d",
in_port->usage_type); in_port->usage_type);
} }
@@ -1654,7 +1659,8 @@ static int cam_tfe_mgr_acquire_hw_for_ctx(
in_port); in_port);
if (rc) { if (rc) {
CAM_ERR(CAM_ISP, CAM_ERR(CAM_ISP,
"Acquire TFE CSID IPP resource Failed"); "Acquire TFE CSID IPP resource Failed dual:%d",
in_port->usage_type);
goto err; goto err;
} }
} }
@@ -1664,7 +1670,8 @@ static int cam_tfe_mgr_acquire_hw_for_ctx(
rc = cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi(tfe_ctx, in_port);
if (rc) {
CAM_ERR(CAM_ISP,
-"Acquire TFE CSID RDI resource Failed");
"Acquire TFE CSID RDI resource Failed dual:%d",
in_port->usage_type);
goto err;
}
}
@@ -1672,14 +1679,15 @@ static int cam_tfe_mgr_acquire_hw_for_ctx(
rc = cam_tfe_hw_mgr_acquire_res_tfe_in(tfe_ctx, in_port, pdaf_enable);
if (rc) {
CAM_ERR(CAM_ISP,
-"Acquire TFE IN resource Failed");
"Acquire TFE IN resource Failed dual:%d", in_port->usage_type);
goto err;
}
CAM_DBG(CAM_ISP, "Acquiring TFE OUT resource...");
rc = cam_tfe_hw_mgr_acquire_res_tfe_out(tfe_ctx, in_port);
if (rc) {
-CAM_ERR(CAM_ISP, "Acquire TFE OUT resource Failed");
CAM_ERR(CAM_ISP, "Acquire TFE OUT resource Failed dual:%d",
in_port->usage_type);
goto err;
}
@@ -1713,7 +1721,8 @@ void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata,
cam_tfe_mgr_handle_reg_dump(ctx,
hw_update_data->reg_dump_buf_desc,
hw_update_data->num_reg_dump_buf,
-CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST);
CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST,
NULL, false);
CAM_DBG(CAM_ISP,
"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu ctx_index=%d",
handle, userdata, status, cookie, ctx->ctx_index);
@@ -1933,6 +1942,7 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
acquire_args->ctxt_to_hw_map = tfe_ctx;
tfe_ctx->ctx_in_use = 1;
tfe_ctx->num_reg_dump_buf = 0;
cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->used_ctx_list, &tfe_ctx);
@@ -2697,6 +2707,9 @@ static int cam_tfe_mgr_reset_tfe_hw(struct cam_tfe_hw_mgr *hw_mgr,
tfe_reset_type = CAM_TFE_HW_RESET_HW;
for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
if (!hw_mgr->tfe_devices[i])
continue;
if (hw_idx != hw_mgr->tfe_devices[i]->hw_idx)
continue;
CAM_DBG(CAM_ISP, "TFE (id = %d) reset", hw_idx);
@@ -2985,6 +2998,159 @@ static int cam_tfe_mgr_write(void *hw_mgr_priv, void *write_args)
return -EPERM;
}
static int cam_tfe_mgr_user_dump_hw(
struct cam_tfe_hw_mgr_ctx *tfe_ctx,
struct cam_hw_dump_args *dump_args)
{
int rc = 0;
struct cam_hw_soc_dump_args soc_dump_args;
if (!tfe_ctx || !dump_args) {
CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK",
tfe_ctx, dump_args);
return -EINVAL;
}
soc_dump_args.buf_handle = dump_args->buf_handle;
soc_dump_args.request_id = dump_args->request_id;
soc_dump_args.offset = dump_args->offset;
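/* Reuse the reg dump descriptors already stored on this context to service the user-triggered dump */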
rc = cam_tfe_mgr_handle_reg_dump(tfe_ctx,
tfe_ctx->reg_dump_buf_desc,
tfe_ctx->num_reg_dump_buf,
CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR,
&soc_dump_args,
true);
if (rc) {
CAM_DBG(CAM_ISP,
"Dump failed req: %lld handle %u offset %u rc %d",
dump_args->request_id,
dump_args->buf_handle,
dump_args->offset,
rc);
return rc;
}
dump_args->offset = soc_dump_args.offset;
return rc;
}
static int cam_tfe_mgr_dump(void *hw_mgr_priv, void *args)
{
struct cam_isp_hw_dump_args isp_hw_dump_args;
struct cam_hw_dump_args *dump_args = (struct cam_hw_dump_args *)args;
struct cam_isp_hw_mgr_res *hw_mgr_res;
struct cam_hw_intf *hw_intf;
struct cam_tfe_hw_mgr_ctx *tfe_ctx = (struct cam_tfe_hw_mgr_ctx *)
dump_args->ctxt_to_hw_map;
int i;
int rc = 0;
/* for some targets, information about the TFE registers to be dumped
* is already submitted with the hw manager. In this case, we
* can dump just the related registers and skip going to core files.
* If dump to this buffer fails due to any reason, fallback to dump
* to the LDAR buffer
*/
isp_hw_dump_args.is_dump_all = true;
if (tfe_ctx->num_reg_dump_buf) {
rc = cam_tfe_mgr_user_dump_hw(tfe_ctx, dump_args);
if (!rc)
isp_hw_dump_args.is_dump_all = false;
}
rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
&isp_hw_dump_args.cpu_addr,
&isp_hw_dump_args.buf_len);
if (rc) {
CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
dump_args->buf_handle, rc);
return rc;
}
isp_hw_dump_args.offset = dump_args->offset;
isp_hw_dump_args.req_id = dump_args->request_id;
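/* Walk the acquired CSID and TFE IN resources and let each HW block append its dump */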
list_for_each_entry(hw_mgr_res, &tfe_ctx->res_list_tfe_csid, list) {
for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
if (!hw_mgr_res->hw_res[i])
continue;
hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
switch (hw_mgr_res->hw_res[i]->res_id) {
case CAM_TFE_CSID_PATH_RES_RDI_0:
case CAM_TFE_CSID_PATH_RES_RDI_1:
case CAM_TFE_CSID_PATH_RES_RDI_2:
if (tfe_ctx->is_rdi_only_context &&
hw_intf->hw_ops.process_cmd) {
rc = hw_intf->hw_ops.process_cmd(
hw_intf->hw_priv,
CAM_ISP_HW_CMD_DUMP_HW,
&isp_hw_dump_args,
sizeof(struct
cam_isp_hw_dump_args));
}
break;
case CAM_TFE_CSID_PATH_RES_IPP:
if (hw_intf->hw_ops.process_cmd) {
rc = hw_intf->hw_ops.process_cmd(
hw_intf->hw_priv,
CAM_ISP_HW_CMD_DUMP_HW,
&isp_hw_dump_args,
sizeof(struct
cam_isp_hw_dump_args));
}
break;
default:
CAM_DBG(CAM_ISP, "not a valid res %d",
hw_mgr_res->res_id);
break;
}
}
}
list_for_each_entry(hw_mgr_res, &tfe_ctx->res_list_tfe_in, list) {
for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
if (!hw_mgr_res->hw_res[i])
continue;
hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
switch (hw_mgr_res->hw_res[i]->res_id) {
case CAM_ISP_HW_TFE_IN_RDI0:
case CAM_ISP_HW_TFE_IN_RDI1:
case CAM_ISP_HW_TFE_IN_RDI2:
if (tfe_ctx->is_rdi_only_context &&
hw_intf->hw_ops.process_cmd) {
rc = hw_intf->hw_ops.process_cmd(
hw_intf->hw_priv,
CAM_ISP_HW_CMD_DUMP_HW,
&isp_hw_dump_args,
sizeof(struct
cam_isp_hw_dump_args));
}
break;
case CAM_ISP_HW_TFE_IN_CAMIF:
if (hw_intf->hw_ops.process_cmd) {
rc = hw_intf->hw_ops.process_cmd(
hw_intf->hw_priv,
CAM_ISP_HW_CMD_DUMP_HW,
&isp_hw_dump_args,
sizeof(struct
cam_isp_hw_dump_args));
}
break;
default:
CAM_DBG(CAM_ISP, "not a valid res %d",
hw_mgr_res->res_id);
break;
}
}
}
dump_args->offset = isp_hw_dump_args.offset;
CAM_DBG(CAM_ISP, "offset %u", dump_args->offset);
return rc;
}
static int cam_tfe_mgr_reset(void *hw_mgr_priv, void *hw_reset_args)
{
struct cam_tfe_hw_mgr *hw_mgr = hw_mgr_priv;
@@ -3023,6 +3189,7 @@ static int cam_tfe_mgr_reset(void *hw_mgr_priv, void *hw_reset_args)
}
}
atomic_set(&ctx->overflow_pending, 0);
end:
return rc;
}
@@ -3072,6 +3239,7 @@ static int cam_tfe_mgr_release_hw(void *hw_mgr_priv,
ctx->init_done = false;
ctx->is_dual = false;
ctx->is_tpg = false;
ctx->num_reg_dump_buf = 0;
ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_MAX;
atomic_set(&ctx->overflow_pending, 0);
for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
@@ -3463,7 +3631,8 @@ static int cam_isp_tfe_packet_generic_blob_handler(void *user_data,
return -EINVAL;
}
-if (bw_config->num_paths > CAM_ISP_MAX_PER_PATH_VOTES) {
if ((bw_config->num_paths > CAM_ISP_MAX_PER_PATH_VOTES) ||
!bw_config->num_paths) {
CAM_ERR(CAM_ISP, "Invalid num paths %d",
bw_config->num_paths);
return -EINVAL;
@@ -3790,6 +3959,7 @@ int cam_tfe_add_command_buffers(
break;
case CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH:
case CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR:
case CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST:
if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
if (prepare->num_reg_dump_buf >=
CAM_REG_DUMP_MAX_BUF_ENTRIES) {
@@ -3934,24 +4104,50 @@ static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv,
fill_fence = false;
}
-ctx->num_reg_dump_buf = prepare->num_reg_dump_buf;
-if ((ctx->num_reg_dump_buf) && (ctx->num_reg_dump_buf <
-CAM_REG_DUMP_MAX_BUF_ENTRIES)) {
-memcpy(ctx->reg_dump_buf_desc,
-prepare->reg_dump_buf_desc,
-sizeof(struct cam_cmd_buf_desc) *
-prepare->num_reg_dump_buf);
-}
CAM_DBG(CAM_ISP,
"num_reg_dump_buf=%d ope code:%d",
prepare->num_reg_dump_buf, prepare->packet->header.op_code);
/* reg update will be done later for the initial configure */
if (((prepare->packet->header.op_code) & 0xF) ==
CAM_ISP_PACKET_INIT_DEV) {
prepare_hw_data->packet_opcode_type =
CAM_ISP_TFE_PACKET_INIT_DEV;
if ((!prepare->num_reg_dump_buf) || (prepare->num_reg_dump_buf >
CAM_REG_DUMP_MAX_BUF_ENTRIES))
goto end;
if (!ctx->num_reg_dump_buf) {
ctx->num_reg_dump_buf =
prepare->num_reg_dump_buf;
memcpy(ctx->reg_dump_buf_desc,
prepare->reg_dump_buf_desc,
sizeof(struct cam_cmd_buf_desc) *
prepare->num_reg_dump_buf);
} else {
prepare_hw_data->num_reg_dump_buf =
prepare->num_reg_dump_buf;
memcpy(prepare_hw_data->reg_dump_buf_desc,
prepare->reg_dump_buf_desc,
sizeof(struct cam_cmd_buf_desc) *
prepare_hw_data->num_reg_dump_buf);
}
goto end;
-} else
} else {
prepare_hw_data->packet_opcode_type =
CAM_ISP_TFE_PACKET_CONFIG_DEV;
prepare_hw_data->num_reg_dump_buf = prepare->num_reg_dump_buf;
if ((prepare_hw_data->num_reg_dump_buf) &&
(prepare_hw_data->num_reg_dump_buf <
CAM_REG_DUMP_MAX_BUF_ENTRIES)) {
memcpy(prepare_hw_data->reg_dump_buf_desc,
prepare->reg_dump_buf_desc,
sizeof(struct cam_cmd_buf_desc) *
prepare_hw_data->num_reg_dump_buf);
}
}
/* add reg update commands */
for (i = 0; i < ctx->num_base; i++) {
@@ -4142,6 +4338,7 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
struct cam_tfe_hw_mgr_ctx *ctx = (struct cam_tfe_hw_mgr_ctx *)
hw_cmd_args->ctxt_to_hw_map;
struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL;
struct cam_packet *packet;
if (!hw_mgr_priv || !cmd_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
@@ -4180,6 +4377,17 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
else
isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_PIX;
break;
case CAM_ISP_HW_MGR_GET_PACKET_OPCODE:
packet = (struct cam_packet *)
isp_hw_cmd_args->cmd_data;
if ((packet->header.op_code & 0xF) ==
CAM_ISP_TFE_PACKET_INIT_DEV)
isp_hw_cmd_args->u.packet_op_code =
CAM_ISP_TFE_PACKET_INIT_DEV;
else
isp_hw_cmd_args->u.packet_op_code =
CAM_ISP_TFE_PACKET_CONFIG_DEV;
break;
default:
CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
hw_cmd_args->cmd_type);
@@ -4203,11 +4411,12 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc,
ctx->num_reg_dump_buf,
-CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH);
CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH,
NULL, false);
if (rc) {
CAM_ERR(CAM_ISP,
-"Reg dump on flush failed req id: %llu rc: %d",
"Reg dump on flush failed req id: %llu num_reg_dump:0x%x rc: %d",
-ctx->applied_req_id, rc);
ctx->applied_req_id, ctx->num_reg_dump_buf, rc);
return rc;
}
@@ -4219,15 +4428,15 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
ctx->last_dump_err_req_id = ctx->applied_req_id;
rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc,
ctx->num_reg_dump_buf,
-CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR);
CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR,
NULL, false);
if (rc) {
CAM_ERR(CAM_ISP,
-"Reg dump on error failed req id: %llu rc: %d",
"Reg dump on error failed req id:%llu num_reg_dump:0x%x rc: %d",
-ctx->applied_req_id, rc);
ctx->applied_req_id, ctx->num_reg_dump_buf, rc);
return rc;
}
break;
default:
CAM_ERR(CAM_ISP, "Invalid cmd");
}
@@ -4695,12 +4904,15 @@ static int cam_tfe_hw_mgr_check_irq_for_dual_tfe(
{
int32_t rc = -EINVAL;
uint32_t *event_cnt = NULL;
-uint32_t core_idx0 = 0;
-uint32_t core_idx1 = 1;
uint32_t master_hw_idx;
uint32_t slave_hw_idx;
if (!tfe_hw_mgr_ctx->is_dual)
return 0;
master_hw_idx = tfe_hw_mgr_ctx->master_hw_idx;
slave_hw_idx = tfe_hw_mgr_ctx->slave_hw_idx;
switch (hw_event_type) {
case CAM_ISP_HW_EVENT_SOF:
event_cnt = tfe_hw_mgr_ctx->sof_cnt;
@@ -4715,19 +4927,18 @@ static int cam_tfe_hw_mgr_check_irq_for_dual_tfe(
return 0;
}
-if (event_cnt[core_idx0] == event_cnt[core_idx1]) {
-event_cnt[core_idx0] = 0;
-event_cnt[core_idx1] = 0;
-rc = 0;
-return rc;
if (event_cnt[master_hw_idx] == event_cnt[slave_hw_idx]) {
event_cnt[master_hw_idx] = 0;
event_cnt[slave_hw_idx] = 0;
return 0;
}
-if ((event_cnt[core_idx0] &&
-(event_cnt[core_idx0] - event_cnt[core_idx1] > 1)) ||
-(event_cnt[core_idx1] &&
-(event_cnt[core_idx1] - event_cnt[core_idx0] > 1))) {
if ((event_cnt[master_hw_idx] &&
(event_cnt[master_hw_idx] - event_cnt[slave_hw_idx] > 1)) ||
(event_cnt[slave_hw_idx] &&
(event_cnt[slave_hw_idx] - event_cnt[master_hw_idx] > 1))) {
if (tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt > 10) {
rc = -1;
@@ -4735,15 +4946,15 @@ static int cam_tfe_hw_mgr_check_irq_for_dual_tfe(
}
CAM_ERR_RATE_LIMIT(CAM_ISP,
-"One TFE could not generate hw event %d id0:%d id1:%d",
-hw_event_type, event_cnt[core_idx0],
-event_cnt[core_idx1]);
"One TFE could not generate hw event %d master id :%d slave id:%d",
hw_event_type, event_cnt[master_hw_idx],
event_cnt[slave_hw_idx]);
-if (event_cnt[core_idx0] >= 2) {
-event_cnt[core_idx0]--;
if (event_cnt[master_hw_idx] >= 2) {
event_cnt[master_hw_idx]--;
tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt++;
}
-if (event_cnt[core_idx1] >= 2) {
-event_cnt[core_idx1]--;
if (event_cnt[slave_hw_idx] >= 2) {
event_cnt[slave_hw_idx]--;
tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt++;
}
@@ -5078,7 +5289,7 @@ static int cam_tfe_hw_mgr_debug_register(void)
goto err;
}
-if (!debugfs_create_bool("enable_reg_dump",
if (!debugfs_create_u32("enable_reg_dump",
0644,
g_tfe_hw_mgr.debug_cfg.dentry,
&g_tfe_hw_mgr.debug_cfg.enable_reg_dump)) {
@@ -5094,7 +5305,7 @@ static int cam_tfe_hw_mgr_debug_register(void)
goto err;
}
-if (!debugfs_create_bool("per_req_reg_dump",
if (!debugfs_create_u32("per_req_reg_dump",
0644,
g_tfe_hw_mgr.debug_cfg.dentry,
&g_tfe_hw_mgr.debug_cfg.per_req_reg_dump)) {
@@ -5285,6 +5496,7 @@ int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
hw_mgr_intf->hw_config = cam_tfe_mgr_config_hw;
hw_mgr_intf->hw_cmd = cam_tfe_mgr_cmd;
hw_mgr_intf->hw_reset = cam_tfe_mgr_reset;
hw_mgr_intf->hw_dump = cam_tfe_mgr_dump;
if (iommu_hdl)
*iommu_hdl = g_tfe_hw_mgr.mgr_common.img_iommu_hdl;


@@ -37,8 +37,8 @@ struct cam_tfe_hw_mgr_debug {
uint64_t csid_debug;
uint32_t enable_recovery;
uint32_t camif_debug;
-bool enable_reg_dump;
-bool per_req_reg_dump;
uint32_t enable_reg_dump;
uint32_t per_req_reg_dump;
};
/**
@@ -79,6 +79,7 @@ struct cam_tfe_hw_mgr_debug {
* @is_dual indicate whether context is in dual TFE mode
* @is_tpg indicate whether context use tpg
* @master_hw_idx master hardware index in dual tfe case
* @slave_hw_idx slave hardware index in dual tfe case
* @dual_tfe_irq_mismatch_cnt irq mismatch count value per core, used for
* dual TFE
*/
@@ -122,6 +123,7 @@ struct cam_tfe_hw_mgr_ctx {
bool is_dual;
bool is_tpg;
uint32_t master_hw_idx;
uint32_t slave_hw_idx;
uint32_t dual_tfe_irq_mismatch_cnt;
};


@@ -278,6 +278,7 @@ struct cam_isp_hw_dual_isp_update_args {
* @ buf_len: buf len
* @ offset: offset of buffer
* @ ctxt_to_hw_map: ctx to hw map
* @ is_dump_all: flag to indicate if all information or just bw/clk rate
*/
struct cam_isp_hw_dump_args {
uint64_t req_id;
@@ -285,6 +286,7 @@ struct cam_isp_hw_dump_args {
size_t buf_len;
size_t offset;
void *ctxt_to_hw_map;
bool is_dump_all;
};
/**


@@ -1275,6 +1275,18 @@ static int cam_tfe_csid_disable_pxl_path(
pxl_reg->csid_pxl_ctrl_addr);
}
if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
/* configure Halt for slave */
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
val &= ~0xF;
val |= stop_cmd;
val |= (TFE_CSID_HALT_MODE_MASTER << 2);
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
}
return rc;
}
@@ -1822,6 +1834,10 @@ static int cam_tfe_csid_reset_retain_sw_reg(
struct cam_hw_soc_info *soc_info;
soc_info = &csid_hw->hw_info->soc_info;
/* Mask top interrupts */
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_top_irq_mask_addr);
/* clear the top interrupt first */
cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_top_irq_clear_addr);
@@ -1841,7 +1857,6 @@ static int cam_tfe_csid_reset_retain_sw_reg(
status = cam_io_r(soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_top_irq_status_addr);
CAM_DBG(CAM_ISP, "Status reg %d", status);
-rc = 0;
} else {
CAM_DBG(CAM_ISP, "CSID:%d hw reset completed %d",
csid_hw->hw_intf->hw_idx, rc);
@@ -1875,6 +1890,7 @@ static int cam_tfe_csid_init_hw(void *hw_priv,
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
res = (struct cam_isp_resource_node *)init_args;
csid_reg = csid_hw->csid_info->csid_reg;
if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d",
@@ -1883,8 +1899,6 @@ static int cam_tfe_csid_init_hw(void *hw_priv,
return -EINVAL;
}
-csid_reg = csid_hw->csid_info->csid_reg;
mutex_lock(&csid_hw->hw_info->hw_mutex);
if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
@@ -1947,19 +1961,18 @@ static int cam_tfe_csid_deinit_hw(void *hw_priv,
return -EINVAL;
}
-CAM_DBG(CAM_ISP, "Enter");
res = (struct cam_isp_resource_node *)deinit_args;
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
-if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH) {
if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
csid_hw->hw_intf->hw_idx,
res->res_type);
return -EINVAL;
}
CAM_DBG(CAM_ISP, "Enter");
mutex_lock(&csid_hw->hw_info->hw_mutex);
if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
@@ -2287,6 +2300,106 @@ static int cam_tfe_csid_get_regdump(struct cam_tfe_csid_hw *csid_hw,
return 0;
}
static int cam_tfe_csid_dump_hw(
struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
{
int i;
uint8_t *dst;
uint32_t *addr, *start;
uint64_t *clk_addr, *clk_start;
uint32_t min_len;
uint32_t num_reg;
uint32_t reg_size = 0;
size_t remain_len;
struct cam_isp_hw_dump_header *hdr;
struct cam_isp_hw_dump_args *dump_args =
(struct cam_isp_hw_dump_args *)cmd_args;
struct cam_hw_soc_info *soc_info;
if (!dump_args) {
CAM_ERR(CAM_ISP, "Invalid args");
return -EINVAL;
}
if (!dump_args->cpu_addr || !dump_args->buf_len) {
CAM_ERR(CAM_ISP,
"Invalid params %pK %zu",
(void *)dump_args->cpu_addr,
dump_args->buf_len);
return -EINVAL;
}
if (dump_args->buf_len <= dump_args->offset) {
CAM_WARN(CAM_ISP,
"Dump offset overshoot offset %zu buf_len %zu",
dump_args->offset, dump_args->buf_len);
return -ENOSPC;
}
soc_info = &csid_hw->hw_info->soc_info;
if (dump_args->is_dump_all)
reg_size = soc_info->reg_map[0].size;
min_len = reg_size +
sizeof(struct cam_isp_hw_dump_header) +
(sizeof(uint32_t) * CAM_TFE_CSID_DUMP_MISC_NUM_WORDS);
remain_len = dump_args->buf_len - dump_args->offset;
if (remain_len < min_len) {
CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu, min %u",
remain_len, min_len);
return -ENOSPC;
}
mutex_lock(&csid_hw->hw_info->hw_mutex);
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
mutex_unlock(&csid_hw->hw_info->hw_mutex);
return -EINVAL;
}
if (!dump_args->is_dump_all)
goto dump_bw;
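/* Dump the full CSID register space as <register address, value> pairs, preceded by the soc index */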
dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset;
hdr = (struct cam_isp_hw_dump_header *)dst;
scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "CSID_REG:");
addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header));
start = addr;
num_reg = soc_info->reg_map[0].size/4;
hdr->word_size = sizeof(uint32_t);
*addr = soc_info->index;
addr++;
for (i = 0; i < num_reg; i++) {
addr[0] = soc_info->mem_block[0]->start + (i*4);
addr[1] = cam_io_r(soc_info->reg_map[0].mem_base
+ (i*4));
addr += 2;
}
hdr->size = hdr->word_size * (addr - start);
dump_args->offset += hdr->size +
sizeof(struct cam_isp_hw_dump_header);
dump_bw:
dst = (char *)dump_args->cpu_addr + dump_args->offset;
hdr = (struct cam_isp_hw_dump_header *)dst;
scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "CSID_CLK_RATE:");
clk_addr = (uint64_t *)(dst +
sizeof(struct cam_isp_hw_dump_header));
clk_start = clk_addr;
hdr->word_size = sizeof(uint64_t);
*clk_addr++ = csid_hw->clk_rate;
hdr->size = hdr->word_size * (clk_addr - clk_start);
dump_args->offset += hdr->size +
sizeof(struct cam_isp_hw_dump_header);
CAM_DBG(CAM_ISP, "offset %zu", dump_args->offset);
mutex_unlock(&csid_hw->hw_info->hw_mutex);
return 0;
}
static int cam_tfe_csid_process_cmd(void *hw_priv,
uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
{
@@ -2318,6 +2431,9 @@ static int cam_tfe_csid_process_cmd(void *hw_priv,
case CAM_TFE_CSID_CMD_GET_REG_DUMP:
rc = cam_tfe_csid_get_regdump(csid_hw, cmd_args);
break;
case CAM_ISP_HW_CMD_DUMP_HW:
rc = cam_tfe_csid_dump_hw(csid_hw, cmd_args);
break;
default:
CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
csid_hw->hw_intf->hw_idx, cmd_type);
@@ -2335,7 +2451,7 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
const struct cam_tfe_csid_reg_offset *csid_reg;
const struct cam_tfe_csid_csi2_rx_reg_offset *csi2_reg;
uint32_t irq_status[TFE_CSID_IRQ_REG_MAX];
-bool fatal_err_detected = false;
bool fatal_err_detected = false, is_error_irq = false;
uint32_t sof_irq_debug_en = 0;
unsigned long flags;
uint32_t i, val;
@@ -2392,14 +2508,6 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_irq_cmd_addr);
-CAM_ERR_RATE_LIMIT(CAM_ISP,
-"CSID %d irq status 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
-csid_hw->hw_intf->hw_idx, irq_status[TFE_CSID_IRQ_REG_TOP],
-irq_status[TFE_CSID_IRQ_REG_RX],
-irq_status[TFE_CSID_IRQ_REG_IPP],
-irq_status[TFE_CSID_IRQ_REG_RDI0],
-irq_status[TFE_CSID_IRQ_REG_RDI1],
-irq_status[TFE_CSID_IRQ_REG_RDI2]);
/* Software register reset complete*/
if (irq_status[TFE_CSID_IRQ_REG_TOP])
@@ -2446,25 +2554,29 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME)
csid_hw->error_irq_count++;
if (irq_status[TFE_CSID_IRQ_REG_RX] &
TFE_CSID_CSI2_RX_ERROR_CRC)
is_error_irq = true;
if (irq_status[TFE_CSID_IRQ_REG_RX] &
TFE_CSID_CSI2_RX_ERROR_ECC)
is_error_irq = true;
if (irq_status[TFE_CSID_IRQ_REG_RX] &
TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT)
is_error_irq = true;
}
spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
if (csid_hw->error_irq_count || fatal_err_detected)
is_error_irq = true;
if (csid_hw->error_irq_count >
CAM_TFE_CSID_MAX_IRQ_ERROR_COUNT) {
fatal_err_detected = true;
csid_hw->error_irq_count = 0;
}
-CAM_INFO(CAM_ISP,
-"CSID %d irq status 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
-csid_hw->hw_intf->hw_idx,
-irq_status[TFE_CSID_IRQ_REG_TOP],
-irq_status[TFE_CSID_IRQ_REG_RX],
-irq_status[TFE_CSID_IRQ_REG_IPP],
-irq_status[TFE_CSID_IRQ_REG_RDI0],
-irq_status[TFE_CSID_IRQ_REG_RDI1],
-irq_status[TFE_CSID_IRQ_REG_RDI2]);
if (fatal_err_detected) {
/* Reset the Rx CFG registers */
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
@@ -2580,6 +2692,23 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
}
if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG) {
if (irq_status[TFE_CSID_IRQ_REG_IPP] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID IPP reset complete");
if (irq_status[TFE_CSID_IRQ_REG_TOP])
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID TOP reset complete");
if (irq_status[TFE_CSID_IRQ_REG_RX] &
BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID RX reset complete");
}
/* read the IPP errors */
if (csid_hw->pxl_pipe_enable) {
/* IPP reset done bit */
@@ -2611,10 +2740,24 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_pxl_ctrl_addr);
is_error_irq = true;
}
if (irq_status[TFE_CSID_IRQ_REG_IPP] &
TFE_CSID_PATH_IPP_ERROR_CCIF_VIOLATION)
is_error_irq = true;
}
for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
if ((irq_status[i] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) &&
(csid_hw->csid_debug &
TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG))
CAM_INFO_RATE_LIMIT(CAM_ISP,
"CSID RDI%d reset complete", i);
if (irq_status[i] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
CAM_DBG(CAM_ISP, "CSID RDI%d reset complete", i);
@@ -2637,12 +2780,29 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
if (irq_status[i] & TFE_CSID_PATH_ERROR_FIFO_OVERFLOW) {
/* Stop RDI path immediately */
is_error_irq = true;
cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY,
soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
}
if ((irq_status[i] & TFE_CSID_PATH_RDI_OVERFLOW_IRQ) ||
(irq_status[i] &
TFE_CSID_PATH_RDI_ERROR_CCIF_VIOLATION))
is_error_irq = true;
}
if (is_error_irq)
CAM_ERR_RATE_LIMIT(CAM_ISP,
"CSID %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x",
csid_hw->hw_intf->hw_idx,
irq_status[TFE_CSID_IRQ_REG_TOP],
irq_status[TFE_CSID_IRQ_REG_RX],
irq_status[TFE_CSID_IRQ_REG_IPP],
irq_status[TFE_CSID_IRQ_REG_RDI0],
irq_status[TFE_CSID_IRQ_REG_RDI1],
irq_status[TFE_CSID_IRQ_REG_RDI2]);
if (csid_hw->irq_debug_cnt >= CAM_TFE_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
cam_tfe_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en);
csid_hw->irq_debug_cnt = 0;
@@ -2734,7 +2894,15 @@ int cam_tfe_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
csid_reg->cmn_reg->top_tfe2_fuse_reg);
if (val) {
CAM_INFO(CAM_ISP, "TFE 2 is not supported by hardware");
-rc = -EINVAL;
rc = cam_tfe_csid_disable_soc_resources(
&tfe_csid_hw->hw_info->soc_info);
if (rc)
CAM_ERR(CAM_ISP,
"CSID:%d Disable CSID SOC failed",
tfe_csid_hw->hw_intf->hw_idx);
else
rc = -EINVAL;
goto err;
}
}


@@ -12,6 +12,12 @@
#define CAM_TFE_CSID_CID_MAX 4
/* Each word is taken as uint32_t, for dumping uint64_t count as 2 words
* 1. soc_index
* 2. clk_rate --> uint64_t -> 2 words
*/
#define CAM_TFE_CSID_DUMP_MISC_NUM_WORDS 3
#define TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED BIT(0)
#define TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED BIT(1)
#define TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED BIT(2)
@@ -65,6 +71,7 @@
#define TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE BIT(6)
#define TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7)
#define TFE_CSID_DEBUG_DISABLE_EARLY_EOF BIT(8)
#define TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG BIT(9)
/* enum cam_csid_path_halt_mode select the path halt mode control */
enum cam_tfe_csid_path_halt_mode {


@@ -66,6 +66,7 @@ static struct cam_tfe_camif_reg_data tfe530_camif_reg_data = {
.extern_reg_update_shift = 0x0,
.camif_pd_rdi2_src_sel_shift = 0x2,
.dual_tfe_sync_sel_shift = 18,
.delay_line_en_shift = 8,
.pixel_pattern_shift = 24,
.pixel_pattern_mask = 0x7000000,
.module_enable_shift = 0,
@@ -202,6 +203,65 @@ static struct cam_tfe_rdi_reg_data tfe530_rdi2_reg_data = {
.enable_diagnostic_hw = 0x1,
};
static struct cam_tfe_clc_hw_status tfe530_clc_hw_info[CAM_TFE_MAX_CLC] = {
{
.name = "CLC_CAMIF",
.hw_status_reg = 0x1204,
},
{
.name = "CLC_RDI0_CAMIF",
.hw_status_reg = 0x1404,
},
{
.name = "CLC_RDI1_CAMIF",
.hw_status_reg = 0x1604,
},
{
.name = "CLC_RDI2_CAMIF",
.hw_status_reg = 0x1804,
},
{
.name = "CLC_CHANNEL_GAIN",
.hw_status_reg = 0x2604,
},
{
.name = "CLC_LENS_ROLL_OFF",
.hw_status_reg = 0x2804,
},
{
.name = "CLC_WB_BDS",
.hw_status_reg = 0x2A04,
},
{
.name = "CLC_STATS_BHIST",
.hw_status_reg = 0x2C04,
},
{
.name = "CLC_STATS_TINTLESS_BG",
.hw_status_reg = 0x2E04,
},
{
.name = "CLC_STATS_BAF",
.hw_status_reg = 0x3004,
},
{
.name = "CLC_STATS_AWB_BG",
.hw_status_reg = 0x3204,
},
{
.name = "CLC_STATS_AEC_BG",
.hw_status_reg = 0x3404,
},
{
.name = "CLC_STATS_RAW_OUT",
.hw_status_reg = 0x3604,
},
{
.name = "CLC_STATS_CROP_POST_BDS",
.hw_status_reg = 0x3804,
},
};
static struct cam_tfe_top_hw_info tfe530_top_hw_info = {
.common_reg = &tfe530_top_commong_reg,
.camif_hw_info = {
@@ -385,6 +445,9 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
0x00001A2C,
},
.irq_cmd = 0x00001A30,
.cons_violation_shift = 28,
.violation_shift = 30,
.image_size_violation = 31,
},
.num_client = CAM_TFE_BUS_MAX_CLIENTS,
.bus_client_reg = {
@@ -414,6 +477,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x00001C7C,
.debug_status_1 = 0x00001C80,
.comp_group = CAM_TFE_BUS_COMP_GRP_0,
.client_name = "BAYER",
},
/* BUS Client 1 IDEAL RAW*/
{
@@ -441,6 +505,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x00001D7C,
.debug_status_1 = 0x00001D80,
.comp_group = CAM_TFE_BUS_COMP_GRP_1,
.client_name = "IDEAL_RAW",
},
/* BUS Client 2 Stats BE Tintless */
{
@@ -468,6 +533,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x00001E7C,
.debug_status_1 = 0x00001E80,
.comp_group = CAM_TFE_BUS_COMP_GRP_2,
.client_name = "STATS BE TINTLESS",
},
/* BUS Client 3 Stats Bhist */
{
@@ -495,6 +561,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x00001F7C,
.debug_status_1 = 0x00001F80,
.comp_group = CAM_TFE_BUS_COMP_GRP_2,
.client_name = "STATS BHIST",
},
/* BUS Client 4 Stats AWB BG */
{
@@ -522,6 +589,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x0000207C,
.debug_status_1 = 0x00002080,
.comp_group = CAM_TFE_BUS_COMP_GRP_3,
.client_name = "STATS AWB BG",
},
/* BUS Client 5 Stats AEC BG */
{
@@ -549,6 +617,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x0000217C,
.debug_status_1 = 0x00002180,
.comp_group = CAM_TFE_BUS_COMP_GRP_3,
.client_name = "STATS AEC BG",
},
/* BUS Client 6 Stats BAF */
{
@@ -576,6 +645,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x0000227C,
.debug_status_1 = 0x00002280,
.comp_group = CAM_TFE_BUS_COMP_GRP_4,
.client_name = "STATS BAF",
},
/* BUS Client 7 RDI0 */
{
@@ -603,6 +673,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x0000237C,
.debug_status_1 = 0x00002380,
.comp_group = CAM_TFE_BUS_COMP_GRP_5,
.client_name = "RDI0",
},
/* BUS Client 8 RDI1 */
{
@@ -630,6 +701,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x0000247C,
.debug_status_1 = 0x00002480,
.comp_group = CAM_TFE_BUS_COMP_GRP_6,
.client_name = "RDI1",
},
/* BUS Client 9 PDAF/RDI2*/
{
@@ -657,6 +729,7 @@ static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = {
.debug_status_0 = 0x0000257C,
.debug_status_1 = 0x00002580,
.comp_group = CAM_TFE_BUS_COMP_GRP_7,
.client_name = "RDI2/PADF",
},
},
.num_out = CAM_TFE_BUS_TFE_OUT_MAX,
@@ -800,9 +873,14 @@ struct cam_tfe_hw_info cam_tfe530 = {
.bus_reg_irq_mask = {
0x00000002,
0x00000000,
},
.bus_error_irq_mask = {
0xC0000000,
0x00000000,
},
.num_clc = 14,
.clc_hw_status_info = tfe530_clc_hw_info,
.bus_version = CAM_TFE_BUS_1_0,
.bus_hw_info = &tfe530_bus_hw_info,


@@ -87,6 +87,7 @@ struct cam_tfe_bus_wm_resource_data {
uint32_t format;
uint32_t pack_fmt;
uint32_t burst_len;
uint32_t mode;
uint32_t irq_subsample_period;
uint32_t irq_subsample_pattern;
@@ -95,6 +96,10 @@ struct cam_tfe_bus_wm_resource_data {
uint32_t en_cfg;
uint32_t is_dual;
uint32_t acquired_width;
uint32_t acquired_height;
uint32_t acquired_stride;
};
struct cam_tfe_bus_comp_grp_data {
@@ -449,6 +454,137 @@ static enum cam_tfe_bus_packer_format
}
}
static int cam_tfe_bus_acquire_rdi_wm(
struct cam_tfe_bus_wm_resource_data *rsrc_data)
{
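/* Line based RDI WMs convert the programmed width from pixels to 64-bit bus words; otherwise the default frame based width/stride settings are used */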
switch (rsrc_data->format) {
case CAM_FORMAT_MIPI_RAW_6:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 6, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
case CAM_FORMAT_MIPI_RAW_8:
case CAM_FORMAT_PLAIN8:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 8, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
case CAM_FORMAT_MIPI_RAW_10:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 10, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
case CAM_FORMAT_MIPI_RAW_12:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 12, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
case CAM_FORMAT_MIPI_RAW_14:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 14, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
case CAM_FORMAT_PLAIN16_10:
case CAM_FORMAT_PLAIN16_12:
case CAM_FORMAT_PLAIN16_14:
case CAM_FORMAT_MIPI_RAW_16:
case CAM_FORMAT_PLAIN16_16:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 16, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
case CAM_FORMAT_PLAIN128:
case CAM_FORMAT_PLAIN64:
rsrc_data->pack_fmt = 0xA;
if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) {
rsrc_data->width =
ALIGNUP(rsrc_data->width * 64, 64) / 64;
rsrc_data->en_cfg = 0x1;
} else {
rsrc_data->width =
CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
rsrc_data->height = 0;
rsrc_data->stride =
CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->en_cfg = (0x1 << 16) | 0x1;
}
break;
default:
CAM_ERR(CAM_ISP, "Unsupported RDI:%d format %d",
rsrc_data->index, rsrc_data->format);
return -EINVAL;
}
return 0;
}
static int cam_tfe_bus_acquire_wm(
struct cam_tfe_bus_priv *bus_priv,
struct cam_isp_tfe_out_port_info *out_port_info,
@@ -463,9 +599,9 @@ static int cam_tfe_bus_acquire_wm(
struct cam_isp_resource_node *wm_res_local = NULL;
struct cam_tfe_bus_wm_resource_data *rsrc_data = NULL;
uint32_t wm_idx = 0;
int rc = 0;
*wm_res = NULL;
/* No need to allocate for BUS TFE OUT to WM is fixed. */
wm_idx = cam_tfe_bus_get_wm_idx(tfe_out_res_id, plane);
if (wm_idx < 0 || wm_idx >= bus_priv->num_client) {
@@ -491,50 +627,26 @@ static int cam_tfe_bus_acquire_wm(
rsrc_data->width = out_port_info->width;
rsrc_data->height = out_port_info->height;
rsrc_data->stride = out_port_info->stride;
rsrc_data->mode = out_port_info->wm_mode;
/*
* Store the acquire width, height separately. For frame based ports
* width and height modified again
*/
rsrc_data->acquired_width = out_port_info->width;
rsrc_data->acquired_height = out_port_info->height;
rsrc_data->acquired_stride = out_port_info->stride;
rsrc_data->is_dual = is_dual;
/* Set WM offset value to default */
rsrc_data->offset = 0;
if (rsrc_data->index > 6) {
/* WM 7-9 refers to RDI 0/ RDI 1/RDI 2 */
-switch (rsrc_data->format) {
-case CAM_FORMAT_MIPI_RAW_6:
-case CAM_FORMAT_MIPI_RAW_8:
-case CAM_FORMAT_MIPI_RAW_10:
-case CAM_FORMAT_MIPI_RAW_12:
-case CAM_FORMAT_MIPI_RAW_14:
-case CAM_FORMAT_MIPI_RAW_16:
-case CAM_FORMAT_PLAIN128:
-rsrc_data->width = CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
-rsrc_data->height = 0;
-rsrc_data->stride = CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
-rsrc_data->pack_fmt = 0xA;
-rsrc_data->en_cfg = (0x1 << 16) | 0x1;
-break;
-case CAM_FORMAT_PLAIN8:
-rsrc_data->en_cfg = 0x1;
-rsrc_data->pack_fmt = 0xA;
-rsrc_data->stride = rsrc_data->width * 2;
-break;
-case CAM_FORMAT_PLAIN16_10:
-case CAM_FORMAT_PLAIN16_12:
-case CAM_FORMAT_PLAIN16_14:
-case CAM_FORMAT_PLAIN16_16:
-rsrc_data->width = CAM_TFE_RDI_BUS_DEFAULT_WIDTH;
-rsrc_data->height = 0;
-rsrc_data->stride = CAM_TFE_RDI_BUS_DEFAULT_STRIDE;
-rsrc_data->pack_fmt = 0xA;
-rsrc_data->en_cfg = (0x1 << 16) | 0x1;
-break;
-case CAM_FORMAT_PLAIN64:
-rsrc_data->en_cfg = 0x1;
-rsrc_data->pack_fmt = 0xA;
-break;
-default:
-CAM_ERR(CAM_ISP, "Unsupported RDI format %d",
-rsrc_data->format);
-return -EINVAL;
-}
rc = cam_tfe_bus_acquire_rdi_wm(rsrc_data);
if (rc)
return rc;
} else if (rsrc_data->index == 0 || rsrc_data->index == 1) {
/* WM 0 FULL_OUT */
switch (rsrc_data->format) {
@@ -581,9 +693,10 @@ static int cam_tfe_bus_acquire_wm(
*client_done_mask |= (1 << wm_idx);
CAM_DBG(CAM_ISP,
-"WM:%d processed width:%d height:%d format:0x%x comp_group:%d packt format:0x%x",
"WM:%d processed width:%d height:%d format:0x%x comp_group:%d packt format:0x%x wm mode:%d",
rsrc_data->index, rsrc_data->width, rsrc_data->height,
-rsrc_data->format, *comp_grp_id, rsrc_data->pack_fmt);
rsrc_data->format, *comp_grp_id, rsrc_data->pack_fmt,
rsrc_data->mode);
return 0;
}
@@ -630,7 +743,8 @@ static int cam_tfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
/* Configure stride for RDIs on full TFE and TFE lite */
-if (rsrc_data->index > 6)
if ((rsrc_data->index > 6) &&
(rsrc_data->mode != CAM_ISP_TFE_WM_LINE_BASED_MODE))
cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
rsrc_data->hw_regs->image_cfg_2));
@@ -1573,8 +1687,108 @@ static int cam_tfe_bus_bufdone_bottom_half(
return 0;
}
static void cam_tfe_bus_error_bottom_half(
struct cam_tfe_bus_priv *bus_priv,
struct cam_tfe_irq_evt_payload *evt_payload)
{
struct cam_tfe_bus_wm_resource_data *rsrc_data;
struct cam_tfe_bus_reg_offset_common *common_reg;
uint32_t i, overflow_status, image_size_violation_status;
uint32_t ccif_violation_status;
common_reg = bus_priv->common_data.common_reg;
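/* Log the raw bus IRQ payload along with the latched violation and overflow status registers */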
CAM_INFO(CAM_ISP, "BUS IRQ[0]:0x%x BUS IRQ[1]:0x%x",
evt_payload->bus_irq_val[0], evt_payload->bus_irq_val[1]);
overflow_status = cam_io_r_mb(bus_priv->common_data.mem_base +
bus_priv->common_data.common_reg->overflow_status);
image_size_violation_status = cam_io_r_mb(
bus_priv->common_data.mem_base +
bus_priv->common_data.common_reg->image_size_violation_status);
ccif_violation_status = cam_io_r_mb(bus_priv->common_data.mem_base +
bus_priv->common_data.common_reg->ccif_violation_status);
CAM_INFO(CAM_ISP,
"ccif violation status:0x%x image size violation:0x%x overflow status:0x%x",
ccif_violation_status,
image_size_violation_status,
overflow_status);
/* Check the bus errors */
if (evt_payload->bus_irq_val[0] & BIT(common_reg->cons_violation_shift))
CAM_INFO(CAM_ISP, "CONS_VIOLATION");
if (evt_payload->bus_irq_val[0] & BIT(common_reg->violation_shift))
CAM_INFO(CAM_ISP, "VIOLATION");
if (evt_payload->bus_irq_val[0] &
BIT(common_reg->image_size_violation)) {
CAM_INFO(CAM_ISP, "IMAGE_SIZE_VIOLATION val :0x%x",
evt_payload->image_size_violation_status);
for (i = 0; i < CAM_TFE_BUS_MAX_CLIENTS; i++) {
if (!(evt_payload->image_size_violation_status >> i))
break;
if (evt_payload->image_size_violation_status & BIT(i)) {
rsrc_data = bus_priv->bus_client[i].res_priv;
CAM_INFO(CAM_ISP,
"WM:%d width 0x%x height:0x%x format:%d stride:0x%x offset:0x%x encfg:0x%x",
i,
rsrc_data->acquired_width,
rsrc_data->acquired_height,
rsrc_data->format,
rsrc_data->acquired_stride,
rsrc_data->offset,
rsrc_data->en_cfg);
CAM_INFO(CAM_ISP,
"WM:%d current width 0x%x height:0x%x stride:0x%x",
i,
rsrc_data->width,
rsrc_data->height,
rsrc_data->stride);
}
}
}
if (overflow_status) {
for (i = 0; i < CAM_TFE_BUS_MAX_CLIENTS; i++) {
if (!(evt_payload->overflow_status >> i))
break;
if (evt_payload->overflow_status & BIT(i)) {
rsrc_data = bus_priv->bus_client[i].res_priv;
CAM_INFO(CAM_ISP,
"WM:%d %s BUS OVERFLOW width0x%x height:0x%x format:%d stride:0x%x offset:0x%x encfg:%x",
i,
rsrc_data->hw_regs->client_name,
rsrc_data->acquired_width,
rsrc_data->acquired_height,
rsrc_data->format,
rsrc_data->acquired_stride,
rsrc_data->offset,
rsrc_data->en_cfg);
CAM_INFO(CAM_ISP,
"WM:%d current width:0x%x height:0x%x stride:0x%x",
i,
rsrc_data->width,
rsrc_data->height,
rsrc_data->stride);
}
}
}
}
static int cam_tfe_bus_bottom_half(void *priv,
-bool rup_process, struct cam_tfe_irq_evt_payload *evt_payload)
bool rup_process, struct cam_tfe_irq_evt_payload *evt_payload,
bool error_process)
{
struct cam_tfe_bus_priv *bus_priv;
uint32_t val;
@@ -1585,6 +1799,11 @@ static int cam_tfe_bus_bottom_half(void *priv,
}
bus_priv = (struct cam_tfe_bus_priv *) priv;
if (error_process) {
cam_tfe_bus_error_bottom_half(bus_priv, evt_payload);
goto end;
}
/* if bus errors are there, mask all bus errors */
if (evt_payload->bus_irq_val[0] & bus_priv->bus_irq_error_mask[0]) {
val = cam_io_r(bus_priv->common_data.mem_base +
@@ -1592,6 +1811,7 @@ static int cam_tfe_bus_bottom_half(void *priv,
val &= ~bus_priv->bus_irq_error_mask[0];
cam_io_w(val, bus_priv->common_data.mem_base +
bus_priv->common_data.common_reg->irq_mask[0]);
}
if (rup_process) {
@@ -1604,6 +1824,7 @@ static int cam_tfe_bus_bottom_half(void *priv,
cam_tfe_bus_bufdone_bottom_half(bus_priv, evt_payload);
}
end:
return 0;
}
@@ -1657,21 +1878,14 @@ static int cam_tfe_bus_update_wm(void *priv, void *cmd_args,
CAM_DBG(CAM_ISP, "WM:%d image height and width 0x%x", CAM_DBG(CAM_ISP, "WM:%d image height and width 0x%x",
wm_data->index, reg_val_pair[j-1]); wm_data->index, reg_val_pair[j-1]);
val = io_cfg->planes[i].plane_stride;
CAM_DBG(CAM_ISP, "before stride 0x%x", val);
val = ALIGNUP(val, 16);
if (val != io_cfg->planes[i].plane_stride &&
val != wm_data->stride)
CAM_WARN(CAM_ISP, "Warning stride %u expected %u",
io_cfg->planes[i].plane_stride, val);
val = wm_data->offset; val = wm_data->offset;
CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->image_cfg_1, val); wm_data->hw_regs->image_cfg_1, val);
CAM_DBG(CAM_ISP, "WM:%d xinit 0x%x", CAM_DBG(CAM_ISP, "WM:%d xinit 0x%x",
wm_data->index, reg_val_pair[j-1]); wm_data->index, reg_val_pair[j-1]);
if (wm_data->index < 7) { if ((wm_data->index < 7) || ((wm_data->index >= 7) &&
(wm_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE))) {
CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->image_cfg_2, wm_data->hw_regs->image_cfg_2,
io_cfg->planes[i].plane_stride); io_cfg->planes[i].plane_stride);


@@ -11,10 +11,11 @@
#include "cam_isp_hw.h" #include "cam_isp_hw.h"
#include "cam_tfe_hw_intf.h" #include "cam_tfe_hw_intf.h"
#define CAM_TFE_BUS_MAX_CLIENTS 10 #define CAM_TFE_BUS_MAX_CLIENTS 10
#define CAM_TFE_BUS_MAX_SUB_GRPS 4 #define CAM_TFE_BUS_MAX_SUB_GRPS 4
#define CAM_TFE_BUS_MAX_PERF_CNT_REG 8 #define CAM_TFE_BUS_MAX_PERF_CNT_REG 8
#define CAM_TFE_BUS_MAX_IRQ_REGISTERS 2 #define CAM_TFE_BUS_MAX_IRQ_REGISTERS 2
#define CAM_TFE_BUS_CLIENT_NAME_MAX_LENGTH 32
#define CAM_TFE_BUS_1_0 0x1000 #define CAM_TFE_BUS_1_0 0x1000
@@ -29,7 +30,8 @@
((value + alignment - 1) / alignment * alignment)
typedef int (*CAM_BUS_HANDLER_BOTTOM_HALF)(void *bus_priv,
-bool rup_process, struct cam_tfe_irq_evt_payload *evt_payload);
bool rup_process, struct cam_tfe_irq_evt_payload *evt_payload,
bool error_process);
enum cam_tfe_bus_plane_type {
PLANE_Y,
@@ -106,6 +108,10 @@ struct cam_tfe_bus_reg_offset_common {
uint32_t irq_clear[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
uint32_t irq_status[CAM_TFE_BUS_IRQ_REGISTERS_MAX];
uint32_t irq_cmd;
/* common register data */
uint32_t cons_violation_shift;
uint32_t violation_shift;
uint32_t image_size_violation;
};
/*
@@ -138,6 +144,8 @@ struct cam_tfe_bus_reg_offset_bus_client {
uint32_t debug_status_0;
uint32_t debug_status_1;
uint32_t comp_group;
/*bus data */
uint8_t client_name[CAM_TFE_BUS_CLIENT_NAME_MAX_LENGTH];
};
/*


@@ -48,6 +48,10 @@ struct cam_tfe_top_priv {
axi_vote_control[CAM_TFE_TOP_IN_PORT_MAX];
uint32_t irq_prepared_mask[3];
void *tasklet_info;
struct timeval sof_ts;
struct timeval epoch_ts;
struct timeval eof_ts;
struct timeval error_ts;
};
struct cam_tfe_camif_data {
@@ -64,9 +68,11 @@ struct cam_tfe_camif_data {
enum cam_isp_hw_sync_mode sync_mode;
uint32_t dsp_mode;
uint32_t pix_pattern;
-uint32_t first_pixel;
uint32_t left_first_pixel;
uint32_t left_last_pixel;
uint32_t right_first_pixel;
uint32_t right_last_pixel;
uint32_t first_line;
-uint32_t last_pixel;
uint32_t last_line;
bool enable_sof_irq_debug;
uint32_t irq_debug_cnt;
@@ -85,6 +91,10 @@ struct cam_tfe_rdi_data {
void *priv;
enum cam_isp_hw_sync_mode sync_mode;
uint32_t pix_pattern;
uint32_t left_first_pixel;
uint32_t left_last_pixel;
uint32_t first_line;
uint32_t last_line;
};
static int cam_tfe_validate_pix_pattern(uint32_t pattern)
@@ -211,68 +221,15 @@ int cam_tfe_irq_config(void *tfe_core_data,
return 0;
}
-static void cam_tfe_log_error_irq_status(
-struct cam_tfe_hw_core_info *core_info,
-struct cam_tfe_top_priv *top_priv,
-struct cam_tfe_irq_evt_payload *evt_payload)
static void cam_tfe_log_tfe_in_debug_status(
struct cam_tfe_top_priv *top_priv)
{
-struct cam_tfe_hw_info *hw_info;
void __iomem *mem_base;
-struct cam_hw_soc_info *soc_info;
-struct cam_tfe_soc_private *soc_private;
struct cam_tfe_camif_data *camif_data;
struct cam_tfe_rdi_data *rdi_data;
-uint32_t i, val_0, val_1, val_2, val_3;
uint32_t i, val_0, val_1;
-hw_info = core_info->tfe_hw_info;
mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base;
-soc_info = top_priv->common_data.soc_info;
-soc_private = top_priv->common_data.soc_info->soc_private;
val_0 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_0);
val_1 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_1);
val_2 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_2);
val_3 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_3);
CAM_INFO(CAM_ISP, "TOP IRQ[0]:0x%x IRQ[1]:0x%x IRQ[2]:0x%x",
evt_payload->irq_reg_val[0], evt_payload->irq_reg_val[1],
evt_payload->irq_reg_val[2]);
CAM_INFO(CAM_ISP, "BUS IRQ[0]:0x%x BUS IRQ[1]:0x%x",
evt_payload->bus_irq_val[0], evt_payload->bus_irq_val[1]);
CAM_INFO(CAM_ISP, "ccif violation:0x%x image size:0x%x overflow:0x%x",
evt_payload->ccif_violation_status,
evt_payload->image_size_violation_status,
evt_payload->overflow_status);
cam_cpas_reg_read(soc_private->cpas_handle,
CAM_CPAS_REG_CAMNOC, 0x20, true, &val_0);
CAM_INFO(CAM_ISP, "tfe_niu_MaxWr_Low offset 0x20 val 0x%x",
val_0);
CAM_INFO(CAM_ISP, "Top debug [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x",
val_0, val_1, val_2, val_3);
val_0 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_pixel_count);
val_1 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_line_count);
val_2 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_stall_count);
val_3 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_always_count);
CAM_INFO(CAM_ISP,
"Top perf cnt pix:0x%x line:0x%x stall:0x%x always:0x%x",
val_0, val_1, val_2, val_3);
for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
if ((top_priv->in_rsrc[i].res_state != if ((top_priv->in_rsrc[i].res_state !=
@@ -291,6 +248,23 @@ static void cam_tfe_log_error_irq_status(
val_1, val_1,
((val_0 >> 16) & 0x1FFF), ((val_0 >> 16) & 0x1FFF),
(val_0 & 0x1FFF)); (val_0 & 0x1FFF));
CAM_INFO(CAM_ISP,
"Acquired sync mode:%d left start pxl:0x%x end_pixel:0x%x",
camif_data->sync_mode,
camif_data->left_first_pixel,
camif_data->left_last_pixel);
if (camif_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
CAM_INFO(CAM_ISP,
"sync mode:%d right start pxl:0x%x end_pixel:0x%x",
camif_data->sync_mode,
camif_data->right_first_pixel,
camif_data->right_last_pixel);
CAM_INFO(CAM_ISP,
"Acquired line start:0x%x line end:0x%x",
camif_data->first_line,
camif_data->last_line);
} else if ((top_priv->in_rsrc[i].res_id >= } else if ((top_priv->in_rsrc[i].res_id >=
CAM_ISP_HW_TFE_IN_RDI0) || CAM_ISP_HW_TFE_IN_RDI0) ||
(top_priv->in_rsrc[i].res_id <= (top_priv->in_rsrc[i].res_id <=
@@ -306,11 +280,104 @@ static void cam_tfe_log_error_irq_status(
top_priv->in_rsrc[i].res_id, top_priv->in_rsrc[i].res_id,
val_1, ((val_0 >> 16) & 0x1FFF), val_1, ((val_0 >> 16) & 0x1FFF),
(val_0 & 0x1FFF)); (val_0 & 0x1FFF));
CAM_INFO(CAM_ISP,
"sync mode:%d left start pxl:0x%x end_pixel:0x%x",
rdi_data->sync_mode,
rdi_data->left_first_pixel,
rdi_data->left_last_pixel);
CAM_INFO(CAM_ISP,
"sync mode:%d line start:0x%x line end:0x%x",
rdi_data->sync_mode,
rdi_data->first_line,
rdi_data->last_line);
} }
} }
}
static void cam_tfe_log_error_irq_status(
struct cam_tfe_hw_core_info *core_info,
struct cam_tfe_top_priv *top_priv,
struct cam_tfe_irq_evt_payload *evt_payload)
{
struct cam_tfe_hw_info *hw_info;
void __iomem *mem_base;
struct cam_hw_soc_info *soc_info;
struct cam_tfe_soc_private *soc_private;
struct cam_tfe_clc_hw_status *clc_hw_status;
struct timespec64 ts;
uint32_t i, val_0, val_1, val_2, val_3;
ktime_get_boottime_ts64(&ts);
hw_info = core_info->tfe_hw_info;
mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base;
soc_info = top_priv->common_data.soc_info;
soc_private = top_priv->common_data.soc_info->soc_private;
CAM_INFO(CAM_ISP, "current monotonic time stamp seconds %lld:%lld",
ts.tv_sec, ts.tv_nsec/1000);
CAM_INFO(CAM_ISP,
"ERROR time %lld:%lld SOF %lld:%lld EPOCH %lld:%lld EOF %lld:%lld",
top_priv->error_ts.tv_sec,
top_priv->error_ts.tv_usec,
top_priv->sof_ts.tv_sec,
top_priv->sof_ts.tv_usec,
top_priv->epoch_ts.tv_sec,
top_priv->epoch_ts.tv_usec,
top_priv->eof_ts.tv_sec,
top_priv->eof_ts.tv_usec);
val_0 = cam_io_r(mem_base + val_0 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_0);
val_1 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_1);
val_2 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_2);
val_3 = cam_io_r(mem_base +
top_priv->common_data.common_reg->debug_3);
CAM_INFO(CAM_ISP, "TOP IRQ[0]:0x%x IRQ[1]:0x%x IRQ[2]:0x%x",
evt_payload->irq_reg_val[0], evt_payload->irq_reg_val[1],
evt_payload->irq_reg_val[2]);
CAM_INFO(CAM_ISP, "Top debug [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x",
val_0, val_1, val_2, val_3);
cam_cpas_reg_read(soc_private->cpas_handle,
CAM_CPAS_REG_CAMNOC, 0x20, true, &val_0);
CAM_INFO(CAM_ISP, "tfe_niu_MaxWr_Low offset 0x20 val 0x%x",
val_0);
val_0 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_pixel_count);
val_1 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_line_count);
val_2 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_stall_count); top_priv->common_data.common_reg->perf_stall_count);
val_3 = cam_io_r(mem_base +
top_priv->common_data.common_reg->perf_always_count);
CAM_INFO(CAM_ISP,
"Top perf cnt pix:0x%x line:0x%x stall:0x%x always:0x%x",
val_0, val_1, val_2, val_3);
clc_hw_status = hw_info->clc_hw_status_info;
for (i = 0; i < hw_info->num_clc; i++) {
val_0 = cam_io_r(mem_base +
clc_hw_status[i].hw_status_reg);
if (val_0)
CAM_INFO(CAM_ISP,
"CLC HW status :name:%s offset:0x%x value:0x%x",
clc_hw_status[i].name,
clc_hw_status[i].hw_status_reg,
val_0);
}
cam_tfe_log_tfe_in_debug_status(top_priv);
/* Check the overflow errors */ /* Check the overflow errors */
if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) { if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) {
if (evt_payload->irq_reg_val[0] & BIT(8)) if (evt_payload->irq_reg_val[0] & BIT(8))
@@ -363,22 +430,13 @@ static void cam_tfe_log_error_irq_status(
CAM_INFO(CAM_ISP, "TOP Violation status:0x%x", val_0); CAM_INFO(CAM_ISP, "TOP Violation status:0x%x", val_0);
} }
-	/* Check the bus errors */
-	if (evt_payload->bus_irq_val[0] & BIT(29))
-		CAM_INFO(CAM_ISP, "CONS_VIOLATION");
-	if (evt_payload->bus_irq_val[0] & BIT(30))
-		CAM_INFO(CAM_ISP, "VIOLATION val 0x%x",
-			evt_payload->ccif_violation_status);
-	if (evt_payload->bus_irq_val[0] & BIT(31))
-		CAM_INFO(CAM_ISP, "IMAGE_SIZE_VIOLATION val :0x%x",
-			evt_payload->image_size_violation_status);
-	/* clear the bus irq overflow status*/
-	if (evt_payload->overflow_status)
-		cam_io_w_mb(1, mem_base +
-			core_info->tfe_hw_info->bus_overflow_clear_cmd);
+	core_info->tfe_bus->bottom_half_handler(
+		core_info->tfe_bus->bus_priv, false, evt_payload, true);
+	CAM_INFO(CAM_ISP,
+		"TFE clock rate:%d TFE total bw applied:%lld",
+		top_priv->hw_clk_rate,
+		top_priv->total_bw_applied);
 }
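The reworked error logger above walks a table of CLC status registers and only reports the ones that latched a non-zero value, before handing the payload to the bus bottom half with the new error flag. A minimal user-space sketch of that "print only what fired" pattern is shown below; the register names, offsets and the stubbed register read are all illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

struct clc_hw_status {
	const char *name;
	uint32_t    hw_status_reg;   /* offset into the register block */
};

/* Stub for cam_io_r(); a real driver reads a mapped MMIO address. */
static uint32_t reg_read(const uint32_t *mmio, uint32_t offset)
{
	return mmio[offset / 4];
}

static void log_clc_status(const uint32_t *mmio,
	const struct clc_hw_status *clc, unsigned int num_clc)
{
	for (unsigned int i = 0; i < num_clc; i++) {
		uint32_t val = reg_read(mmio, clc[i].hw_status_reg);

		if (val)    /* only report CLCs that flagged something */
			printf("CLC %s offset:0x%x value:0x%x\n",
				clc[i].name, clc[i].hw_status_reg, val);
	}
}

int main(void)
{
	uint32_t fake_mmio[16] = { [1] = 0x0, [2] = 0x5 };
	struct clc_hw_status clc[] = { {"clc_a", 0x4}, {"clc_b", 0x8} };

	log_clc_status(fake_mmio, clc, 2);
	return 0;
}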
@@ -391,33 +449,31 @@ static int cam_tfe_error_irq_bottom_half(
 {
 	struct cam_isp_hw_event_info         evt_info;
 	struct cam_tfe_hw_info              *hw_info;
+	uint32_t                             error_detected = 0;
 	hw_info = core_info->tfe_hw_info;
 	evt_info.hw_idx = core_info->core_index;
 	evt_info.res_type = CAM_ISP_RESOURCE_TFE_IN;
 	if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) {
-		CAM_ERR(CAM_ISP, "TFE:%d Overflow error irq_status[0]:%x",
-			core_info->core_index,
-			evt_payload->irq_reg_val[0]);
 		evt_info.err_type = CAM_TFE_IRQ_STATUS_OVERFLOW;
-		cam_tfe_log_error_irq_status(core_info, top_priv, evt_payload);
-		if (event_cb)
-			event_cb(event_cb_priv,
-				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
-		else
-			CAM_ERR(CAM_ISP, "TFE:%d invalid eventcb:",
-				core_info->core_index);
+		error_detected = 1;
 	}
-	if (evt_payload->irq_reg_val[2] & hw_info->error_irq_mask[2]) {
-		CAM_ERR(CAM_ISP, "TFE:%d Violation error irq_status[2]:%x",
-			core_info->core_index, evt_payload->irq_reg_val[2]);
+	if ((evt_payload->bus_irq_val[0] & hw_info->bus_error_irq_mask[0]) ||
+		(evt_payload->irq_reg_val[2] & hw_info->error_irq_mask[2])) {
 		evt_info.err_type = CAM_TFE_IRQ_STATUS_VIOLATION;
-		cam_tfe_log_error_irq_status(core_info, top_priv, evt_payload);
+		error_detected = 1;
+	}
+	if (error_detected) {
+		evt_info.err_type = CAM_TFE_IRQ_STATUS_OVERFLOW;
+		top_priv->error_ts.tv_sec =
+			evt_payload->ts.mono_time.tv_sec;
+		top_priv->error_ts.tv_usec =
+			evt_payload->ts.mono_time.tv_usec;
+		cam_tfe_log_error_irq_status(core_info, top_priv, evt_payload);
 		if (event_cb)
 			event_cb(event_cb_priv,
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
@@ -430,6 +486,7 @@ static int cam_tfe_error_irq_bottom_half(
} }
static int cam_tfe_rdi_irq_bottom_half( static int cam_tfe_rdi_irq_bottom_half(
struct cam_tfe_top_priv *top_priv,
struct cam_isp_resource_node *rdi_node, struct cam_isp_resource_node *rdi_node,
bool epoch_process, bool epoch_process,
struct cam_tfe_irq_evt_payload *evt_payload) struct cam_tfe_irq_evt_payload *evt_payload)
@@ -448,6 +505,11 @@ static int cam_tfe_rdi_irq_bottom_half(
if ((!epoch_process) && (evt_payload->irq_reg_val[1] & if ((!epoch_process) && (evt_payload->irq_reg_val[1] &
rdi_priv->reg_data->eof_irq_mask)) { rdi_priv->reg_data->eof_irq_mask)) {
CAM_DBG(CAM_ISP, "Received EOF"); CAM_DBG(CAM_ISP, "Received EOF");
top_priv->eof_ts.tv_sec =
evt_payload->ts.mono_time.tv_sec;
top_priv->eof_ts.tv_usec =
evt_payload->ts.mono_time.tv_usec;
if (rdi_priv->event_cb) if (rdi_priv->event_cb)
rdi_priv->event_cb(rdi_priv->priv, rdi_priv->event_cb(rdi_priv->priv,
CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
@@ -456,6 +518,11 @@ static int cam_tfe_rdi_irq_bottom_half(
if ((!epoch_process) && (evt_payload->irq_reg_val[1] & if ((!epoch_process) && (evt_payload->irq_reg_val[1] &
rdi_priv->reg_data->sof_irq_mask)) { rdi_priv->reg_data->sof_irq_mask)) {
CAM_DBG(CAM_ISP, "Received SOF"); CAM_DBG(CAM_ISP, "Received SOF");
top_priv->sof_ts.tv_sec =
evt_payload->ts.mono_time.tv_sec;
top_priv->sof_ts.tv_usec =
evt_payload->ts.mono_time.tv_usec;
if (rdi_priv->event_cb) if (rdi_priv->event_cb)
rdi_priv->event_cb(rdi_priv->priv, rdi_priv->event_cb(rdi_priv->priv,
CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
@@ -464,6 +531,10 @@ static int cam_tfe_rdi_irq_bottom_half(
if (epoch_process && (evt_payload->irq_reg_val[1] & if (epoch_process && (evt_payload->irq_reg_val[1] &
rdi_priv->reg_data->epoch0_irq_mask)) { rdi_priv->reg_data->epoch0_irq_mask)) {
CAM_DBG(CAM_ISP, "Received EPOCH0"); CAM_DBG(CAM_ISP, "Received EPOCH0");
top_priv->epoch_ts.tv_sec =
evt_payload->ts.mono_time.tv_sec;
top_priv->epoch_ts.tv_usec =
evt_payload->ts.mono_time.tv_usec;
if (rdi_priv->event_cb) if (rdi_priv->event_cb)
rdi_priv->event_cb(rdi_priv->priv, rdi_priv->event_cb(rdi_priv->priv,
@@ -474,6 +545,7 @@ static int cam_tfe_rdi_irq_bottom_half(
} }
static int cam_tfe_camif_irq_bottom_half( static int cam_tfe_camif_irq_bottom_half(
struct cam_tfe_top_priv *top_priv,
struct cam_isp_resource_node *camif_node, struct cam_isp_resource_node *camif_node,
bool epoch_process, bool epoch_process,
struct cam_tfe_irq_evt_payload *evt_payload) struct cam_tfe_irq_evt_payload *evt_payload)
@@ -493,6 +565,11 @@ static int cam_tfe_camif_irq_bottom_half(
camif_priv->reg_data->eof_irq_mask)) { camif_priv->reg_data->eof_irq_mask)) {
CAM_DBG(CAM_ISP, "Received EOF"); CAM_DBG(CAM_ISP, "Received EOF");
top_priv->eof_ts.tv_sec =
evt_payload->ts.mono_time.tv_sec;
top_priv->eof_ts.tv_usec =
evt_payload->ts.mono_time.tv_usec;
if (camif_priv->event_cb) if (camif_priv->event_cb)
camif_priv->event_cb(camif_priv->priv, camif_priv->event_cb(camif_priv->priv,
CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
@@ -515,6 +592,11 @@ static int cam_tfe_camif_irq_bottom_half(
} else } else
CAM_DBG(CAM_ISP, "Received SOF"); CAM_DBG(CAM_ISP, "Received SOF");
top_priv->sof_ts.tv_sec =
evt_payload->ts.mono_time.tv_sec;
top_priv->sof_ts.tv_usec =
evt_payload->ts.mono_time.tv_usec;
if (camif_priv->event_cb) if (camif_priv->event_cb)
camif_priv->event_cb(camif_priv->priv, camif_priv->event_cb(camif_priv->priv,
CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
@@ -524,6 +606,11 @@ static int cam_tfe_camif_irq_bottom_half(
camif_priv->reg_data->epoch0_irq_mask)) { camif_priv->reg_data->epoch0_irq_mask)) {
CAM_DBG(CAM_ISP, "Received EPOCH"); CAM_DBG(CAM_ISP, "Received EPOCH");
top_priv->epoch_ts.tv_sec =
evt_payload->ts.mono_time.tv_sec;
top_priv->epoch_ts.tv_usec =
evt_payload->ts.mono_time.tv_usec;
if (camif_priv->event_cb) if (camif_priv->event_cb)
camif_priv->event_cb(camif_priv->priv, camif_priv->event_cb(camif_priv->priv,
CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info); CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
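The SOF/EPOCH/EOF handlers above cache each event's timestamp in the top private data so that a later error IRQ can report how long ago each frame event fired. A rough user-space sketch of that bookkeeping follows; the struct and function names are illustrative, and CLOCK_MONOTONIC stands in for the driver's boottime clock source.

#include <stdio.h>
#include <time.h>

struct event_ts {
	struct timespec sof, epoch, eof, error;
};

static void mark(struct timespec *slot)
{
	/* the driver uses ktime_get_boottime_ts64() for the same purpose */
	clock_gettime(CLOCK_MONOTONIC, slot);
}

static void report(const struct event_ts *ts)
{
	printf("ERROR %lld.%06ld SOF %lld.%06ld EOF %lld.%06ld\n",
		(long long)ts->error.tv_sec, ts->error.tv_nsec / 1000,
		(long long)ts->sof.tv_sec, ts->sof.tv_nsec / 1000,
		(long long)ts->eof.tv_sec, ts->eof.tv_nsec / 1000);
}

int main(void)
{
	struct event_ts ts = {0};

	mark(&ts.sof);     /* e.g. on a SOF IRQ */
	mark(&ts.eof);     /* e.g. on an EOF IRQ */
	mark(&ts.error);   /* when an error IRQ fires */
	report(&ts);
	return 0;
}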
@@ -575,7 +662,7 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
if (camif_priv->reg_data->subscribe_irq_mask[1] & if (camif_priv->reg_data->subscribe_irq_mask[1] &
evt_payload->irq_reg_val[1]) evt_payload->irq_reg_val[1])
cam_tfe_camif_irq_bottom_half( cam_tfe_camif_irq_bottom_half(top_priv,
&top_priv->in_rsrc[i], false, &top_priv->in_rsrc[i], false,
evt_payload); evt_payload);
@@ -584,7 +671,8 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
(top_priv->in_rsrc[i].res_id <= (top_priv->in_rsrc[i].res_id <=
CAM_ISP_HW_TFE_IN_RDI2) && CAM_ISP_HW_TFE_IN_RDI2) &&
(top_priv->in_rsrc[i].res_state == (top_priv->in_rsrc[i].res_state ==
CAM_ISP_RESOURCE_STATE_STREAMING)) { CAM_ISP_RESOURCE_STATE_STREAMING) &&
top_priv->in_rsrc[i].rdi_only_ctx) {
rdi_priv = (struct cam_tfe_rdi_data *) rdi_priv = (struct cam_tfe_rdi_data *)
top_priv->in_rsrc[i].res_priv; top_priv->in_rsrc[i].res_priv;
event_cb = rdi_priv->event_cb; event_cb = rdi_priv->event_cb;
@@ -592,7 +680,7 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
if (rdi_priv->reg_data->subscribe_irq_mask[1] & if (rdi_priv->reg_data->subscribe_irq_mask[1] &
evt_payload->irq_reg_val[1]) evt_payload->irq_reg_val[1])
cam_tfe_rdi_irq_bottom_half( cam_tfe_rdi_irq_bottom_half(top_priv,
&top_priv->in_rsrc[i], false, &top_priv->in_rsrc[i], false,
evt_payload); evt_payload);
} }
@@ -606,7 +694,7 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
if (evt_payload->irq_reg_val[0] & if (evt_payload->irq_reg_val[0] &
core_info->tfe_hw_info->bus_reg_irq_mask[0]) { core_info->tfe_hw_info->bus_reg_irq_mask[0]) {
core_info->tfe_bus->bottom_half_handler( core_info->tfe_bus->bottom_half_handler(
core_info->tfe_bus->bus_priv, true, evt_payload); core_info->tfe_bus->bus_priv, true, evt_payload, false);
} }
/* process the epoch */ /* process the epoch */
@@ -619,7 +707,7 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
top_priv->in_rsrc[i].res_priv; top_priv->in_rsrc[i].res_priv;
if (camif_priv->reg_data->subscribe_irq_mask[1] & if (camif_priv->reg_data->subscribe_irq_mask[1] &
evt_payload->irq_reg_val[1]) evt_payload->irq_reg_val[1])
cam_tfe_camif_irq_bottom_half( cam_tfe_camif_irq_bottom_half(top_priv,
&top_priv->in_rsrc[i], true, &top_priv->in_rsrc[i], true,
evt_payload); evt_payload);
} else if ((top_priv->in_rsrc[i].res_id >= } else if ((top_priv->in_rsrc[i].res_id >=
@@ -632,7 +720,7 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
top_priv->in_rsrc[i].res_priv; top_priv->in_rsrc[i].res_priv;
if (rdi_priv->reg_data->subscribe_irq_mask[1] & if (rdi_priv->reg_data->subscribe_irq_mask[1] &
evt_payload->irq_reg_val[1]) evt_payload->irq_reg_val[1])
cam_tfe_rdi_irq_bottom_half( cam_tfe_rdi_irq_bottom_half(top_priv,
&top_priv->in_rsrc[i], true, &top_priv->in_rsrc[i], true,
evt_payload); evt_payload);
} }
@@ -642,7 +730,8 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
if (evt_payload->irq_reg_val[0] & if (evt_payload->irq_reg_val[0] &
core_info->tfe_hw_info->bus_reg_irq_mask[0]) { core_info->tfe_hw_info->bus_reg_irq_mask[0]) {
core_info->tfe_bus->bottom_half_handler( core_info->tfe_bus->bottom_half_handler(
core_info->tfe_bus->bus_priv, false, evt_payload); core_info->tfe_bus->bus_priv, false, evt_payload,
false);
} }
cam_tfe_put_evt_payload(core_info, &evt_payload); cam_tfe_put_evt_payload(core_info, &evt_payload);
@@ -653,16 +742,24 @@ static int cam_tfe_irq_bottom_half(void *handler_priv,
 static int cam_tfe_irq_err_top_half(
 	struct cam_tfe_hw_core_info    *core_info,
 	void __iomem                   *mem_base,
-	uint32_t                       *irq_status)
+	uint32_t                       *top_irq_status,
+	uint32_t                       *bus_irq_status)
 {
 	uint32_t i;
-	if (irq_status[0] & core_info->tfe_hw_info->error_irq_mask[0] ||
-		irq_status[2] & core_info->tfe_hw_info->error_irq_mask[2]) {
+	if ((top_irq_status[0] & core_info->tfe_hw_info->error_irq_mask[0]) ||
+		(top_irq_status[2] &
+			core_info->tfe_hw_info->error_irq_mask[2]) ||
+		(bus_irq_status[0] &
+			core_info->tfe_hw_info->bus_error_irq_mask[0])) {
 		CAM_ERR(CAM_ISP,
 			"Encountered Error: tfe:%d: Irq_status0=0x%x status2=0x%x",
-			core_info->core_index, irq_status[0],
-			irq_status[2]);
+			core_info->core_index, top_irq_status[0],
+			top_irq_status[2]);
+		CAM_ERR(CAM_ISP,
+			"Encountered Error: tfe:%d:BUS Irq_status0=0x%x",
+			core_info->core_index, bus_irq_status[0]);
 		for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++)
 			cam_io_w(0, mem_base +
 				core_info->tfe_hw_info->top_irq_mask[i]);
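With the extra bus_irq_status argument, the top half treats a latched bus error the same way as a top-level error: log it, mask every top-level IRQ register, and leave the heavy lifting to the bottom half. A compact sketch of that decision is below; the mask values and array sizes are arbitrary placeholders, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOP_IRQ_REG_NUM  3
#define BUS_IRQ_REG_NUM  2

static bool is_error(const uint32_t *top, const uint32_t *bus,
	const uint32_t *top_err_mask, const uint32_t *bus_err_mask)
{
	/* An error is latched if any masked bit is set in either block. */
	return (top[0] & top_err_mask[0]) ||
	       (top[2] & top_err_mask[2]) ||
	       (bus[0] & bus_err_mask[0]);
}

int main(void)
{
	uint32_t top[TOP_IRQ_REG_NUM] = {0};
	uint32_t bus[BUS_IRQ_REG_NUM] = {0x20000000, 0};
	uint32_t top_err[TOP_IRQ_REG_NUM] = {0x0F000000, 0, 0xFF};
	uint32_t bus_err[BUS_IRQ_REG_NUM] = {0xE0000000, 0};

	printf("error=%d\n", is_error(top, bus, top_err, bus_err));
	return 0;
}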
@@ -755,7 +852,8 @@ irqreturn_t cam_tfe_irq(int irq_num, void *data)
} }
/* Check the irq errors */ /* Check the irq errors */
cam_tfe_irq_err_top_half(core_info, mem_base, top_irq_status); cam_tfe_irq_err_top_half(core_info, mem_base, top_irq_status,
bus_irq_status);
rc = cam_tfe_get_evt_payload(core_info, &evt_payload); rc = cam_tfe_get_evt_payload(core_info, &evt_payload);
if (rc) { if (rc) {
@@ -877,7 +975,7 @@ static int cam_tfe_top_set_axi_bw_vote(
struct cam_tfe_top_priv *top_priv, struct cam_tfe_top_priv *top_priv,
bool start_stop) bool start_stop)
{ {
-	struct cam_axi_vote             agg_vote = {0};
+	struct cam_axi_vote            *agg_vote = NULL;
struct cam_axi_vote *to_be_applied_axi_vote = NULL; struct cam_axi_vote *to_be_applied_axi_vote = NULL;
struct cam_hw_soc_info *soc_info = top_priv->common_data.soc_info; struct cam_hw_soc_info *soc_info = top_priv->common_data.soc_info;
struct cam_tfe_soc_private *soc_private = soc_info->soc_private; struct cam_tfe_soc_private *soc_private = soc_info->soc_private;
@@ -893,6 +991,12 @@ static int cam_tfe_top_set_axi_bw_vote(
return -EINVAL; return -EINVAL;
} }
agg_vote = kzalloc(sizeof(struct cam_axi_vote), GFP_KERNEL);
if (!agg_vote) {
CAM_ERR(CAM_ISP, "Out of memory");
return -ENOMEM;
}
for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
if (top_priv->axi_vote_control[i] == if (top_priv->axi_vote_control[i] ==
CAM_TFE_BW_CONTROL_INCLUDE) { CAM_TFE_BW_CONTROL_INCLUDE) {
@@ -904,10 +1008,11 @@ static int cam_tfe_top_set_axi_bw_vote(
num_paths + num_paths +
top_priv->req_axi_vote[i].num_paths, top_priv->req_axi_vote[i].num_paths,
CAM_CPAS_MAX_PATHS_PER_CLIENT); CAM_CPAS_MAX_PATHS_PER_CLIENT);
-				return -EINVAL;
+				rc = -EINVAL;
+				goto free_mem;
 			}
-			memcpy(&agg_vote.axi_path[num_paths],
+			memcpy(&agg_vote->axi_path[num_paths],
&top_priv->req_axi_vote[i].axi_path[0], &top_priv->req_axi_vote[i].axi_path[0],
top_priv->req_axi_vote[i].num_paths * top_priv->req_axi_vote[i].num_paths *
sizeof( sizeof(
@@ -916,31 +1021,31 @@ static int cam_tfe_top_set_axi_bw_vote(
} }
} }
agg_vote.num_paths = num_paths; agg_vote->num_paths = num_paths;
for (i = 0; i < agg_vote.num_paths; i++) { for (i = 0; i < agg_vote->num_paths; i++) {
CAM_DBG(CAM_PERF, CAM_DBG(CAM_PERF,
"tfe[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]", "tfe[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]",
top_priv->common_data.hw_intf->hw_idx, top_priv->common_data.hw_intf->hw_idx,
top_priv->last_counter, top_priv->last_counter,
cam_cpas_axi_util_path_type_to_string( cam_cpas_axi_util_path_type_to_string(
agg_vote.axi_path[i].path_data_type), agg_vote->axi_path[i].path_data_type),
cam_cpas_axi_util_trans_type_to_string( cam_cpas_axi_util_trans_type_to_string(
agg_vote.axi_path[i].transac_type), agg_vote->axi_path[i].transac_type),
agg_vote.axi_path[i].camnoc_bw, agg_vote->axi_path[i].camnoc_bw,
agg_vote.axi_path[i].mnoc_ab_bw, agg_vote->axi_path[i].mnoc_ab_bw,
agg_vote.axi_path[i].mnoc_ib_bw); agg_vote->axi_path[i].mnoc_ib_bw);
total_bw_new_vote += agg_vote.axi_path[i].camnoc_bw; total_bw_new_vote += agg_vote->axi_path[i].camnoc_bw;
} }
memcpy(&top_priv->last_vote[top_priv->last_counter], &agg_vote, memcpy(&top_priv->last_vote[top_priv->last_counter], agg_vote,
sizeof(struct cam_axi_vote)); sizeof(struct cam_axi_vote));
top_priv->last_counter = (top_priv->last_counter + 1) % top_priv->last_counter = (top_priv->last_counter + 1) %
(CAM_TFE_TOP_IN_PORT_MAX * (CAM_TFE_TOP_IN_PORT_MAX *
CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES); CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES);
if ((agg_vote.num_paths != top_priv->applied_axi_vote.num_paths) || if ((agg_vote->num_paths != top_priv->applied_axi_vote.num_paths) ||
(total_bw_new_vote != top_priv->total_bw_applied)) (total_bw_new_vote != top_priv->total_bw_applied))
bw_unchanged = false; bw_unchanged = false;
@@ -952,18 +1057,19 @@ static int cam_tfe_top_set_axi_bw_vote(
if (bw_unchanged) { if (bw_unchanged) {
CAM_DBG(CAM_ISP, "BW config unchanged"); CAM_DBG(CAM_ISP, "BW config unchanged");
return 0; rc = 0;
goto free_mem;
} }
if (start_stop) { if (start_stop) {
/* need to vote current request immediately */ /* need to vote current request immediately */
to_be_applied_axi_vote = &agg_vote; to_be_applied_axi_vote = agg_vote;
/* Reset everything, we can start afresh */ /* Reset everything, we can start afresh */
memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) * memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
(CAM_TFE_TOP_IN_PORT_MAX * (CAM_TFE_TOP_IN_PORT_MAX *
CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES)); CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES));
top_priv->last_counter = 0; top_priv->last_counter = 0;
top_priv->last_vote[top_priv->last_counter] = agg_vote; top_priv->last_vote[top_priv->last_counter] = *agg_vote;
top_priv->last_counter = (top_priv->last_counter + 1) % top_priv->last_counter = (top_priv->last_counter + 1) %
(CAM_TFE_TOP_IN_PORT_MAX * (CAM_TFE_TOP_IN_PORT_MAX *
CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES); CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES);
@@ -977,7 +1083,8 @@ static int cam_tfe_top_set_axi_bw_vote(
&total_bw_new_vote); &total_bw_new_vote);
if (!to_be_applied_axi_vote) { if (!to_be_applied_axi_vote) {
CAM_ERR(CAM_ISP, "to_be_applied_axi_vote is NULL"); CAM_ERR(CAM_ISP, "to_be_applied_axi_vote is NULL");
return -EINVAL; rc = -EINVAL;
goto free_mem;
} }
} }
@@ -1018,6 +1125,9 @@ static int cam_tfe_top_set_axi_bw_vote(
} }
} }
free_mem:
kzfree(agg_vote);
agg_vote = NULL;
return rc; return rc;
} }
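struct cam_axi_vote is large, so the hunk above moves the aggregate vote off the stack and routes every exit through a single free_mem label. The same shape in plain user-space C is sketched here, with malloc/calloc standing in for kzalloc and the vote struct trimmed to a placeholder.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct axi_vote { uint32_t num_paths; uint64_t bw[64]; };   /* placeholder */

static int set_bw_vote(int bad_input)
{
	int rc = 0;
	struct axi_vote *agg_vote;

	agg_vote = calloc(1, sizeof(*agg_vote));   /* heap, not stack */
	if (!agg_vote)
		return -12;    /* -ENOMEM */

	if (bad_input) {
		rc = -22;      /* -EINVAL */
		goto free_mem; /* single exit path frees the vote */
	}

	agg_vote->num_paths = 1;
	agg_vote->bw[0] = 600000000ULL;
	printf("applied %u path(s)\n", agg_vote->num_paths);

free_mem:
	free(agg_vote);
	return rc;
}

int main(void) { return set_bw_vote(0); }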
@@ -1381,6 +1491,234 @@ static int cam_tfe_top_get_reg_dump(
return 0; return 0;
} }
static int cam_tfe_hw_dump(
struct cam_tfe_hw_core_info *core_info,
void *cmd_args,
uint32_t arg_size)
{
int i, j;
uint8_t *dst;
uint32_t reg_start_offset;
uint32_t reg_dump_size = 0;
uint32_t lut_dump_size = 0;
uint32_t num_lut_dump_entries = 0;
uint32_t num_reg;
uint32_t lut_word_size, lut_size;
uint32_t lut_bank_sel, lut_dmi_reg;
uint32_t val;
void __iomem *reg_base;
void __iomem *mem_base;
uint32_t *addr, *start;
uint64_t *clk_waddr, *clk_wstart;
size_t remain_len;
uint32_t min_len;
struct cam_hw_info *tfe_hw_info;
struct cam_hw_soc_info *soc_info;
struct cam_tfe_top_priv *top_priv;
struct cam_tfe_soc_private *soc_private;
struct cam_tfe_reg_dump_data *reg_dump_data;
struct cam_isp_hw_dump_header *hdr;
struct cam_isp_hw_dump_args *dump_args =
(struct cam_isp_hw_dump_args *)cmd_args;
if (!dump_args || !core_info) {
CAM_ERR(CAM_ISP, "Invalid args");
return -EINVAL;
}
if (!dump_args->cpu_addr || !dump_args->buf_len) {
CAM_ERR(CAM_ISP,
"Invalid params %pK %zu",
(void *)dump_args->cpu_addr,
dump_args->buf_len);
return -EINVAL;
}
if (dump_args->buf_len <= dump_args->offset) {
CAM_WARN(CAM_ISP,
"Dump offset overshoot offset %zu buf_len %zu",
dump_args->offset, dump_args->buf_len);
return -ENOSPC;
}
top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
tfe_hw_info =
(struct cam_hw_info *)(top_priv->common_data.hw_intf->hw_priv);
reg_dump_data = top_priv->common_data.reg_dump_data;
soc_info = top_priv->common_data.soc_info;
soc_private = top_priv->common_data.soc_info->soc_private;
mem_base = soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;
if (dump_args->is_dump_all) {
/*Dump registers size*/
for (i = 0; i < reg_dump_data->num_reg_dump_entries; i++)
reg_dump_size +=
(reg_dump_data->reg_entry[i].end_offset -
reg_dump_data->reg_entry[i].start_offset);
/*
* We dump the offset as well, so the total size dumped becomes
* multiplied by 2
*/
reg_dump_size *= 2;
/* LUT dump size */
for (i = 0; i < reg_dump_data->num_lut_dump_entries; i++)
lut_dump_size +=
((reg_dump_data->lut_entry[i].lut_addr_size) *
(reg_dump_data->lut_entry[i].lut_word_size/8));
num_lut_dump_entries = reg_dump_data->num_lut_dump_entries;
}
/*Minimum len comprises of:
* lut_dump_size + reg_dump_size + sizeof dump_header +
* (num_lut_dump_entries--> represents number of banks) +
* (misc number of words) * sizeof(uint32_t)
*/
min_len = lut_dump_size + reg_dump_size +
sizeof(struct cam_isp_hw_dump_header) +
(num_lut_dump_entries * sizeof(uint32_t)) +
(sizeof(uint32_t) * CAM_TFE_CORE_DUMP_MISC_NUM_WORDS);
remain_len = dump_args->buf_len - dump_args->offset;
if (remain_len < min_len) {
CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu, min %u",
remain_len, min_len);
return -ENOSPC;
}
mutex_lock(&tfe_hw_info->hw_mutex);
if (tfe_hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
CAM_ERR(CAM_ISP, "TFE:%d HW not powered up",
core_info->core_index);
mutex_unlock(&tfe_hw_info->hw_mutex);
return -EPERM;
}
if (!dump_args->is_dump_all)
goto dump_bw;
dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset;
hdr = (struct cam_isp_hw_dump_header *)dst;
hdr->word_size = sizeof(uint32_t);
scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "TFE_REG:");
addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header));
start = addr;
*addr++ = soc_info->index;
for (i = 0; i < reg_dump_data->num_reg_dump_entries; i++) {
num_reg = (reg_dump_data->reg_entry[i].end_offset -
reg_dump_data->reg_entry[i].start_offset)/4;
reg_start_offset = reg_dump_data->reg_entry[i].start_offset;
reg_base = mem_base + reg_start_offset;
for (j = 0; j < num_reg; j++) {
addr[0] =
soc_info->mem_block[TFE_CORE_BASE_IDX]->start +
reg_start_offset + (j*4);
addr[1] = cam_io_r(reg_base + (j*4));
addr += 2;
}
}
/*Dump bus top registers*/
num_reg = (reg_dump_data->bus_write_top_end_addr -
reg_dump_data->bus_start_addr)/4;
reg_base = mem_base + reg_dump_data->bus_start_addr;
reg_start_offset = soc_info->mem_block[TFE_CORE_BASE_IDX]->start +
reg_dump_data->bus_start_addr;
for (i = 0; i < num_reg; i++) {
addr[0] = reg_start_offset + (i*4);
addr[1] = cam_io_r(reg_base + (i*4));
addr += 2;
}
/* Dump bus clients */
reg_base = mem_base + reg_dump_data->bus_client_start_addr;
reg_start_offset = soc_info->mem_block[TFE_CORE_BASE_IDX]->start +
reg_dump_data->bus_client_start_addr;
for (j = 0; j < reg_dump_data->num_bus_clients; j++) {
for (i = 0; i <= 0x3c; i += 4) {
addr[0] = reg_start_offset + i;
addr[1] = cam_io_r(reg_base + i);
addr += 2;
}
for (i = 0x60; i <= 0x80; i += 4) {
addr[0] = reg_start_offset + (i*4);
addr[1] = cam_io_r(reg_base + (i*4));
addr += 2;
}
reg_base += reg_dump_data->bus_client_offset;
reg_start_offset += reg_dump_data->bus_client_offset;
}
hdr->size = hdr->word_size * (addr - start);
dump_args->offset += hdr->size +
sizeof(struct cam_isp_hw_dump_header);
/* Dump LUT entries */
for (i = 0; i < reg_dump_data->num_lut_dump_entries; i++) {
lut_bank_sel = reg_dump_data->lut_entry[i].lut_bank_sel;
lut_size = reg_dump_data->lut_entry[i].lut_addr_size;
lut_word_size = reg_dump_data->lut_entry[i].lut_word_size;
lut_dmi_reg = reg_dump_data->lut_entry[i].dmi_reg_offset;
dst = (char *)dump_args->cpu_addr + dump_args->offset;
hdr = (struct cam_isp_hw_dump_header *)dst;
scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "LUT_REG:");
hdr->word_size = lut_word_size/8;
addr = (uint32_t *)(dst +
sizeof(struct cam_isp_hw_dump_header));
start = addr;
*addr++ = lut_bank_sel;
cam_io_w_mb(lut_bank_sel, mem_base + lut_dmi_reg + 4);
cam_io_w_mb(0, mem_base + 0xC28);
for (j = 0; j < lut_size; j++) {
*addr = cam_io_r_mb(mem_base + 0xc30);
addr++;
}
hdr->size = hdr->word_size * (addr - start);
dump_args->offset += hdr->size +
sizeof(struct cam_isp_hw_dump_header);
}
cam_io_w_mb(0, mem_base + 0xC24);
cam_io_w_mb(0, mem_base + 0xC28);
dump_bw:
dst = (char *)dump_args->cpu_addr + dump_args->offset;
hdr = (struct cam_isp_hw_dump_header *)dst;
scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "TFE_CLK_RATE_BW:");
clk_waddr = (uint64_t *)(dst +
sizeof(struct cam_isp_hw_dump_header));
clk_wstart = clk_waddr;
hdr->word_size = sizeof(uint64_t);
*clk_waddr++ = top_priv->hw_clk_rate;
*clk_waddr++ = top_priv->total_bw_applied;
hdr->size = hdr->word_size * (clk_waddr - clk_wstart);
dump_args->offset += hdr->size +
sizeof(struct cam_isp_hw_dump_header);
dst = (char *)dump_args->cpu_addr + dump_args->offset;
hdr = (struct cam_isp_hw_dump_header *)dst;
scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "TFE_NIU_MAXWR:");
addr = (uint32_t *)(dst +
sizeof(struct cam_isp_hw_dump_header));
start = addr;
hdr->word_size = sizeof(uint32_t);
cam_cpas_reg_read(soc_private->cpas_handle,
CAM_CPAS_REG_CAMNOC, 0x20, true, &val);
*addr++ = val;
hdr->size = hdr->word_size * (addr - start);
dump_args->offset += hdr->size +
sizeof(struct cam_isp_hw_dump_header);
mutex_unlock(&tfe_hw_info->hw_mutex);
CAM_DBG(CAM_ISP, "offset %zu", dump_args->offset);
return 0;
}
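cam_tfe_hw_dump packs each section as a small header (tag, word size, payload size) followed by offset/value pairs, advancing dump_args->offset as it goes. A stripped-down sketch of that layout is given below, using a fabricated header type and an in-memory buffer in place of MMIO reads; field names and sizes are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_LEN 32

struct dump_header {            /* illustrative, not the driver's layout */
	char     tag[TAG_LEN];
	uint32_t word_size;
	uint32_t size;
};

static size_t dump_regs(uint8_t *buf, size_t offset,
	const uint32_t *regs, uint32_t num_reg, uint32_t base_offset)
{
	struct dump_header *hdr = (struct dump_header *)(buf + offset);
	uint32_t *addr = (uint32_t *)(hdr + 1), *start = addr;

	snprintf(hdr->tag, TAG_LEN, "TFE_REG:");
	hdr->word_size = sizeof(uint32_t);

	for (uint32_t i = 0; i < num_reg; i++) {
		*addr++ = base_offset + i * 4;   /* register offset */
		*addr++ = regs[i];               /* register value  */
	}
	hdr->size = hdr->word_size * (uint32_t)(addr - start);
	return offset + sizeof(*hdr) + hdr->size;   /* next free offset */
}

int main(void)
{
	uint64_t backing[64];                    /* keeps the buffer aligned */
	uint8_t *buf = (uint8_t *)backing;
	uint32_t regs[4] = {0x1, 0x2, 0x3, 0x4};

	printf("next offset %zu\n", dump_regs(buf, 0, regs, 4, 0xA000));
	return 0;
}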
static int cam_tfe_camif_irq_reg_dump( static int cam_tfe_camif_irq_reg_dump(
struct cam_tfe_hw_core_info *core_info, struct cam_tfe_hw_core_info *core_info,
void *cmd_args, uint32_t arg_size) void *cmd_args, uint32_t arg_size)
@@ -1465,10 +1803,14 @@ int cam_tfe_top_reserve(void *device_priv,
acquire_args->in_port->pix_pattern; acquire_args->in_port->pix_pattern;
camif_data->dsp_mode = camif_data->dsp_mode =
acquire_args->in_port->dsp_mode; acquire_args->in_port->dsp_mode;
-			camif_data->first_pixel =
+			camif_data->left_first_pixel =
 				acquire_args->in_port->left_start;
-			camif_data->last_pixel =
+			camif_data->left_last_pixel =
 				acquire_args->in_port->left_end;
+			camif_data->right_first_pixel =
+				acquire_args->in_port->right_start;
+			camif_data->right_last_pixel =
+				acquire_args->in_port->right_end;
camif_data->first_line = camif_data->first_line =
acquire_args->in_port->line_start; acquire_args->in_port->line_start;
camif_data->last_line = camif_data->last_line =
@@ -1494,6 +1836,14 @@ int cam_tfe_top_reserve(void *device_priv,
rdi_data->sync_mode = acquire_args->sync_mode; rdi_data->sync_mode = acquire_args->sync_mode;
rdi_data->event_cb = args->event_cb; rdi_data->event_cb = args->event_cb;
rdi_data->priv = args->priv; rdi_data->priv = args->priv;
rdi_data->left_first_pixel =
acquire_args->in_port->left_start;
rdi_data->left_last_pixel =
acquire_args->in_port->left_end;
rdi_data->first_line =
acquire_args->in_port->line_start;
rdi_data->last_line =
acquire_args->in_port->line_end;
} }
top_priv->in_rsrc[i].cdm_ops = acquire_args->cdm_ops; top_priv->in_rsrc[i].cdm_ops = acquire_args->cdm_ops;
@@ -1604,6 +1954,9 @@ static int cam_tfe_camif_resource_start(
if (!rsrc_data->camif_pd_enable) if (!rsrc_data->camif_pd_enable)
val |= (1 << rsrc_data->reg_data->camif_pd_rdi2_src_sel_shift); val |= (1 << rsrc_data->reg_data->camif_pd_rdi2_src_sel_shift);
/* enables the Delay Line CLC in the pixel pipeline */
val |= BIT(rsrc_data->reg_data->delay_line_en_shift);
cam_io_w_mb(val, rsrc_data->mem_base + cam_io_w_mb(val, rsrc_data->mem_base +
rsrc_data->common_reg->core_cfg_0); rsrc_data->common_reg->core_cfg_0);
@@ -1755,10 +2108,19 @@ int cam_tfe_top_start(struct cam_tfe_hw_core_info *core_info,
in_res->res_id - CAM_ISP_HW_TFE_IN_RDI0); in_res->res_id - CAM_ISP_HW_TFE_IN_RDI0);
} }
core_info->irq_err_config_cnt++; core_info->irq_err_config_cnt++;
if (core_info->irq_err_config_cnt == 1) { if (core_info->irq_err_config_cnt == 1) {
cam_tfe_irq_config(core_info, cam_tfe_irq_config(core_info,
core_info->tfe_hw_info->error_irq_mask, core_info->tfe_hw_info->error_irq_mask,
CAM_TFE_TOP_IRQ_REG_NUM, true);
top_priv->error_ts.tv_sec = 0;
top_priv->error_ts.tv_usec = 0;
top_priv->sof_ts.tv_sec = 0;
top_priv->sof_ts.tv_usec = 0;
top_priv->epoch_ts.tv_sec = 0;
top_priv->epoch_ts.tv_usec = 0;
top_priv->eof_ts.tv_sec = 0;
top_priv->eof_ts.tv_usec = 0;
} }
end: end:
@@ -2079,8 +2441,9 @@ int cam_tfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
top_priv->common_data.common_reg->global_reset_cmd); top_priv->common_data.common_reg->global_reset_cmd);
CAM_DBG(CAM_ISP, "TFE:%d waiting for tfe reset complete", CAM_DBG(CAM_ISP, "TFE:%d waiting for tfe reset complete",
core_info->core_index); core_info->core_index);
-	/* Wait for Completion or Timeout of 500ms */
+	/* Wait for Completion or Timeout of 100ms */
 	rc = wait_for_completion_timeout(&core_info->reset_complete,
 		msecs_to_jiffies(100));
if (rc <= 0) { if (rc <= 0) {
CAM_ERR(CAM_ISP, "TFE:%d Error Reset Timeout", CAM_ERR(CAM_ISP, "TFE:%d Error Reset Timeout",
@@ -2436,6 +2799,10 @@ int cam_tfe_process_cmd(void *hw_priv, uint32_t cmd_type,
break; break;
case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA: case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA:
*((struct cam_hw_soc_info **)cmd_args) = soc_info; *((struct cam_hw_soc_info **)cmd_args) = soc_info;
break;
case CAM_ISP_HW_CMD_DUMP_HW:
rc = cam_tfe_hw_dump(core_info,
cmd_args, arg_size);
break; break;
case CAM_ISP_HW_CMD_GET_BUF_UPDATE: case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
case CAM_ISP_HW_CMD_GET_HFR_UPDATE: case CAM_ISP_HW_CMD_GET_HFR_UPDATE:


@@ -25,6 +25,17 @@
#define CAM_TFE_MAX_REG_DUMP_ENTRIES 20 #define CAM_TFE_MAX_REG_DUMP_ENTRIES 20
#define CAM_TFE_MAX_LUT_DUMP_ENTRIES 10 #define CAM_TFE_MAX_LUT_DUMP_ENTRIES 10
#define CAM_TFE_MAX_CLC 30
#define CAM_TFE_CLC_NAME_LENGTH_MAX 32
/*we take each word as uint32_t, for dumping uint64_t count as 2 words
* soc index
* clk_rate--> uint64_t--> count as 2 words
* BW--> uint64_t --> count as 2 words
* MAX_NIU
*/
#define CAM_TFE_CORE_DUMP_MISC_NUM_WORDS 4
enum cam_tfe_lut_word_size { enum cam_tfe_lut_word_size {
CAM_TFE_LUT_WORD_SIZE_32, CAM_TFE_LUT_WORD_SIZE_32,
CAM_TFE_LUT_WORD_SIZE_64, CAM_TFE_LUT_WORD_SIZE_64,
@@ -112,6 +123,7 @@ struct cam_tfe_camif_reg_data {
uint32_t extern_reg_update_shift; uint32_t extern_reg_update_shift;
uint32_t camif_pd_rdi2_src_sel_shift; uint32_t camif_pd_rdi2_src_sel_shift;
uint32_t dual_tfe_sync_sel_shift; uint32_t dual_tfe_sync_sel_shift;
uint32_t delay_line_en_shift;
uint32_t pixel_pattern_shift; uint32_t pixel_pattern_shift;
uint32_t pixel_pattern_mask; uint32_t pixel_pattern_mask;
@@ -180,6 +192,11 @@ struct cam_tfe_rdi_reg_data {
uint32_t enable_diagnostic_hw; uint32_t enable_diagnostic_hw;
}; };
struct cam_tfe_clc_hw_status {
uint8_t name[CAM_TFE_CLC_NAME_LENGTH_MAX];
uint32_t hw_status_reg;
};
struct cam_tfe_rdi_hw_info { struct cam_tfe_rdi_hw_info {
struct cam_tfe_rdi_reg *rdi_reg; struct cam_tfe_rdi_reg *rdi_reg;
struct cam_tfe_rdi_reg_data *reg_data; struct cam_tfe_rdi_reg_data *reg_data;
@@ -194,32 +211,36 @@ struct cam_tfe_top_hw_info {
}; };
struct cam_tfe_hw_info { struct cam_tfe_hw_info {
uint32_t top_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; uint32_t top_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
uint32_t top_irq_clear[CAM_TFE_TOP_IRQ_REG_NUM]; uint32_t top_irq_clear[CAM_TFE_TOP_IRQ_REG_NUM];
uint32_t top_irq_status[CAM_TFE_TOP_IRQ_REG_NUM]; uint32_t top_irq_status[CAM_TFE_TOP_IRQ_REG_NUM];
uint32_t top_irq_cmd; uint32_t top_irq_cmd;
uint32_t global_clear_bitmask; uint32_t global_clear_bitmask;
uint32_t bus_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; uint32_t bus_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
uint32_t bus_irq_clear[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; uint32_t bus_irq_clear[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
uint32_t bus_irq_status[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; uint32_t bus_irq_status[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
uint32_t bus_irq_cmd; uint32_t bus_irq_cmd;
uint32_t bus_violation_reg; uint32_t bus_violation_reg;
uint32_t bus_overflow_reg; uint32_t bus_overflow_reg;
uint32_t bus_image_size_vilation_reg; uint32_t bus_image_size_vilation_reg;
uint32_t bus_overflow_clear_cmd; uint32_t bus_overflow_clear_cmd;
uint32_t debug_status_top; uint32_t debug_status_top;
uint32_t reset_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; uint32_t reset_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
uint32_t error_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; uint32_t error_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
-	uint32_t                        bus_reg_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
-	uint32_t                        top_version;
-	void                           *top_hw_info;
-	uint32_t                        bus_version;
-	void                           *bus_hw_info;
+	uint32_t                        bus_reg_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
+	uint32_t                        bus_error_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS];
+	uint32_t                        num_clc;
+	struct cam_tfe_clc_hw_status   *clc_hw_status_info;
+	uint32_t                        top_version;
+	void                           *top_hw_info;
+	uint32_t                        bus_version;
+	void                           *bus_hw_info;
}; };
struct cam_tfe_hw_core_info { struct cam_tfe_hw_core_info {


@@ -42,6 +42,7 @@ int cam_jpeg_enc_init_hw(void *device_priv,
struct cam_jpeg_enc_device_core_info *core_info = NULL; struct cam_jpeg_enc_device_core_info *core_info = NULL;
struct cam_ahb_vote ahb_vote; struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote = {0}; struct cam_axi_vote axi_vote = {0};
unsigned long flags;
int rc; int rc;
if (!device_priv) { if (!device_priv) {
@@ -92,6 +93,9 @@ int cam_jpeg_enc_init_hw(void *device_priv,
CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc); CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
goto soc_failed; goto soc_failed;
} }
spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags);
jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_UP;
spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
mutex_unlock(&core_info->core_mutex); mutex_unlock(&core_info->core_mutex);
@@ -112,6 +116,7 @@ int cam_jpeg_enc_deinit_hw(void *device_priv,
struct cam_hw_info *jpeg_enc_dev = device_priv; struct cam_hw_info *jpeg_enc_dev = device_priv;
struct cam_hw_soc_info *soc_info = NULL; struct cam_hw_soc_info *soc_info = NULL;
struct cam_jpeg_enc_device_core_info *core_info = NULL; struct cam_jpeg_enc_device_core_info *core_info = NULL;
unsigned long flags;
int rc; int rc;
if (!device_priv) { if (!device_priv) {
@@ -141,6 +146,9 @@ int cam_jpeg_enc_deinit_hw(void *device_priv,
return -EFAULT; return -EFAULT;
} }
spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags);
jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
rc = cam_jpeg_enc_disable_soc_resources(soc_info); rc = cam_jpeg_enc_disable_soc_resources(soc_info);
if (rc) if (rc)
CAM_ERR(CAM_JPEG, "soc disable failed %d", rc); CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
@@ -174,12 +182,19 @@ irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data)
hw_info = core_info->jpeg_enc_hw_info; hw_info = core_info->jpeg_enc_hw_info;
mem_base = soc_info->reg_map[0].mem_base; mem_base = soc_info->reg_map[0].mem_base;
spin_lock(&jpeg_enc_dev->hw_lock);
if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) {
CAM_ERR(CAM_JPEG, "JPEG HW is in off state");
spin_unlock(&jpeg_enc_dev->hw_lock);
return IRQ_HANDLED;
}
irq_status = cam_io_r_mb(mem_base + irq_status = cam_io_r_mb(mem_base +
core_info->jpeg_enc_hw_info->reg_offset.int_status); core_info->jpeg_enc_hw_info->reg_offset.int_status);
cam_io_w_mb(irq_status, cam_io_w_mb(irq_status,
soc_info->reg_map[0].mem_base + soc_info->reg_map[0].mem_base +
core_info->jpeg_enc_hw_info->reg_offset.int_clr); core_info->jpeg_enc_hw_info->reg_offset.int_clr);
spin_unlock(&jpeg_enc_dev->hw_lock);
CAM_DBG(CAM_JPEG, "irq_num %d irq_status = %x , core_state %d", CAM_DBG(CAM_JPEG, "irq_num %d irq_status = %x , core_state %d",
irq_num, irq_status, core_info->core_state); irq_num, irq_status, core_info->core_state);
@@ -255,6 +270,7 @@ int cam_jpeg_enc_reset_hw(void *data,
struct cam_jpeg_enc_device_hw_info *hw_info = NULL; struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
void __iomem *mem_base; void __iomem *mem_base;
unsigned long rem_jiffies; unsigned long rem_jiffies;
unsigned long flags;
if (!jpeg_enc_dev) { if (!jpeg_enc_dev) {
CAM_ERR(CAM_JPEG, "Invalid args"); CAM_ERR(CAM_JPEG, "Invalid args");
@@ -268,17 +284,23 @@ int cam_jpeg_enc_reset_hw(void *data,
mem_base = soc_info->reg_map[0].mem_base; mem_base = soc_info->reg_map[0].mem_base;
mutex_lock(&core_info->core_mutex); mutex_lock(&core_info->core_mutex);
spin_lock(&jpeg_enc_dev->hw_lock); spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags);
if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) {
CAM_ERR(CAM_JPEG, "JPEG HW is in off state");
spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
mutex_unlock(&core_info->core_mutex);
return -EINVAL;
}
if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) { if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
CAM_ERR(CAM_JPEG, "alrady resetting"); CAM_ERR(CAM_JPEG, "alrady resetting");
spin_unlock(&jpeg_enc_dev->hw_lock); spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
mutex_unlock(&core_info->core_mutex); mutex_unlock(&core_info->core_mutex);
return 0; return 0;
} }
reinit_completion(&jpeg_enc_dev->hw_complete); reinit_completion(&jpeg_enc_dev->hw_complete);
core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING; core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
spin_unlock(&jpeg_enc_dev->hw_lock); spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
cam_io_w_mb(hw_info->reg_val.int_mask_disable_all, cam_io_w_mb(hw_info->reg_val.int_mask_disable_all,
mem_base + hw_info->reg_offset.int_mask); mem_base + hw_info->reg_offset.int_mask);
@@ -308,6 +330,7 @@ int cam_jpeg_enc_start_hw(void *data,
struct cam_hw_soc_info *soc_info = NULL; struct cam_hw_soc_info *soc_info = NULL;
struct cam_jpeg_enc_device_hw_info *hw_info = NULL; struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
void __iomem *mem_base; void __iomem *mem_base;
unsigned long flags;
if (!jpeg_enc_dev) { if (!jpeg_enc_dev) {
CAM_ERR(CAM_JPEG, "Invalid args"); CAM_ERR(CAM_JPEG, "Invalid args");
@@ -320,10 +343,18 @@ int cam_jpeg_enc_start_hw(void *data,
hw_info = core_info->jpeg_enc_hw_info; hw_info = core_info->jpeg_enc_hw_info;
mem_base = soc_info->reg_map[0].mem_base; mem_base = soc_info->reg_map[0].mem_base;
-	if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
-		CAM_ERR(CAM_JPEG, "Error not ready");
+	spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags);
+	if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) {
+		CAM_ERR(CAM_JPEG, "JPEG HW is in off state");
+		spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
 		return -EINVAL;
 	}
+	if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
+		CAM_ERR(CAM_JPEG, "Error not ready: %d", core_info->core_state);
+		spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
cam_io_w_mb(hw_info->reg_val.hw_cmd_start, cam_io_w_mb(hw_info->reg_val.hw_cmd_start,
mem_base + hw_info->reg_offset.hw_cmd); mem_base + hw_info->reg_offset.hw_cmd);
@@ -340,6 +371,7 @@ int cam_jpeg_enc_stop_hw(void *data,
struct cam_jpeg_enc_device_hw_info *hw_info = NULL; struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
void __iomem *mem_base; void __iomem *mem_base;
unsigned long rem_jiffies; unsigned long rem_jiffies;
unsigned long flags;
if (!jpeg_enc_dev) { if (!jpeg_enc_dev) {
CAM_ERR(CAM_JPEG, "Invalid args"); CAM_ERR(CAM_JPEG, "Invalid args");
@@ -352,17 +384,23 @@ int cam_jpeg_enc_stop_hw(void *data,
mem_base = soc_info->reg_map[0].mem_base; mem_base = soc_info->reg_map[0].mem_base;
mutex_lock(&core_info->core_mutex); mutex_lock(&core_info->core_mutex);
spin_lock(&jpeg_enc_dev->hw_lock); spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags);
if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) {
CAM_ERR(CAM_JPEG, "JPEG HW is in off state");
spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
mutex_unlock(&core_info->core_mutex);
return -EINVAL;
}
if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) { if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
CAM_ERR(CAM_JPEG, "alrady stopping"); CAM_ERR(CAM_JPEG, "alrady stopping");
spin_unlock(&jpeg_enc_dev->hw_lock); spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
mutex_unlock(&core_info->core_mutex); mutex_unlock(&core_info->core_mutex);
return 0; return 0;
} }
reinit_completion(&jpeg_enc_dev->hw_complete); reinit_completion(&jpeg_enc_dev->hw_complete);
core_info->core_state = CAM_JPEG_ENC_CORE_ABORTING; core_info->core_state = CAM_JPEG_ENC_CORE_ABORTING;
spin_unlock(&jpeg_enc_dev->hw_lock); spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags);
cam_io_w_mb(hw_info->reg_val.hw_cmd_stop, cam_io_w_mb(hw_info->reg_val.hw_cmd_stop,
mem_base + hw_info->reg_offset.hw_cmd); mem_base + hw_info->reg_offset.hw_cmd);
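The JPEG encoder hunks wrap every HW access in a check of hw_state under hw_lock, so an IRQ, reset, start or stop that races with power-down bails out instead of touching a powered-off block. A small pthread-based sketch of the same guard is shown here; a mutex stands in for the spinlock and all names are made up.

#include <pthread.h>
#include <stdio.h>

enum hw_state { HW_POWER_DOWN, HW_POWER_UP };

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static enum hw_state state = HW_POWER_DOWN;

static int stop_hw(void)
{
	pthread_mutex_lock(&hw_lock);
	if (state == HW_POWER_DOWN) {          /* bail out, HW is off */
		pthread_mutex_unlock(&hw_lock);
		fprintf(stderr, "HW is in off state\n");
		return -22;                    /* -EINVAL */
	}
	/* ... would write the stop command to MMIO here ... */
	pthread_mutex_unlock(&hw_lock);
	return 0;
}

int main(void)
{
	stop_hw();                 /* rejected: still powered down */
	pthread_mutex_lock(&hw_lock);
	state = HW_POWER_UP;       /* what init_hw does after enabling SoC */
	pthread_mutex_unlock(&hw_lock);
	return stop_hw();
}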

File diff suppressed because it is too large.


@@ -60,6 +60,8 @@
#define CLK_HW_MAX 0x1 #define CLK_HW_MAX 0x1
#define OPE_DEVICE_IDLE_TIMEOUT 400 #define OPE_DEVICE_IDLE_TIMEOUT 400
#define OPE_REQUEST_TIMEOUT 200
/** /**
@@ -223,12 +225,14 @@ struct cdm_dmi_cmd {
* @iova_addr: IOVA address * @iova_addr: IOVA address
* @len: Buffer length * @len: Buffer length
* @size: Buffer Size * @size: Buffer Size
* @offset: buffer offset
*/ */
struct ope_debug_buffer { struct ope_debug_buffer {
uintptr_t cpu_addr; uintptr_t cpu_addr;
dma_addr_t iova_addr; dma_addr_t iova_addr;
size_t len; size_t len;
uint32_t size; uint32_t size;
uint32_t offset;
}; };
/** /**
@@ -238,6 +242,7 @@ struct ope_debug_buffer {
* @cpu_addr: CPU address * @cpu_addr: CPU address
* @iova_addr: IOVA address * @iova_addr: IOVA address
* @iova_cdm_addr: CDM IOVA address * @iova_cdm_addr: CDM IOVA address
* @offset: Offset of buffer
* @len: Buffer length * @len: Buffer length
* @size: Buffer Size * @size: Buffer Size
*/ */
@@ -246,6 +251,7 @@ struct ope_kmd_buffer {
uintptr_t cpu_addr; uintptr_t cpu_addr;
dma_addr_t iova_addr; dma_addr_t iova_addr;
dma_addr_t iova_cdm_addr; dma_addr_t iova_cdm_addr;
uint32_t offset;
size_t len; size_t len;
uint32_t size; uint32_t size;
}; };
@@ -383,6 +389,7 @@ struct ope_io_buf {
* @cdm_cmd: CDM command for OPE CDM * @cdm_cmd: CDM command for OPE CDM
* @clk_info: Clock Info V1 * @clk_info: Clock Info V1
* @clk_info_v2: Clock Info V2 * @clk_info_v2: Clock Info V2
* @hang_data: Debug data for HW error
*/ */
struct cam_ope_request { struct cam_ope_request {
uint64_t request_id; uint64_t request_id;
@@ -398,10 +405,11 @@ struct cam_ope_request {
uint8_t num_stripe_cmd_bufs[OPE_MAX_BATCH_SIZE][OPE_MAX_STRIPES]; uint8_t num_stripe_cmd_bufs[OPE_MAX_BATCH_SIZE][OPE_MAX_STRIPES];
struct ope_kmd_buffer ope_kmd_buf; struct ope_kmd_buffer ope_kmd_buf;
struct ope_debug_buffer ope_debug_buf; struct ope_debug_buffer ope_debug_buf;
struct ope_io_buf io_buf[OPE_MAX_BATCH_SIZE][OPE_MAX_IO_BUFS]; struct ope_io_buf *io_buf[OPE_MAX_BATCH_SIZE][OPE_MAX_IO_BUFS];
struct cam_cdm_bl_request *cdm_cmd; struct cam_cdm_bl_request *cdm_cmd;
struct cam_ope_clk_bw_request clk_info; struct cam_ope_clk_bw_request clk_info;
struct cam_ope_clk_bw_req_internal_v2 clk_info_v2; struct cam_ope_clk_bw_req_internal_v2 clk_info_v2;
struct cam_hw_mgr_dump_pf_data hang_data;
}; };
/** /**
@@ -431,11 +439,13 @@ struct cam_ope_cdm {
* @ctxt_event_cb: Callback of a context * @ctxt_event_cb: Callback of a context
* @req_list: Request List * @req_list: Request List
* @ope_cdm: OPE CDM info * @ope_cdm: OPE CDM info
* @last_req_time: Timestamp of last request
* @req_watch_dog: Watchdog for requests * @req_watch_dog: Watchdog for requests
* @req_watch_dog_reset_counter: Request reset counter * @req_watch_dog_reset_counter: Request reset counter
* @clk_info: OPE Ctx clock info * @clk_info: OPE Ctx clock info
* @clk_watch_dog: Clock watchdog * @clk_watch_dog: Clock watchdog
* @clk_watch_dog_reset_counter: Reset counter * @clk_watch_dog_reset_counter: Reset counter
* @last_flush_req: last flush req for this ctx
*/ */
struct cam_ope_ctx { struct cam_ope_ctx {
void *context_priv; void *context_priv;
@@ -451,11 +461,13 @@ struct cam_ope_ctx {
cam_hw_event_cb_func ctxt_event_cb; cam_hw_event_cb_func ctxt_event_cb;
struct cam_ope_request *req_list[CAM_CTX_REQ_MAX]; struct cam_ope_request *req_list[CAM_CTX_REQ_MAX];
struct cam_ope_cdm ope_cdm; struct cam_ope_cdm ope_cdm;
uint64_t last_req_time;
struct cam_req_mgr_timer *req_watch_dog; struct cam_req_mgr_timer *req_watch_dog;
uint32_t req_watch_dog_reset_counter; uint32_t req_watch_dog_reset_counter;
struct cam_ctx_clk_info clk_info; struct cam_ctx_clk_info clk_info;
struct cam_req_mgr_timer *clk_watch_dog; struct cam_req_mgr_timer *clk_watch_dog;
uint32_t clk_watch_dog_reset_counter; uint32_t clk_watch_dog_reset_counter;
uint64_t last_flush_req;
}; };
/** /**
@@ -506,6 +518,7 @@ struct cam_ope_hw_mgr {
struct cam_ope_ctx ctx[OPE_CTX_MAX]; struct cam_ope_ctx ctx[OPE_CTX_MAX];
struct cam_hw_intf **devices[OPE_DEV_MAX]; struct cam_hw_intf **devices[OPE_DEV_MAX];
struct ope_query_cap_cmd ope_caps; struct ope_query_cap_cmd ope_caps;
uint64_t last_callback_time;
struct cam_req_mgr_core_workq *cmd_work; struct cam_req_mgr_core_workq *cmd_work;
struct cam_req_mgr_core_workq *msg_work; struct cam_req_mgr_core_workq *msg_work;
@@ -518,4 +531,68 @@ struct cam_ope_hw_mgr {
struct cam_ope_clk_info clk_info; struct cam_ope_clk_info clk_info;
}; };
/**
* struct cam_ope_buf_entry
*
* @fd: FD of cmd buffer
* @memhdl: Memhandle of cmd buffer
* @iova: IOVA address of cmd buffer
* @offset: Offset of cmd buffer
* @len: Length of cmd buffer
* @size: Size of cmd buffer
*/
struct cam_ope_buf_entry {
uint32_t fd;
uint64_t memhdl;
uint64_t iova;
uint64_t offset;
uint64_t len;
uint64_t size;
};
/**
* struct cam_ope_bl_entry
*
* @base: Base IOVA address of BL
* @len: Length of BL
* @arbitration: Arbitration bit
*/
struct cam_ope_bl_entry {
uint32_t base;
uint32_t len;
uint32_t arbitration;
};
/**
* struct cam_ope_output_info
*
* @iova: IOVA address of output buffer
* @offset: Offset of buffer
* @len: Length of buffer
*/
struct cam_ope_output_info {
uint64_t iova;
uint64_t offset;
uint64_t len;
};
/**
* struct cam_ope_hang_dump
*
* @num_bls: Count of BLs for request
* @num_bufs: Count of buffers related to request
* @num_outputs: Count of output buffers
* @entries: Buffers info
* @bl_entries: BLs info
* @outputs: Output info
*/
struct cam_ope_hang_dump {
uint32_t num_bls;
uint32_t num_bufs;
uint64_t num_outputs;
struct cam_ope_buf_entry entries[OPE_MAX_BATCH_SIZE * OPE_MAX_CMD_BUFS];
struct cam_ope_bl_entry bl_entries[OPE_MAX_CDM_BLS];
struct cam_ope_output_info outputs
[OPE_MAX_BATCH_SIZE * OPE_OUT_RES_MAX];
};
#endif /* CAM_OPE_HW_MGR_H */ #endif /* CAM_OPE_HW_MGR_H */
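The new cam_ope_hang_dump collects, per request, the command buffers, the submitted BLs and the output buffers so a hung request can be inspected or replayed. A toy filler for a structure of that shape is sketched below; the sizes are shrunk and the helper is an invented illustration, not a driver API.

#include <stdint.h>
#include <stdio.h>

#define MAX_BUFS 4
#define MAX_BLS  4

struct buf_entry { uint32_t fd; uint64_t iova, offset, len; };
struct bl_entry  { uint32_t base, len; };

struct hang_dump {
	uint32_t num_bufs, num_bls;
	struct buf_entry entries[MAX_BUFS];
	struct bl_entry  bl_entries[MAX_BLS];
};

static void add_buf(struct hang_dump *d, uint32_t fd, uint64_t iova,
	uint64_t offset, uint64_t len)
{
	if (d->num_bufs >= MAX_BUFS)
		return;                         /* never overflow the dump */
	d->entries[d->num_bufs++] =
		(struct buf_entry){fd, iova, offset, len};
}

int main(void)
{
	struct hang_dump dump = {0};

	add_buf(&dump, 11, 0x10000000, 0x0, 4096);
	add_buf(&dump, 12, 0x10010000, 0x40, 8192);
	printf("dumped %u buffer(s)\n", dump.num_bufs);
	return 0;
}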


@@ -29,6 +29,29 @@
static struct ope_bus_rd *bus_rd; static struct ope_bus_rd *bus_rd;
enum cam_ope_bus_unpacker_format {
UNPACKER_FMT_PLAIN_128 = 0x0,
UNPACKER_FMT_PLAIN_8 = 0x1,
UNPACKER_FMT_PLAIN_16_10BPP = 0x2,
UNPACKER_FMT_PLAIN_16_12BPP = 0x3,
UNPACKER_FMT_PLAIN_16_14BPP = 0x4,
UNPACKER_FMT_PLAIN_32_20BPP = 0x5,
UNPACKER_FMT_ARGB_16_10BPP = 0x6,
UNPACKER_FMT_ARGB_16_12BPP = 0x7,
UNPACKER_FMT_ARGB_16_14BPP = 0x8,
UNPACKER_FMT_PLAIN_32 = 0x9,
UNPACKER_FMT_PLAIN_64 = 0xA,
UNPACKER_FMT_TP_10 = 0xB,
UNPACKER_FMT_MIPI_8 = 0xC,
UNPACKER_FMT_MIPI_10 = 0xD,
UNPACKER_FMT_MIPI_12 = 0xE,
UNPACKER_FMT_MIPI_14 = 0xF,
UNPACKER_FMT_PLAIN_16_16BPP = 0x10,
UNPACKER_FMT_PLAIN_128_ODD_EVEN = 0x11,
UNPACKER_FMT_PLAIN_8_ODD_EVEN = 0x12,
UNPACKER_FMT_MAX = 0x13,
};
static int cam_ope_bus_rd_in_port_idx(uint32_t input_port_id) static int cam_ope_bus_rd_in_port_idx(uint32_t input_port_id)
{ {
int i; int i;
@@ -98,12 +121,16 @@ static int cam_ope_bus_is_rm_enabled(
} }
for (i = 0; i < ope_request->num_io_bufs[batch_idx]; i++) { for (i = 0; i < ope_request->num_io_bufs[batch_idx]; i++) {
io_buf = &ope_request->io_buf[batch_idx][i]; io_buf = ope_request->io_buf[batch_idx][i];
if (io_buf->direction != CAM_BUF_INPUT) if (io_buf->direction != CAM_BUF_INPUT)
continue; continue;
in_port_to_rm = in_port_to_rm =
&bus_rd->in_port_to_rm[io_buf->resource_type - 1]; &bus_rd->in_port_to_rm[io_buf->resource_type - 1];
combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format); combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format);
if (combo_idx < 0) {
CAM_ERR(CAM_OPE, "Invalid combo_idx");
return -EINVAL;
}
for (k = 0; k < io_buf->num_planes; k++) { for (k = 0; k < io_buf->num_planes; k++) {
if (rm_id == if (rm_id ==
in_port_to_rm->rm_port_id[combo_idx][k]) in_port_to_rm->rm_port_id[combo_idx][k])
@@ -171,7 +198,7 @@ static uint32_t *cam_ope_bus_rd_update(struct ope_hw *ope_hw_info,
rd_reg = ope_hw_info->bus_rd_reg; rd_reg = ope_hw_info->bus_rd_reg;
rd_reg_val = ope_hw_info->bus_rd_reg_val; rd_reg_val = ope_hw_info->bus_rd_reg_val;
io_buf = &ope_request->io_buf[batch_idx][io_idx]; io_buf = ope_request->io_buf[batch_idx][io_idx];
CAM_DBG(CAM_OPE, "batch:%d iobuf:%d direction:%d", CAM_DBG(CAM_OPE, "batch:%d iobuf:%d direction:%d",
batch_idx, io_idx, io_buf->direction); batch_idx, io_idx, io_buf->direction);
@@ -239,6 +266,14 @@ static uint32_t *cam_ope_bus_rd_update(struct ope_hw *ope_hw_info,
rd_reg_client->stride; rd_reg_client->stride;
temp_reg[count++] = stripe_io->stride; temp_reg[count++] = stripe_io->stride;
/*
* In case of NV12, change the unpacker format of
* chroma plane to odd even byte swapped format.
*/
if (k == 1 && stripe_io->format == CAM_FORMAT_NV12)
stripe_io->unpack_format =
UNPACKER_FMT_PLAIN_8_ODD_EVEN;
/* Unpack cfg : Mode and alignment */ /* Unpack cfg : Mode and alignment */
temp_reg[count++] = rd_reg->offset + temp_reg[count++] = rd_reg->offset +
rd_reg_client->unpack_cfg; rd_reg_client->unpack_cfg;
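Because NV12 interleaves Cb/Cr in its second plane, the read client for that plane is switched to the PLAIN_8 odd/even byte-swapped unpacker while the luma plane keeps its normal format. The helper below sketches that per-plane selection; the format ids mirror the enum added above, and the NV12 constant is a stand-in rather than the real CAM_FORMAT_NV12 value.

#include <stdio.h>

enum unpacker_fmt {            /* subset of the enum added above */
	UNPACK_PLAIN_8          = 0x1,
	UNPACK_PLAIN_8_ODD_EVEN = 0x12,
};

#define FMT_NV12 0x100          /* stand-in for CAM_FORMAT_NV12 */

static int unpack_fmt_for_plane(int frame_fmt, int plane, int default_fmt)
{
	/* plane 1 of NV12 carries interleaved chroma: swap odd/even bytes */
	if (plane == 1 && frame_fmt == FMT_NV12)
		return UNPACK_PLAIN_8_ODD_EVEN;
	return default_fmt;
}

int main(void)
{
	printf("luma:0x%x chroma:0x%x\n",
		unpack_fmt_for_plane(FMT_NV12, 0, UNPACK_PLAIN_8),
		unpack_fmt_for_plane(FMT_NV12, 1, UNPACK_PLAIN_8));
	return 0;
}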
@@ -324,6 +359,11 @@ static uint32_t *cam_ope_bus_rm_disable(struct ope_hw *ope_hw_info,
return NULL; return NULL;
} }
if (rm_idx >= MAX_RD_CLIENTS) {
CAM_ERR(CAM_OPE, "Invalid read client: %d", rm_idx);
return NULL;
}
ctx_data = prepare->ctx_data; ctx_data = prepare->ctx_data;
req_idx = prepare->req_idx; req_idx = prepare->req_idx;
cdm_ops = ctx_data->ope_cdm.cdm_ops; cdm_ops = ctx_data->ope_cdm.cdm_ops;
@@ -403,9 +443,9 @@ static int cam_ope_bus_rd_prepare(struct ope_hw *ope_hw_info,
struct cam_ope_bus_rd_reg *rd_reg; struct cam_ope_bus_rd_reg *rd_reg;
struct cam_ope_bus_rd_reg_val *rd_reg_val; struct cam_ope_bus_rd_reg_val *rd_reg_val;
struct ope_bus_rd_io_port_cdm_batch *io_port_cdm_batch; struct ope_bus_rd_io_port_cdm_batch *io_port_cdm_batch;
-	struct ope_bus_rd_io_port_cdm_info *io_port_cdm;
+	struct ope_bus_rd_io_port_cdm_info *io_port_cdm = NULL;
 	struct cam_cdm_utils_ops *cdm_ops;
-	int32_t num_stripes;
+	int32_t num_stripes = 0;
if (ctx_id < 0 || !data) { if (ctx_id < 0 || !data) {
CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data); CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data);
@@ -434,7 +474,7 @@ static int cam_ope_bus_rd_prepare(struct ope_hw *ope_hw_info,
for (i = 0; i < ope_request->num_batch; i++) { for (i = 0; i < ope_request->num_batch; i++) {
for (j = 0; j < ope_request->num_io_bufs[i]; j++) { for (j = 0; j < ope_request->num_io_bufs[i]; j++) {
io_buf = &ope_request->io_buf[i][j]; io_buf = ope_request->io_buf[i][j];
if (io_buf->direction != CAM_BUF_INPUT) if (io_buf->direction != CAM_BUF_INPUT)
continue; continue;
@@ -469,12 +509,20 @@ static int cam_ope_bus_rd_prepare(struct ope_hw *ope_hw_info,
for (j = 0; j < rd_reg_val->num_clients; j++) { for (j = 0; j < rd_reg_val->num_clients; j++) {
is_rm_enabled = cam_ope_bus_is_rm_enabled( is_rm_enabled = cam_ope_bus_is_rm_enabled(
ope_request, i, j); ope_request, i, j);
if (is_rm_enabled < 0) {
rc = -EINVAL;
goto end;
}
if (is_rm_enabled) if (is_rm_enabled)
continue; continue;
kmd_buf = cam_ope_bus_rm_disable(ope_hw_info, kmd_buf = cam_ope_bus_rm_disable(ope_hw_info,
ctx_id, prepare, i, j, ctx_id, prepare, i, j,
kmd_buf, num_stripes); kmd_buf, num_stripes);
if (!kmd_buf) {
rc = -EINVAL;
goto end;
}
} }
} }
@@ -772,7 +820,7 @@ static int cam_ope_bus_rd_isr(struct ope_hw *ope_hw_info,
if (irq_status & bus_rd_reg_val->rst_done) { if (irq_status & bus_rd_reg_val->rst_done) {
complete(&bus_rd->reset_complete); complete(&bus_rd->reset_complete);
CAM_ERR(CAM_OPE, "ope bus rd reset done"); CAM_DBG(CAM_OPE, "ope bus rd reset done");
} }
if ((irq_status & bus_rd_reg_val->violation) == if ((irq_status & bus_rd_reg_val->violation) ==


@@ -29,6 +29,24 @@
static struct ope_bus_wr *wr_info; static struct ope_bus_wr *wr_info;
enum cam_ope_bus_packer_format {
PACKER_FMT_PLAIN_128 = 0x0,
PACKER_FMT_PLAIN_8 = 0x1,
PACKER_FMT_PLAIN_8_ODD_EVEN = 0x2,
PACKER_FMT_PLAIN_8_LSB_MSB_10 = 0x3,
PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN = 0x4,
PACKER_FMT_PLAIN_16_10BPP = 0x5,
PACKER_FMT_PLAIN_16_12BPP = 0x6,
PACKER_FMT_PLAIN_16_14BPP = 0x7,
PACKER_FMT_PLAIN_16_16BPP = 0x8,
PACKER_FMT_PLAIN_32 = 0x9,
PACKER_FMT_PLAIN_64 = 0xA,
PACKER_FMT_TP_10 = 0xB,
PACKER_FMT_MIPI_10 = 0xC,
PACKER_FMT_MIPI_12 = 0xD,
PACKER_FMT_MAX = 0xE,
};
static int cam_ope_bus_en_port_idx( static int cam_ope_bus_en_port_idx(
struct cam_ope_request *ope_request, struct cam_ope_request *ope_request,
uint32_t batch_idx, uint32_t batch_idx,
@@ -43,7 +61,7 @@ static int cam_ope_bus_en_port_idx(
} }
for (i = 0; i < ope_request->num_io_bufs[batch_idx]; i++) { for (i = 0; i < ope_request->num_io_bufs[batch_idx]; i++) {
io_buf = &ope_request->io_buf[batch_idx][i]; io_buf = ope_request->io_buf[batch_idx][i];
if (io_buf->direction != CAM_BUF_OUTPUT) if (io_buf->direction != CAM_BUF_OUTPUT)
continue; continue;
if (io_buf->resource_type == output_port_id) if (io_buf->resource_type == output_port_id)
@@ -133,25 +151,15 @@ static int cam_ope_bus_wr_subsample(
static int cam_ope_bus_wr_release(struct ope_hw *ope_hw_info, static int cam_ope_bus_wr_release(struct ope_hw *ope_hw_info,
int32_t ctx_id, void *data) int32_t ctx_id, void *data)
{ {
int rc = 0, i; int rc = 0;
struct ope_acquire_dev_info *in_acquire;
struct ope_bus_wr_ctx *bus_wr_ctx;
if (ctx_id < 0) { if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) {
CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id); CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id);
return -EINVAL; return -EINVAL;
} }
in_acquire = wr_info->bus_wr_ctx[ctx_id].ope_acquire; vfree(wr_info->bus_wr_ctx[ctx_id]);
wr_info->bus_wr_ctx[ctx_id].ope_acquire = NULL; wr_info->bus_wr_ctx[ctx_id] = NULL;
bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id];
bus_wr_ctx->num_out_ports = 0;
for (i = 0; i < bus_wr_ctx->num_out_ports; i++) {
bus_wr_ctx->io_port_info.output_port_id[i] = 0;
bus_wr_ctx->io_port_info.output_format_type[i - 1] = 0;
bus_wr_ctx->io_port_info.pixel_pattern[i - 1] = 0;
}
return rc; return rc;
} }
@@ -208,7 +216,7 @@ static uint32_t *cam_ope_bus_wr_update(struct ope_hw *ope_hw_info,
cdm_ops = ctx_data->ope_cdm.cdm_ops; cdm_ops = ctx_data->ope_cdm.cdm_ops;
ope_request = ctx_data->req_list[req_idx]; ope_request = ctx_data->req_list[req_idx];
bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id]; bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id];
io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch; io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch;
wr_reg = ope_hw_info->bus_wr_reg; wr_reg = ope_hw_info->bus_wr_reg;
wr_reg_val = ope_hw_info->bus_wr_reg_val; wr_reg_val = ope_hw_info->bus_wr_reg_val;
@@ -217,7 +225,7 @@ static uint32_t *cam_ope_bus_wr_update(struct ope_hw *ope_hw_info,
kmd_buf, req_idx, ope_request->request_id, kmd_buf, req_idx, ope_request->request_id,
prepare->kmd_buf_offset); prepare->kmd_buf_offset);
io_buf = &ope_request->io_buf[batch_idx][io_idx]; io_buf = ope_request->io_buf[batch_idx][io_idx];
CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d", CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d",
batch_idx, io_idx, io_buf->direction); batch_idx, io_idx, io_buf->direction);
@@ -286,6 +294,16 @@ static uint32_t *cam_ope_bus_wr_update(struct ope_hw *ope_hw_info,
temp_reg[count++] = wr_reg->offset + temp_reg[count++] = wr_reg->offset +
wr_reg_client->pack_cfg; wr_reg_client->pack_cfg;
temp = 0; temp = 0;
/*
* In case of NV12, change the packer format of chroma
* plane to odd even byte swapped format
*/
if (k == 1 && stripe_io->format == CAM_FORMAT_NV12)
stripe_io->pack_format =
PACKER_FMT_PLAIN_8_ODD_EVEN;
temp |= ((stripe_io->pack_format & temp |= ((stripe_io->pack_format &
wr_res_val_client->format_mask) << wr_res_val_client->format_mask) <<
wr_res_val_client->format_shift); wr_res_val_client->format_shift);
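
The same byte-order fix is applied on both sides of the bus for NV12: the read path earlier in this series switches the chroma plane's unpacker format, and the write path above switches the packer format, because the interleaved CbCr plane needs its bytes swapped pairwise while the Y plane stays plain 8-bit. A minimal, self-contained sketch of that plane-based selection, with illustrative names (FMT_NV12 and select_pack_format are stand-ins, not the driver's identifiers; the two packer values mirror the enum above):

/* Sketch: NV12 chroma plane picks the odd/even byte-swapped packer. */
#include <stdio.h>
#include <stdint.h>

#define FMT_NV12              1     /* hypothetical format id */
#define PACK_PLAIN_8          0x1
#define PACK_PLAIN_8_ODD_EVEN 0x2

static uint32_t select_pack_format(uint32_t format, int plane)
{
	/* Plane 0 (Y) keeps plain 8-bit packing; plane 1 (interleaved
	 * CbCr) uses the odd/even byte-swapped variant. */
	if (plane == 1 && format == FMT_NV12)
		return PACK_PLAIN_8_ODD_EVEN;
	return PACK_PLAIN_8;
}

int main(void)
{
	printf("Y  plane -> 0x%x\n", select_pack_format(FMT_NV12, 0));
	printf("UV plane -> 0x%x\n", select_pack_format(FMT_NV12, 1));
	return 0;
}
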
@@ -376,7 +394,7 @@ static uint32_t *cam_ope_bus_wm_disable(struct ope_hw *ope_hw_info,
req_idx = prepare->req_idx; req_idx = prepare->req_idx;
cdm_ops = ctx_data->ope_cdm.cdm_ops; cdm_ops = ctx_data->ope_cdm.cdm_ops;
bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id]; bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id];
io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch; io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch;
wr_reg = ope_hw_info->bus_wr_reg; wr_reg = ope_hw_info->bus_wr_reg;
@@ -466,7 +484,7 @@ static int cam_ope_bus_wr_prepare(struct ope_hw *ope_hw_info,
prepare = data; prepare = data;
ctx_data = prepare->ctx_data; ctx_data = prepare->ctx_data;
req_idx = prepare->req_idx; req_idx = prepare->req_idx;
bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id]; bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id];
ope_request = ctx_data->req_list[req_idx]; ope_request = ctx_data->req_list[req_idx];
kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
@@ -477,13 +495,13 @@ static int cam_ope_bus_wr_prepare(struct ope_hw *ope_hw_info,
kmd_buf, req_idx, ope_request->request_id, kmd_buf, req_idx, ope_request->request_id,
prepare->kmd_buf_offset); prepare->kmd_buf_offset);
io_port_cdm_batch = &wr_info->bus_wr_ctx[ctx_id].io_port_cdm_batch; io_port_cdm_batch = &wr_info->bus_wr_ctx[ctx_id]->io_port_cdm_batch;
memset(io_port_cdm_batch, 0, memset(io_port_cdm_batch, 0,
sizeof(struct ope_bus_wr_io_port_cdm_batch)); sizeof(struct ope_bus_wr_io_port_cdm_batch));
for (i = 0; i < ope_request->num_batch; i++) { for (i = 0; i < ope_request->num_batch; i++) {
for (j = 0; j < ope_request->num_io_bufs[i]; j++) { for (j = 0; j < ope_request->num_io_bufs[i]; j++) {
io_buf = &ope_request->io_buf[i][j]; io_buf = ope_request->io_buf[i][j];
CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d", CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d",
i, j, io_buf->direction); i, j, io_buf->direction);
if (io_buf->direction != CAM_BUF_OUTPUT) if (io_buf->direction != CAM_BUF_OUTPUT)
@@ -533,14 +551,20 @@ static int cam_ope_bus_wr_acquire(struct ope_hw *ope_hw_info,
int combo_idx; int combo_idx;
int out_port_idx; int out_port_idx;
if (ctx_id < 0 || !data) { if (ctx_id < 0 || !data || ctx_id >= OPE_CTX_MAX) {
CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data); CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data);
return -EINVAL; return -EINVAL;
} }
wr_info->bus_wr_ctx[ctx_id].ope_acquire = data; wr_info->bus_wr_ctx[ctx_id] = vzalloc(sizeof(struct ope_bus_wr_ctx));
if (!wr_info->bus_wr_ctx[ctx_id]) {
CAM_ERR(CAM_OPE, "Out of memory");
return -ENOMEM;
}
wr_info->bus_wr_ctx[ctx_id]->ope_acquire = data;
in_acquire = data; in_acquire = data;
bus_wr_ctx = &wr_info->bus_wr_ctx[ctx_id]; bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id];
bus_wr_ctx->num_out_ports = in_acquire->num_out_res; bus_wr_ctx->num_out_ports = in_acquire->num_out_res;
bus_wr_ctx->security_flag = in_acquire->secure_mode; bus_wr_ctx->security_flag = in_acquire->secure_mode;
@@ -688,11 +712,12 @@ static int cam_ope_bus_wr_isr(struct ope_hw *ope_hw_info,
int32_t ctx_id, void *data) int32_t ctx_id, void *data)
{ {
int rc = 0; int rc = 0;
uint32_t irq_status_0, irq_status_1; uint32_t irq_status_0, irq_status_1, violation_status;
struct cam_ope_bus_wr_reg *bus_wr_reg; struct cam_ope_bus_wr_reg *bus_wr_reg;
struct cam_ope_bus_wr_reg_val *bus_wr_reg_val; struct cam_ope_bus_wr_reg_val *bus_wr_reg_val;
struct cam_ope_irq_data *irq_data = data;
if (!ope_hw_info) { if (!ope_hw_info || !irq_data) {
CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
return -EINVAL; return -EINVAL;
} }
@@ -712,15 +737,26 @@ static int cam_ope_bus_wr_isr(struct ope_hw *ope_hw_info,
bus_wr_reg->base + bus_wr_reg->irq_cmd); bus_wr_reg->base + bus_wr_reg->irq_cmd);
if (irq_status_0 & bus_wr_reg_val->cons_violation) { if (irq_status_0 & bus_wr_reg_val->cons_violation) {
irq_data->error = 1;
CAM_ERR(CAM_OPE, "ope bus wr cons_violation"); CAM_ERR(CAM_OPE, "ope bus wr cons_violation");
} }
if (irq_status_0 & bus_wr_reg_val->violation) { if (irq_status_0 & bus_wr_reg_val->violation) {
CAM_ERR(CAM_OPE, "ope bus wr vioalation"); irq_data->error = 1;
violation_status = cam_io_r_mb(bus_wr_reg->base +
bus_wr_reg->violation_status);
CAM_ERR(CAM_OPE,
"ope bus wr violation, violation_status 0x%x",
violation_status);
} }
if (irq_status_0 & bus_wr_reg_val->img_size_violation) { if (irq_status_0 & bus_wr_reg_val->img_size_violation) {
CAM_ERR(CAM_OPE, "ope bus wr img_size_violation"); irq_data->error = 1;
violation_status = cam_io_r_mb(bus_wr_reg->base +
bus_wr_reg->image_size_violation_status);
CAM_ERR(CAM_OPE,
"ope bus wr img_size_violation, violation_status 0x%x",
violation_status);
} }
return rc; return rc;
@@ -769,7 +805,7 @@ int cam_ope_bus_wr_process(struct ope_hw *ope_hw_info,
CAM_DBG(CAM_OPE, "Unhandled cmds: %d", cmd_id); CAM_DBG(CAM_OPE, "Unhandled cmds: %d", cmd_id);
break; break;
case OPE_HW_ISR: case OPE_HW_ISR:
rc = cam_ope_bus_wr_isr(ope_hw_info, 0, NULL); rc = cam_ope_bus_wr_isr(ope_hw_info, 0, data);
break; break;
default: default:
CAM_ERR(CAM_OPE, "Unsupported cmd: %d", cmd_id); CAM_ERR(CAM_OPE, "Unsupported cmd: %d", cmd_id);


@@ -130,7 +130,7 @@ struct ope_bus_wr_ctx {
struct ope_bus_wr { struct ope_bus_wr {
struct ope_hw *ope_hw_info; struct ope_hw *ope_hw_info;
struct ope_bus_out_port_to_wm out_port_to_wm[OPE_OUT_RES_MAX]; struct ope_bus_out_port_to_wm out_port_to_wm[OPE_OUT_RES_MAX];
struct ope_bus_wr_ctx bus_wr_ctx[OPE_CTX_MAX]; struct ope_bus_wr_ctx *bus_wr_ctx[OPE_CTX_MAX];
}; };
#endif /* OPE_BUS_WR_H */ #endif /* OPE_BUS_WR_H */
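
With this change the write-bus context is no longer a fixed array of structures embedded in ope_bus_wr; each slot is a pointer that is allocated when the context is acquired and freed in one shot on release, which also removes the field-by-field cleanup. A minimal userspace analogue of that lifetime, assuming calloc/free in place of vzalloc/vfree and using illustrative names (wr_ctx, wr_acquire, wr_release):

/* Sketch: lazy per-context allocation on acquire, single free on release. */
#include <stdio.h>
#include <stdlib.h>

#define CTX_MAX 8

struct wr_ctx {
	int num_out_ports;
};

static struct wr_ctx *wr_ctx[CTX_MAX];

static int wr_acquire(int ctx_id, int num_out_ports)
{
	if (ctx_id < 0 || ctx_id >= CTX_MAX)
		return -1;
	wr_ctx[ctx_id] = calloc(1, sizeof(*wr_ctx[ctx_id]));
	if (!wr_ctx[ctx_id])
		return -1;
	wr_ctx[ctx_id]->num_out_ports = num_out_ports;
	return 0;
}

static void wr_release(int ctx_id)
{
	if (ctx_id < 0 || ctx_id >= CTX_MAX)
		return;
	free(wr_ctx[ctx_id]);   /* releases everything at once */
	wr_ctx[ctx_id] = NULL;
}

int main(void)
{
	if (!wr_acquire(0, 2))
		printf("ctx 0 acquired, out ports = %d\n",
			wr_ctx[0]->num_out_ports);
	wr_release(0);
	return 0;
}
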


@@ -96,7 +96,29 @@ int cam_ope_start(void *hw_priv, void *start_args, uint32_t arg_size)
int cam_ope_stop(void *hw_priv, void *start_args, uint32_t arg_size) int cam_ope_stop(void *hw_priv, void *start_args, uint32_t arg_size)
{ {
return 0; struct cam_hw_info *ope_dev = hw_priv;
struct cam_ope_device_core_info *core_info = NULL;
int rc = 0;
if (!hw_priv) {
CAM_ERR(CAM_OPE, "Invalid cam_dev_info");
return -EINVAL;
}
core_info = (struct cam_ope_device_core_info *)ope_dev->core_info;
if (!core_info) {
CAM_ERR(CAM_OPE, "core_info = %pK", core_info);
return -EINVAL;
}
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
CAM_ERR(CAM_OPE, "cpas stop is failed");
else
core_info->cpas_start = false;
}
return rc;
} }
int cam_ope_flush(void *hw_priv, void *flush_args, uint32_t arg_size) int cam_ope_flush(void *hw_priv, void *flush_args, uint32_t arg_size)
@@ -149,14 +171,15 @@ int cam_ope_init_hw(void *device_priv,
struct cam_hw_info *ope_dev = device_priv; struct cam_hw_info *ope_dev = device_priv;
struct cam_hw_soc_info *soc_info = NULL; struct cam_hw_soc_info *soc_info = NULL;
struct cam_ope_device_core_info *core_info = NULL; struct cam_ope_device_core_info *core_info = NULL;
struct cam_ope_cpas_vote cpas_vote; struct cam_ope_cpas_vote *cpas_vote;
int rc = 0; int rc = 0;
struct cam_ope_dev_init *init; struct cam_ope_dev_init *init;
struct ope_hw *ope_hw; struct ope_hw *ope_hw;
if (!device_priv) { if (!device_priv) {
CAM_ERR(CAM_OPE, "Invalid cam_dev_info"); CAM_ERR(CAM_OPE, "Invalid cam_dev_info");
return -EINVAL; rc = -EINVAL;
goto end;
} }
soc_info = &ope_dev->soc_info; soc_info = &ope_dev->soc_info;
@@ -164,55 +187,72 @@ int cam_ope_init_hw(void *device_priv,
if ((!soc_info) || (!core_info)) { if ((!soc_info) || (!core_info)) {
CAM_ERR(CAM_OPE, "soc_info = %pK core_info = %pK", CAM_ERR(CAM_OPE, "soc_info = %pK core_info = %pK",
soc_info, core_info); soc_info, core_info);
return -EINVAL; rc = -EINVAL;
goto end;
} }
ope_hw = core_info->ope_hw_info->ope_hw; ope_hw = core_info->ope_hw_info->ope_hw;
cpas_vote = kzalloc(sizeof(struct cam_ope_cpas_vote), GFP_KERNEL);
if (!cpas_vote) {
CAM_ERR(CAM_OPE, "Out of memory");
rc = -ENOMEM;
goto end;
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; cpas_vote->ahb_vote.type = CAM_VOTE_ABSOLUTE;
cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE; cpas_vote->ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.num_paths = 1; cpas_vote->axi_vote.num_paths = 1;
cpas_vote.axi_vote.axi_path[0].path_data_type = cpas_vote->axi_vote.axi_path[0].path_data_type =
CAM_AXI_PATH_DATA_ALL; CAM_AXI_PATH_DATA_ALL;
cpas_vote.axi_vote.axi_path[0].transac_type = cpas_vote->axi_vote.axi_path[0].transac_type =
CAM_AXI_TRANSACTION_WRITE; CAM_AXI_TRANSACTION_WRITE;
cpas_vote.axi_vote.axi_path[0].camnoc_bw = cpas_vote->axi_vote.axi_path[0].camnoc_bw =
CAM_CPAS_DEFAULT_AXI_BW; CAM_CPAS_DEFAULT_AXI_BW;
cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = cpas_vote->axi_vote.axi_path[0].mnoc_ab_bw =
CAM_CPAS_DEFAULT_AXI_BW; CAM_CPAS_DEFAULT_AXI_BW;
cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = cpas_vote->axi_vote.axi_path[0].mnoc_ib_bw =
CAM_CPAS_DEFAULT_AXI_BW; CAM_CPAS_DEFAULT_AXI_BW;
cpas_vote.axi_vote.axi_path[0].ddr_ab_bw = cpas_vote->axi_vote.axi_path[0].ddr_ab_bw =
CAM_CPAS_DEFAULT_AXI_BW; CAM_CPAS_DEFAULT_AXI_BW;
cpas_vote.axi_vote.axi_path[0].ddr_ib_bw = cpas_vote->axi_vote.axi_path[0].ddr_ib_bw =
CAM_CPAS_DEFAULT_AXI_BW; CAM_CPAS_DEFAULT_AXI_BW;
rc = cam_cpas_start(core_info->cpas_handle, rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote); &cpas_vote->ahb_vote, &cpas_vote->axi_vote);
if (rc) { if (rc) {
CAM_ERR(CAM_OPE, "cpass start failed: %d", rc); CAM_ERR(CAM_OPE, "cpass start failed: %d", rc);
return rc; goto free_cpas_vote;
} }
core_info->cpas_start = true; core_info->cpas_start = true;
rc = cam_ope_enable_soc_resources(soc_info); rc = cam_ope_enable_soc_resources(soc_info);
if (rc) { if (rc)
CAM_ERR(CAM_OPE, "soc enable is failed : %d", rc); goto enable_soc_resource_failed;
if (cam_cpas_stop(core_info->cpas_handle)) else
CAM_ERR(CAM_OPE, "cpas stop is failed");
else
core_info->cpas_start = false;
} else {
core_info->clk_enable = true; core_info->clk_enable = true;
}
init = init_hw_args; init = init_hw_args;
core_info->ope_hw_info->hfi_en = init->hfi_en; core_info->ope_hw_info->hfi_en = init->hfi_en;
init->core_info = core_info; init->core_info = core_info;
rc = cam_ope_process_init(ope_hw, init_hw_args, init->hfi_en); rc = cam_ope_process_init(ope_hw, init_hw_args, init->hfi_en);
if (rc)
goto process_init_failed;
else
goto free_cpas_vote;
process_init_failed:
if (cam_ope_disable_soc_resources(soc_info, core_info->clk_enable))
CAM_ERR(CAM_OPE, "disable soc resource failed");
enable_soc_resource_failed:
if (cam_cpas_stop(core_info->cpas_handle))
CAM_ERR(CAM_OPE, "cpas stop is failed");
else
core_info->cpas_start = false;
free_cpas_vote:
kzfree(cpas_vote);
cpas_vote = NULL;
end:
return rc; return rc;
} }
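
The init path above now builds the CPAS vote on the heap and unwinds failures through labels: a process-init failure disables the SOC resources, any failure after the vote was started stops CPAS, and every path frees the temporary vote. A minimal sketch of that unwind ordering with stub functions standing in for the CPAS/SOC/init calls (every name below is illustrative, not driver code):

/* Sketch: goto-based unwind with a heap-allocated, temporary vote. */
#include <stdio.h>
#include <stdlib.h>

static int start_bandwidth_vote(void)   { return 0; }
static int enable_soc_resources(void)   { return 0; }
static int run_process_init(void)       { return -1; } /* pretend this fails */
static void stop_bandwidth_vote(void)   { puts("bw vote stopped"); }
static void disable_soc_resources(void) { puts("soc disabled"); }

static int init_hw_sketch(void)
{
	int rc;
	int *vote = calloc(1, sizeof(*vote)); /* heap vote, like kzalloc */

	if (!vote)
		return -1;

	rc = start_bandwidth_vote();
	if (rc)
		goto free_vote;

	rc = enable_soc_resources();
	if (rc)
		goto stop_vote;

	rc = run_process_init();
	if (rc)
		goto disable_soc;
	goto free_vote;          /* success: only the temporary vote is freed */

disable_soc:
	disable_soc_resources();
stop_vote:
	stop_bandwidth_vote();
free_vote:
	free(vote);
	return rc;
}

int main(void)
{
	printf("init rc = %d\n", init_hw_sketch());
	return 0;
}
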
@@ -242,12 +282,15 @@ int cam_ope_deinit_hw(void *device_priv,
CAM_ERR(CAM_OPE, "soc disable is failed : %d", rc); CAM_ERR(CAM_OPE, "soc disable is failed : %d", rc);
core_info->clk_enable = false; core_info->clk_enable = false;
if (core_info->cpas_start) { return rc;
if (cam_cpas_stop(core_info->cpas_handle)) }
CAM_ERR(CAM_OPE, "cpas stop is failed");
else static int cam_ope_dev_process_dump_debug_reg(struct ope_hw *ope_hw)
core_info->cpas_start = false; {
} int rc = 0;
rc = cam_ope_top_process(ope_hw, -1,
OPE_HW_DUMP_DEBUG, NULL);
return rc; return rc;
} }
@@ -310,14 +353,11 @@ static int cam_ope_dev_process_acquire(struct ope_hw *ope_hw, void *cmd_args)
return 0; return 0;
bus_wr_acquire_fail: bus_wr_acquire_fail:
rc = cam_ope_bus_rd_process(ope_hw, ope_dev_acquire->ctx_id, cam_ope_bus_rd_process(ope_hw, ope_dev_acquire->ctx_id,
OPE_HW_RELEASE, ope_dev_acquire->ope_acquire); OPE_HW_RELEASE, ope_dev_acquire->ope_acquire);
bus_rd_acquire_fail: bus_rd_acquire_fail:
rc = cam_ope_top_process(ope_hw, ope_dev_acquire->ctx_id, cam_ope_top_process(ope_hw, ope_dev_acquire->ctx_id,
OPE_HW_RELEASE, ope_dev_acquire->ope_acquire); OPE_HW_RELEASE, ope_dev_acquire->ope_acquire);
if (rc)
goto top_acquire_fail;
top_acquire_fail: top_acquire_fail:
return rc; return rc;
} }
@@ -340,16 +380,17 @@ static int cam_ope_dev_prepare_cdm_request(
kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr +
kmd_buf_offset; kmd_buf_offset;
cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_HW_IOVA; cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
cdm_cmd->flag = true; cdm_cmd->flag = true;
cdm_cmd->userdata = ctx_data; cdm_cmd->userdata = ctx_data;
cdm_cmd->cookie = req_idx; cdm_cmd->cookie = req_idx;
cdm_cmd->gen_irq_arb = true; cdm_cmd->gen_irq_arb = true;
i = cdm_cmd->cmd_arrary_count; i = cdm_cmd->cmd_arrary_count;
cdm_cmd->cmd[i].bl_addr.hw_iova = cdm_cmd->cmd[i].bl_addr.mem_handle =
(uint32_t *)ope_request->ope_kmd_buf.iova_cdm_addr; ope_request->ope_kmd_buf.mem_handle;
cdm_cmd->cmd[i].offset = kmd_buf_offset; cdm_cmd->cmd[i].offset = kmd_buf_offset +
ope_request->ope_kmd_buf.offset;
cdm_cmd->cmd[i].len = len; cdm_cmd->cmd[i].len = len;
cdm_cmd->cmd[i].arbitrate = arbitrate; cdm_cmd->cmd[i].arbitrate = arbitrate;
@@ -365,6 +406,7 @@ static int cam_ope_dev_prepare_cdm_request(
return 0; return 0;
} }
static int dump_dmi_cmd(uint32_t print_idx, static int dump_dmi_cmd(uint32_t print_idx,
uint32_t *print_ptr, struct cdm_dmi_cmd *dmi_cmd, uint32_t *print_ptr, struct cdm_dmi_cmd *dmi_cmd,
uint32_t *temp) uint32_t *temp)
@@ -449,119 +491,6 @@ static int dump_stripe_cmd(struct ope_frame_process *frm_proc,
return 0; return 0;
} }
static uint32_t *ope_create_frame_cmd_prefetch_dis(
struct cam_ope_hw_mgr *hw_mgr,
struct cam_ope_ctx *ctx_data, uint32_t req_idx,
uint32_t *kmd_buf, uint32_t buffered, int batch_idx,
struct cam_ope_dev_prepare_req *ope_dev_prepare_req)
{
int rc = 0, i, j;
uint32_t temp[3];
struct cam_ope_request *ope_request;
struct cdm_dmi_cmd *dmi_cmd;
struct ope_bus_wr_io_port_cdm_info *wr_cdm_info;
struct ope_bus_rd_io_port_cdm_info *rd_cdm_info;
struct ope_frame_process *frm_proc;
dma_addr_t iova_addr;
uintptr_t cpu_addr;
size_t buf_len;
uint32_t print_idx;
uint32_t *print_ptr;
int num_dmi = 0;
struct cam_cdm_utils_ops *cdm_ops;
frm_proc = ope_dev_prepare_req->frame_process;
ope_request = ctx_data->req_list[req_idx];
cdm_ops = ctx_data->ope_cdm.cdm_ops;
wr_cdm_info =
&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0];
rd_cdm_info =
&ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0];
if (batch_idx >= OPE_MAX_BATCH_SIZE) {
CAM_ERR(CAM_OPE, "Invalid input: %d", batch_idx);
return NULL;
}
i = batch_idx;
for (j = 0; j < frm_proc->num_cmd_bufs[i]; j++) {
if (frm_proc->cmd_buf[i][j].cmd_buf_scope !=
OPE_CMD_BUF_SCOPE_FRAME)
continue;
if (frm_proc->cmd_buf[i][j].cmd_buf_usage ==
OPE_CMD_BUF_KMD ||
frm_proc->cmd_buf[i][j].cmd_buf_usage ==
OPE_CMD_BUF_DEBUG)
continue;
if (frm_proc->cmd_buf[i][j].prefetch_disable &&
frm_proc->cmd_buf[i][j].cmd_buf_buffered !=
buffered)
continue;
if (!frm_proc->cmd_buf[i][j].mem_handle)
continue;
rc = cam_mem_get_io_buf(
frm_proc->cmd_buf[i][j].mem_handle,
hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len);
if (rc) {
CAM_ERR(CAM_OPE, "get cmd buf failed %x",
hw_mgr->iommu_hdl);
return NULL;
}
iova_addr = iova_addr + frm_proc->cmd_buf[i][j].offset;
rc = cam_mem_get_cpu_buf(
frm_proc->cmd_buf[i][j].mem_handle,
&cpu_addr, &buf_len);
if (rc || !cpu_addr) {
CAM_ERR(CAM_OPE, "get cmd buf failed %x",
hw_mgr->iommu_hdl);
return NULL;
}
cpu_addr = cpu_addr + frm_proc->cmd_buf[i][j].offset;
if (frm_proc->cmd_buf[i][j].type ==
OPE_CMD_BUF_TYPE_DIRECT) {
kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf,
iova_addr,
frm_proc->cmd_buf[i][j].length);
print_ptr = (uint32_t *)cpu_addr;
dump_frame_direct(print_idx, print_ptr,
frm_proc, i, j);
} else {
num_dmi = frm_proc->cmd_buf[i][j].length /
sizeof(struct cdm_dmi_cmd);
CAM_DBG(CAM_OPE, "Frame DB : In direct: E");
print_ptr = (uint32_t *)cpu_addr;
for (print_idx = 0;
print_idx < num_dmi; print_idx++) {
memcpy(temp, (const void *)print_ptr,
sizeof(struct cdm_dmi_cmd));
dmi_cmd = (struct cdm_dmi_cmd *)temp;
kmd_buf = cdm_ops->cdm_write_dmi(
kmd_buf,
0, dmi_cmd->DMIAddr,
dmi_cmd->DMISel, dmi_cmd->addr,
dmi_cmd->length);
dump_dmi_cmd(print_idx,
print_ptr, dmi_cmd, temp);
print_ptr +=
sizeof(struct cdm_dmi_cmd) /
sizeof(uint32_t);
}
CAM_DBG(CAM_OPE, "Frame DB : In direct: X");
}
dump_frame_cmd(frm_proc, i, j,
iova_addr, kmd_buf, buf_len);
}
return kmd_buf;
}
static uint32_t *ope_create_frame_cmd_batch(struct cam_ope_hw_mgr *hw_mgr, static uint32_t *ope_create_frame_cmd_batch(struct cam_ope_hw_mgr *hw_mgr,
struct cam_ope_ctx *ctx_data, uint32_t req_idx, struct cam_ope_ctx *ctx_data, uint32_t req_idx,
uint32_t *kmd_buf, uint32_t buffered, int batch_idx, uint32_t *kmd_buf, uint32_t buffered, int batch_idx,
@@ -1372,15 +1301,6 @@ static int cam_ope_dev_create_kmd_buf_batch(struct cam_ope_hw_mgr *hw_mgr,
/* After second batch DB programming add prefecth dis */ /* After second batch DB programming add prefecth dis */
if (i) { if (i) {
/* program db buffered prefecth disable cmds */
kmd_buf = ope_create_frame_cmd_prefetch_dis(hw_mgr,
ctx_data, req_idx,
kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, i,
ope_dev_prepare_req);
if (!kmd_buf) {
rc = -EINVAL;
goto end;
}
kmd_buf = kmd_buf =
cdm_ops->cdm_write_wait_prefetch_disable( cdm_ops->cdm_write_wait_prefetch_disable(
kmd_buf, 0x0, kmd_buf, 0x0,
@@ -1621,6 +1541,15 @@ static int cam_ope_process_probe(struct ope_hw *ope_hw,
return -EINVAL; return -EINVAL;
} }
static int cam_ope_process_dump_debug_reg(struct ope_hw *ope_hw,
bool hfi_en)
{
if (!hfi_en)
return cam_ope_dev_process_dump_debug_reg(ope_hw);
return -EINVAL;
}
static int cam_ope_process_reset(struct ope_hw *ope_hw, static int cam_ope_process_reset(struct ope_hw *ope_hw,
void *cmd_args, bool hfi_en) void *cmd_args, bool hfi_en)
{ {
@@ -1762,6 +1691,9 @@ int cam_ope_process_cmd(void *device_priv, uint32_t cmd_type,
spin_unlock_irqrestore(&ope_dev->hw_lock, flags); spin_unlock_irqrestore(&ope_dev->hw_lock, flags);
} }
break; break;
case OPE_HW_DUMP_DEBUG:
rc = cam_ope_process_dump_debug_reg(ope_hw, hfi_en);
break;
default: default:
break; break;
} }


@@ -28,6 +28,7 @@
#define OPE_HW_SET_IRQ_CB 0xE #define OPE_HW_SET_IRQ_CB 0xE
#define OPE_HW_CLK_DISABLE 0xF #define OPE_HW_CLK_DISABLE 0xF
#define OPE_HW_CLK_ENABLE 0x10 #define OPE_HW_CLK_ENABLE 0x10
#define OPE_HW_DUMP_DEBUG 0x11
/** /**
* struct cam_ope_dev_probe * struct cam_ope_dev_probe


@@ -49,6 +49,8 @@
#define OPE_WAIT_COMP_IDLE 0x4 #define OPE_WAIT_COMP_IDLE 0x4
#define OPE_WAIT_COMP_GEN_IRQ 0x8 #define OPE_WAIT_COMP_GEN_IRQ 0x8
#define OPE_MAX_DEBUG_REGISTER 30
struct cam_ope_common { struct cam_ope_common {
uint32_t mode[CAM_FORMAT_MAX]; uint32_t mode[CAM_FORMAT_MAX];
}; };
@@ -68,6 +70,9 @@ struct cam_ope_top_reg {
uint32_t irq_cmd; uint32_t irq_cmd;
uint32_t violation_status; uint32_t violation_status;
uint32_t throttle_cnt_cfg; uint32_t throttle_cnt_cfg;
uint32_t debug_cfg;
uint32_t num_debug_registers;
struct cam_ope_debug_register *debug_regs;
}; };
struct cam_ope_top_reg_val { struct cam_ope_top_reg_val {
@@ -103,6 +108,7 @@ struct cam_ope_top_reg_val {
uint32_t fe_done; uint32_t fe_done;
uint32_t ope_violation; uint32_t ope_violation;
uint32_t idle; uint32_t idle;
uint32_t debug_cfg_val;
}; };
struct cam_ope_qos_reg { struct cam_ope_qos_reg {
@@ -375,6 +381,10 @@ struct cam_ope_bus_wr_reg_val {
struct cam_ope_bus_wr_client_reg_val wr_clients[MAX_WR_CLIENTS]; struct cam_ope_bus_wr_client_reg_val wr_clients[MAX_WR_CLIENTS];
}; };
struct cam_ope_debug_register {
uint32_t offset;
};
struct ope_hw { struct ope_hw {
struct cam_ope_top_reg *top_reg; struct cam_ope_top_reg *top_reg;
struct cam_ope_top_reg_val *top_reg_val; struct cam_ope_top_reg_val *top_reg_val;


@@ -42,6 +42,36 @@ enum cam_ope_bus_rd_unpacker_format {
BUS_RD_VER1_PACKER_FMT_MAX = 0x13, BUS_RD_VER1_PACKER_FMT_MAX = 0x13,
}; };
static struct cam_ope_debug_register ope_debug_regs[OPE_MAX_DEBUG_REGISTER] = {
{
.offset = 0xA0,
},
{
.offset = 0xA4
},
{
.offset = 0xA8,
},
{
.offset = 0xAC,
},
{
.offset = 0xB0,
},
{
.offset = 0xB4,
},
{
.offset = 0xB8,
},
{
.offset = 0xBC,
},
{
.offset = 0xD0,
},
};
static struct cam_ope_top_reg ope_top_reg = { static struct cam_ope_top_reg ope_top_reg = {
.offset = 0x400, .offset = 0x400,
.hw_version = 0x0, .hw_version = 0x0,
@@ -56,6 +86,9 @@ static struct cam_ope_top_reg ope_top_reg = {
.irq_cmd = 0x24, .irq_cmd = 0x24,
.violation_status = 0x28, .violation_status = 0x28,
.throttle_cnt_cfg = 0x2C, .throttle_cnt_cfg = 0x2C,
.debug_cfg = 0xDC,
.num_debug_registers = 9,
.debug_regs = ope_debug_regs,
}; };
static struct cam_ope_top_reg_val ope_top_reg_val = { static struct cam_ope_top_reg_val ope_top_reg_val = {
@@ -75,6 +108,7 @@ static struct cam_ope_top_reg_val ope_top_reg_val = {
.fe_done = 0x4, .fe_done = 0x4,
.ope_violation = 0x8, .ope_violation = 0x8,
.idle = 0x10, .idle = 0x10,
.debug_cfg_val = 0x1,
}; };


@@ -29,12 +29,27 @@
static struct ope_top ope_top_info; static struct ope_top ope_top_info;
static int cam_ope_top_dump_debug_reg(struct ope_hw *ope_hw_info)
{
uint32_t i, val;
struct cam_ope_top_reg *top_reg;
top_reg = ope_hw_info->top_reg;
for (i = 0; i < top_reg->num_debug_registers; i++) {
val = cam_io_r_mb(top_reg->base +
top_reg->debug_regs[i].offset);
CAM_INFO(CAM_OPE, "Debug_status_%d val: 0x%x", i, val);
}
return 0;
}
static int cam_ope_top_reset(struct ope_hw *ope_hw_info, static int cam_ope_top_reset(struct ope_hw *ope_hw_info,
int32_t ctx_id, void *data) int32_t ctx_id, void *data)
{ {
int rc = 0; int rc = 0;
struct cam_ope_top_reg *top_reg; struct cam_ope_top_reg *top_reg;
struct cam_ope_top_reg_val *top_reg_val; struct cam_ope_top_reg_val *top_reg_val;
uint32_t irq_mask, irq_status;
if (!ope_hw_info) { if (!ope_hw_info) {
CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
@@ -44,7 +59,8 @@ static int cam_ope_top_reset(struct ope_hw *ope_hw_info,
top_reg = ope_hw_info->top_reg; top_reg = ope_hw_info->top_reg;
top_reg_val = ope_hw_info->top_reg_val; top_reg_val = ope_hw_info->top_reg_val;
init_completion(&ope_top_info.reset_complete); mutex_lock(&ope_top_info.ope_hw_mutex);
reinit_completion(&ope_top_info.reset_complete);
/* enable interrupt mask */ /* enable interrupt mask */
cam_io_w_mb(top_reg_val->irq_mask, cam_io_w_mb(top_reg_val->irq_mask,
@@ -58,10 +74,19 @@ static int cam_ope_top_reset(struct ope_hw *ope_hw_info,
&ope_top_info.reset_complete, &ope_top_info.reset_complete,
msecs_to_jiffies(30)); msecs_to_jiffies(30));
cam_io_w_mb(top_reg_val->debug_cfg_val,
top_reg->base + top_reg->debug_cfg);
if (!rc || rc < 0) { if (!rc || rc < 0) {
CAM_ERR(CAM_OPE, "reset error result = %d", rc); CAM_ERR(CAM_OPE, "reset error result = %d", rc);
if (!rc) irq_mask = cam_io_r_mb(ope_hw_info->top_reg->base +
rc = -ETIMEDOUT; top_reg->irq_mask);
irq_status = cam_io_r_mb(ope_hw_info->top_reg->base +
top_reg->irq_status);
CAM_ERR(CAM_OPE, "irq mask 0x%x irq status 0x%x",
irq_mask, irq_status);
cam_ope_top_dump_debug_reg(ope_hw_info);
rc = -ETIMEDOUT;
} else { } else {
rc = 0; rc = 0;
} }
@@ -70,6 +95,7 @@ static int cam_ope_top_reset(struct ope_hw *ope_hw_info,
cam_io_w_mb(top_reg_val->irq_mask, cam_io_w_mb(top_reg_val->irq_mask,
ope_hw_info->top_reg->base + top_reg->irq_mask); ope_hw_info->top_reg->base + top_reg->irq_mask);
mutex_unlock(&ope_top_info.ope_hw_mutex);
return rc; return rc;
} }
@@ -110,6 +136,7 @@ static int cam_ope_top_init(struct ope_hw *ope_hw_info,
struct cam_ope_top_reg *top_reg; struct cam_ope_top_reg *top_reg;
struct cam_ope_top_reg_val *top_reg_val; struct cam_ope_top_reg_val *top_reg_val;
struct cam_ope_dev_init *dev_init = data; struct cam_ope_dev_init *dev_init = data;
uint32_t irq_mask, irq_status;
if (!ope_hw_info) { if (!ope_hw_info) {
CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); CAM_ERR(CAM_OPE, "Invalid ope_hw_info");
@@ -121,13 +148,13 @@ static int cam_ope_top_init(struct ope_hw *ope_hw_info,
top_reg->base = dev_init->core_info->ope_hw_info->ope_top_base; top_reg->base = dev_init->core_info->ope_hw_info->ope_top_base;
mutex_init(&ope_top_info.ope_hw_mutex);
/* OPE SW RESET */ /* OPE SW RESET */
init_completion(&ope_top_info.reset_complete); init_completion(&ope_top_info.reset_complete);
/* enable interrupt mask */ /* enable interrupt mask */
cam_io_w_mb(top_reg_val->irq_mask, cam_io_w_mb(top_reg_val->irq_mask,
ope_hw_info->top_reg->base + top_reg->irq_mask); ope_hw_info->top_reg->base + top_reg->irq_mask);
cam_io_w_mb(top_reg_val->sw_reset_cmd, cam_io_w_mb(top_reg_val->sw_reset_cmd,
ope_hw_info->top_reg->base + top_reg->reset_cmd); ope_hw_info->top_reg->base + top_reg->reset_cmd);
@@ -135,18 +162,27 @@ static int cam_ope_top_init(struct ope_hw *ope_hw_info,
&ope_top_info.reset_complete, &ope_top_info.reset_complete,
msecs_to_jiffies(30)); msecs_to_jiffies(30));
/* enable interrupt mask */ cam_io_w_mb(top_reg_val->debug_cfg_val,
cam_io_w_mb(top_reg_val->irq_mask, top_reg->base + top_reg->debug_cfg);
ope_hw_info->top_reg->base + top_reg->irq_mask);
if (!rc || rc < 0) { if (!rc || rc < 0) {
CAM_ERR(CAM_OPE, "reset error result = %d", rc); CAM_ERR(CAM_OPE, "reset error result = %d", rc);
if (!rc) irq_mask = cam_io_r_mb(ope_hw_info->top_reg->base +
rc = -ETIMEDOUT; top_reg->irq_mask);
irq_status = cam_io_r_mb(ope_hw_info->top_reg->base +
top_reg->irq_status);
CAM_ERR(CAM_OPE, "irq mask 0x%x irq status 0x%x",
irq_mask, irq_status);
cam_ope_top_dump_debug_reg(ope_hw_info);
rc = -ETIMEDOUT;
} else { } else {
rc = 0; rc = 0;
} }
/* enable interrupt mask */
cam_io_w_mb(top_reg_val->irq_mask,
ope_hw_info->top_reg->base + top_reg->irq_mask);
return rc; return rc;
} }
@@ -246,6 +282,8 @@ int cam_ope_top_process(struct ope_hw *ope_hw_info,
case OPE_HW_RESET: case OPE_HW_RESET:
rc = cam_ope_top_reset(ope_hw_info, 0, 0); rc = cam_ope_top_reset(ope_hw_info, 0, 0);
break; break;
case OPE_HW_DUMP_DEBUG:
rc = cam_ope_top_dump_debug_reg(ope_hw_info);
break;
default: default:
break; break;
} }


@@ -32,10 +32,12 @@ struct ope_top_ctx {
* @ope_hw_info: OPE hardware info * @ope_hw_info: OPE hardware info
* @top_ctx: OPE top context * @top_ctx: OPE top context
* @reset_complete: Reset complete flag * @reset_complete: Reset complete flag
* @ope_hw_mutex: OPE hardware mutex
*/ */
struct ope_top { struct ope_top {
struct ope_hw *ope_hw_info; struct ope_hw *ope_hw_info;
struct ope_top_ctx top_ctx[OPE_CTX_MAX]; struct ope_top_ctx top_ctx[OPE_CTX_MAX];
struct completion reset_complete; struct completion reset_complete;
struct mutex ope_hw_mutex;
}; };
#endif /* OPE_TOP_H */ #endif /* OPE_TOP_H */


@@ -4027,7 +4027,7 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
spin_unlock_bh(&link->link_state_spin_lock); spin_unlock_bh(&link->link_state_spin_lock);
/* Start SOF watchdog timer */ /* Start SOF watchdog timer */
rc = crm_timer_init(&link->watchdog, rc = crm_timer_init(&link->watchdog,
CAM_REQ_MGR_WATCHDOG_TIMEOUT, link, CAM_REQ_MGR_WATCHDOG_TIMEOUT_DEFAULT, link,
&__cam_req_mgr_sof_freeze); &__cam_req_mgr_sof_freeze);
if (rc < 0) { if (rc < 0) {
CAM_ERR(CAM_CRM, CAM_ERR(CAM_CRM,


@@ -13,10 +13,11 @@
#define CAM_REQ_MGR_MAX_LINKED_DEV 16 #define CAM_REQ_MGR_MAX_LINKED_DEV 16
#define MAX_REQ_SLOTS 48 #define MAX_REQ_SLOTS 48
#define CAM_REQ_MGR_WATCHDOG_TIMEOUT 5000 #define CAM_REQ_MGR_WATCHDOG_TIMEOUT 1000
#define CAM_REQ_MGR_WATCHDOG_TIMEOUT_MAX 50000 #define CAM_REQ_MGR_WATCHDOG_TIMEOUT_DEFAULT 5000
#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT 1000 #define CAM_REQ_MGR_WATCHDOG_TIMEOUT_MAX 50000
#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30 #define CAM_REQ_MGR_SCHED_REQ_TIMEOUT 1000
#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30
#define FORCE_DISABLE_RECOVERY 2 #define FORCE_DISABLE_RECOVERY 2
#define FORCE_ENABLE_RECOVERY 1 #define FORCE_ENABLE_RECOVERY 1


@@ -1544,6 +1544,76 @@ static int32_t cam_cci_i2c_write_async(struct v4l2_subdev *sd,
return rc; return rc;
} }
static int32_t cam_cci_read_bytes_v_1_2(struct v4l2_subdev *sd,
struct cam_cci_ctrl *c_ctrl)
{
int32_t rc = 0;
struct cci_device *cci_dev = NULL;
enum cci_i2c_master_t master;
struct cam_cci_read_cfg *read_cfg = NULL;
uint16_t read_bytes = 0;
if (!sd || !c_ctrl) {
CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", sd, c_ctrl);
return -EINVAL;
}
if (!c_ctrl->cci_info) {
CAM_ERR(CAM_CCI, "cci_info NULL");
return -EINVAL;
}
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev) {
CAM_ERR(CAM_CCI, "cci_dev NULL");
return -EINVAL;
}
if (cci_dev->cci_state != CCI_STATE_ENABLED) {
CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
return -EINVAL;
}
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
master = c_ctrl->cci_info->cci_i2c_master;
read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
CAM_ERR(CAM_CCI, "read num bytes 0");
rc = -EINVAL;
goto ERROR;
}
read_bytes = read_cfg->num_byte;
CAM_DBG(CAM_CCI, "Bytes to read %u", read_bytes);
do {
if (read_bytes >= CCI_READ_MAX_V_1_2)
read_cfg->num_byte = CCI_READ_MAX_V_1_2;
else
read_cfg->num_byte = read_bytes;
cci_dev->is_burst_read = false;
rc = cam_cci_read(sd, c_ctrl);
if (rc) {
CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
goto ERROR;
}
if (read_bytes >= CCI_READ_MAX_V_1_2) {
read_cfg->addr += CCI_READ_MAX_V_1_2;
read_cfg->data += CCI_READ_MAX_V_1_2;
read_bytes -= CCI_READ_MAX_V_1_2;
} else {
read_bytes = 0;
}
} while (read_bytes);
ERROR:
cci_dev->is_burst_read = false;
return rc;
}
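
Because the v1.2 CCI core has no RD FIFO threshold register, the helper above never burst-reads; it issues the transfer in chunks of at most CCI_READ_MAX_V_1_2 (0xE, i.e. 14) bytes and advances the source address and destination pointer between passes. A minimal sketch of that chunking arithmetic (plan_read is an illustrative name, not the driver function):

/* Sketch: split one logical read into <= 14-byte transactions. */
#include <stdio.h>
#include <stdint.h>

#define READ_MAX_V_1_2 0xE

static void plan_read(uint32_t addr, uint16_t total)
{
	while (total) {
		uint16_t chunk = (total >= READ_MAX_V_1_2) ?
				READ_MAX_V_1_2 : total;

		printf("read %2u bytes at 0x%04x\n", chunk, addr);
		addr  += chunk;
		total -= chunk;
	}
}

int main(void)
{
	plan_read(0x3000, 40);   /* -> 14 + 14 + 12 bytes */
	return 0;
}
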
static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd, static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd,
struct cam_cci_ctrl *c_ctrl) struct cam_cci_ctrl *c_ctrl)
{ {
@@ -1770,7 +1840,16 @@ int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
mutex_unlock(&cci_dev->init_mutex); mutex_unlock(&cci_dev->init_mutex);
break; break;
case MSM_CCI_I2C_READ: case MSM_CCI_I2C_READ:
rc = cam_cci_read_bytes(sd, cci_ctrl); /*
* CCI version 1.2 does not support burst read
* due to the absence of the read threshold register
*/
if (cci_dev->hw_version == CCI_VERSION_1_2_9) {
CAM_DBG(CAM_CCI, "cci-v1.2 no burst read");
rc = cam_cci_read_bytes_v_1_2(sd, cci_ctrl);
} else {
rc = cam_cci_read_bytes(sd, cci_ctrl);
}
break; break;
case MSM_CCI_I2C_WRITE: case MSM_CCI_I2C_WRITE:
case MSM_CCI_I2C_WRITE_SEQ: case MSM_CCI_I2C_WRITE_SEQ:


@@ -57,6 +57,7 @@
/* Max bytes that can be read per CCI read transaction */ /* Max bytes that can be read per CCI read transaction */
#define CCI_READ_MAX 256 #define CCI_READ_MAX 256
#define CCI_READ_MAX_V_1_2 0xE
#define CCI_I2C_READ_MAX_RETRIES 3 #define CCI_I2C_READ_MAX_RETRIES 3
#define CCI_I2C_MAX_READ 10240 #define CCI_I2C_MAX_READ 10240
#define CCI_I2C_MAX_WRITE 10240 #define CCI_I2C_MAX_WRITE 10240
@@ -69,6 +70,7 @@
#define PRIORITY_QUEUE (QUEUE_0) #define PRIORITY_QUEUE (QUEUE_0)
#define SYNC_QUEUE (QUEUE_1) #define SYNC_QUEUE (QUEUE_1)
#define CCI_VERSION_1_2_9 0x10020009
enum cci_i2c_sync { enum cci_i2c_sync {
MSM_SYNC_DISABLE, MSM_SYNC_DISABLE,
MSM_SYNC_ENABLE, MSM_SYNC_ENABLE,


@@ -119,8 +119,7 @@ int cam_cci_init(struct v4l2_subdev *sd,
MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11; MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
cci_dev->support_seq_write = 1; cci_dev->support_seq_write = 1;
if (of_device_is_compatible(soc_info->dev->of_node, if (cci_dev->hw_version == CCI_VERSION_1_2_9) {
"qcom,cci-v1.2")) {
max_queue_0_size = CCI_I2C_QUEUE_0_SIZE_V_1_2; max_queue_0_size = CCI_I2C_QUEUE_0_SIZE_V_1_2;
max_queue_1_size = CCI_I2C_QUEUE_1_SIZE_V_1_2; max_queue_1_size = CCI_I2C_QUEUE_1_SIZE_V_1_2;
} else { } else {
@@ -180,10 +179,12 @@ int cam_cci_init(struct v4l2_subdev *sd,
} }
/* Set RD FIFO threshold for M0 & M1 */ /* Set RD FIFO threshold for M0 & M1 */
cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE, if (cci_dev->hw_version != CCI_VERSION_1_2_9) {
base + CCI_I2C_M0_RD_THRESHOLD_ADDR); cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE, base + CCI_I2C_M0_RD_THRESHOLD_ADDR);
base + CCI_I2C_M1_RD_THRESHOLD_ADDR); cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
base + CCI_I2C_M1_RD_THRESHOLD_ADDR);
}
cci_dev->cci_state = CCI_STATE_ENABLED; cci_dev->cci_state = CCI_STATE_ENABLED;


@@ -15,6 +15,11 @@
#include "cam_packet_util.h" #include "cam_packet_util.h"
#include "cam_mem_mgr.h" #include "cam_mem_mgr.h"
#include "cam_cpas_api.h" #include "cam_cpas_api.h"
#include "cam_compat.h"
#define SCM_SVC_CAMERASS 0x18
#define SECURE_SYSCALL_ID 0x6
#define SECURE_SYSCALL_ID_2 0x7
#define LANE_MASK_2PH 0x1F #define LANE_MASK_2PH 0x1F
#define LANE_MASK_3PH 0x7 #define LANE_MASK_3PH 0x7
@@ -834,6 +839,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
} }
break; break;
case CAM_RELEASE_DEV: { case CAM_RELEASE_DEV: {
int32_t offset;
struct cam_release_dev_cmd release; struct cam_release_dev_cmd release;
if (!csiphy_dev->acquire_count) { if (!csiphy_dev->acquire_count) {
@@ -849,6 +855,23 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
goto release_mutex; goto release_mutex;
} }
offset = cam_csiphy_get_instance_offset(csiphy_dev,
release.dev_handle);
if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
CAM_ERR(CAM_CSIPHY, "Invalid offset");
goto release_mutex;
}
if (csiphy_dev->csiphy_info.secure_mode[offset])
cam_csiphy_notify_secure_mode(
csiphy_dev,
CAM_SECURE_MODE_NON_SECURE, offset);
csiphy_dev->csiphy_info.secure_mode[offset] =
CAM_SECURE_MODE_NON_SECURE;
csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0;
rc = cam_destroy_device_hdl(release.dev_handle); rc = cam_destroy_device_hdl(release.dev_handle);
if (rc < 0) if (rc < 0)
CAM_ERR(CAM_CSIPHY, "destroying the device hdl"); CAM_ERR(CAM_CSIPHY, "destroying the device hdl");


@@ -322,11 +322,11 @@ int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg = NULL; csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg = NULL;
csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_2; csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_2;
csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_dev->ctrl_reg->csiphy_common_reg =
csiphy_common_reg_1_2; csiphy_common_reg_1_2_2;
csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_dev->ctrl_reg->csiphy_reset_reg =
csiphy_reset_reg_1_2; csiphy_reset_reg_1_2;
csiphy_dev->ctrl_reg->getclockvoting = get_clk_vote_default; csiphy_dev->ctrl_reg->getclockvoting = get_clk_vote_default;
csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_2; csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_2_2;
csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW; csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
csiphy_dev->is_divisor_32_comp = false; csiphy_dev->is_divisor_32_comp = false;
csiphy_dev->hw_version = CSIPHY_VERSION_V12; csiphy_dev->hw_version = CSIPHY_VERSION_V12;


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2019, The Linux Foundation. All rights reserved. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/ */
#ifndef _CAM_CSIPHY_1_2_2_HWREG_H_ #ifndef _CAM_CSIPHY_1_2_2_HWREG_H_
@@ -8,6 +8,29 @@
#include "../cam_csiphy_dev.h" #include "../cam_csiphy_dev.h"
struct csiphy_reg_parms_t csiphy_v1_2_2 = {
.mipi_csiphy_interrupt_status0_addr = 0x8B0,
.mipi_csiphy_interrupt_clear0_addr = 0x858,
.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
.csiphy_common_array_size = 8,
.csiphy_reset_array_size = 5,
.csiphy_2ph_config_array_size = 18,
.csiphy_3ph_config_array_size = 33,
.csiphy_2ph_clock_lane = 0x1,
.csiphy_2ph_combo_ck_ln = 0x10,
};
struct csiphy_reg_t csiphy_common_reg_1_2_2[] = {
{0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE},
{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x081C, 0x5A, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0800, 0x03, 0x01, CSIPHY_DEFAULT_PARAMS},
{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0884, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x088C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0824, 0x72, 0x00, CSIPHY_2PH_REGS},
};
struct csiphy_reg_t struct csiphy_reg_t
csiphy_2ph_v1_2_2_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { csiphy_2ph_v1_2_2_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
{ {


@@ -1827,7 +1827,11 @@ void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) || if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
(fctrl->flash_state == CAM_FLASH_STATE_START)) { (fctrl->flash_state == CAM_FLASH_STATE_START)) {
fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
cam_flash_off(fctrl); rc = cam_flash_off(fctrl);
if (rc) {
CAM_ERR(CAM_FLASH,
"LED OFF FAILED: %d", rc);
}
if (fctrl->func_tbl.power_ops) { if (fctrl->func_tbl.power_ops) {
rc = fctrl->func_tbl.power_ops(fctrl, false); rc = fctrl->func_tbl.power_ops(fctrl, false);
if (rc) if (rc)


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*/ */
#ifndef _CAM_SMMU_API_H_ #ifndef _CAM_SMMU_API_H_


@@ -37,9 +37,9 @@ int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length);
void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length); void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length);
void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa); void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa);
int cam_ife_notify_safe_lut_scm(bool safe_trigger); int cam_ife_notify_safe_lut_scm(bool safe_trigger);
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
bool protect, int32_t offset);
int camera_component_match_add_drivers(struct device *master_dev, int camera_component_match_add_drivers(struct device *master_dev,
struct component_match **match_list); struct component_match **match_list);
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
bool protect, int32_t offset);
#endif /* _CAM_COMPAT_H_ */ #endif /* _CAM_COMPAT_H_ */


@@ -17,7 +17,7 @@ static char supported_clk_info[256];
static char debugfs_dir_name[64]; static char debugfs_dir_name[64];
int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info, int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info,
int32_t clk_rate, int clk_idx, int32_t *clk_lvl) int64_t clk_rate, int clk_idx, int32_t *clk_lvl)
{ {
int i; int i;
long clk_rate_round; long clk_rate_round;
@@ -41,9 +41,9 @@ int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info,
(soc_info->clk_rate[i][clk_idx] >= (soc_info->clk_rate[i][clk_idx] >=
clk_rate_round)) { clk_rate_round)) {
CAM_DBG(CAM_UTIL, CAM_DBG(CAM_UTIL,
"soc = %d round rate = %ld actual = %d", "soc = %d round rate = %ld actual = %lld",
soc_info->clk_rate[i][clk_idx], soc_info->clk_rate[i][clk_idx],
clk_rate_round, clk_rate); clk_rate_round, clk_rate);
*clk_lvl = i; *clk_lvl = i;
return 0; return 0;
} }
@@ -380,7 +380,7 @@ long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
* @return: Success or failure * @return: Success or failure
*/ */
static int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name, static int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
int32_t clk_rate) int64_t clk_rate)
{ {
int rc = 0; int rc = 0;
long clk_rate_round; long clk_rate_round;
@@ -388,7 +388,7 @@ static int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
if (!clk || !clk_name) if (!clk || !clk_name)
return -EINVAL; return -EINVAL;
CAM_DBG(CAM_UTIL, "set %s, rate %d", clk_name, clk_rate); CAM_DBG(CAM_UTIL, "set %s, rate %lld", clk_name, clk_rate);
if (clk_rate > 0) { if (clk_rate > 0) {
clk_rate_round = clk_round_rate(clk, clk_rate); clk_rate_round = clk_round_rate(clk, clk_rate);
CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round); CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
@@ -424,7 +424,7 @@ static int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
} }
int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info, int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
int32_t clk_rate) int64_t clk_rate)
{ {
int rc = 0; int rc = 0;
int i = 0; int i = 0;
@@ -452,13 +452,13 @@ int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
&apply_level); &apply_level);
if (rc || (apply_level < 0) || (apply_level >= CAM_MAX_VOTE)) { if (rc || (apply_level < 0) || (apply_level >= CAM_MAX_VOTE)) {
CAM_ERR(CAM_UTIL, CAM_ERR(CAM_UTIL,
"set %s, rate %d dev_name = %s apply level = %d", "set %s, rate %lld dev_name = %s apply level = %d",
soc_info->clk_name[src_clk_idx], clk_rate, soc_info->clk_name[src_clk_idx], clk_rate,
soc_info->dev_name, apply_level); soc_info->dev_name, apply_level);
return -EINVAL; return -EINVAL;
} }
CAM_DBG(CAM_UTIL, "set %s, rate %d dev_name = %s apply level = %d", CAM_DBG(CAM_UTIL, "set %s, rate %lld dev_name = %s apply level = %d",
soc_info->clk_name[src_clk_idx], clk_rate, soc_info->clk_name[src_clk_idx], clk_rate,
soc_info->dev_name, apply_level); soc_info->dev_name, apply_level);
@@ -471,7 +471,7 @@ int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
soc_info->clk_name[src_clk_idx], clk_rate); soc_info->clk_name[src_clk_idx], clk_rate);
if (rc) { if (rc) {
CAM_ERR(CAM_UTIL, CAM_ERR(CAM_UTIL,
"SET_RATE Failed: src clk: %s, rate %d, dev_name = %s rc: %d", "SET_RATE Failed: src clk: %s, rate %lld, dev_name = %s rc: %d",
soc_info->clk_name[src_clk_idx], clk_rate, soc_info->clk_name[src_clk_idx], clk_rate,
soc_info->dev_name, rc); soc_info->dev_name, rc);
return rc; return rc;
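
The clock-rate plumbing in this file widens from int32_t to int64_t because rates above INT32_MAX (about 2.147 GHz) cannot be represented in a signed 32-bit value, and the log formats change from %d to %lld accordingly. A small standalone check of the overflow this avoids (nothing here is driver code):

/* Sketch: a 2.4 GHz rate does not fit in a signed 32-bit integer. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t rate_hz = 2400000000LL;          /* 2.4 GHz */
	int32_t truncated = (int32_t)rate_hz;    /* implementation-defined wrap */

	printf("INT32_MAX   = %d\n", INT32_MAX);
	printf("64-bit rate = %lld Hz\n", (long long)rate_hz);
	printf("as int32_t  = %d\n", truncated);
	return 0;
}
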


@@ -415,7 +415,7 @@ long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
* @return: success or failure * @return: success or failure
*/ */
int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info, int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
int32_t clk_rate); int64_t clk_rate);
/** /**
* cam_soc_util_get_option_clk_by_name() * cam_soc_util_get_option_clk_by_name()
@@ -657,7 +657,7 @@ int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
enum cam_vote_level clk_level); enum cam_vote_level clk_level);
int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info, int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info,
int32_t clk_rate, int clk_idx, int32_t *clk_lvl); int64_t clk_rate, int clk_idx, int32_t *clk_lvl);
/* Callback to get reg space data for specific HW */ /* Callback to get reg space data for specific HW */
typedef int (*cam_soc_util_regspace_data_cb)(uint32_t reg_base_type, typedef int (*cam_soc_util_regspace_data_cb)(uint32_t reg_base_type,


@@ -73,7 +73,7 @@
#define OPE_MAX_IO_BUFS (OPE_OUT_RES_MAX + OPE_IN_RES_MAX) #define OPE_MAX_IO_BUFS (OPE_OUT_RES_MAX + OPE_IN_RES_MAX)
#define OPE_MAX_PASS 1 #define OPE_MAX_PASS 1
#define OPE_MAX_PLANES 2 #define OPE_MAX_PLANES 2
#define OPE_MAX_STRIPES 32 #define OPE_MAX_STRIPES 48
#define OPE_MAX_BATCH_SIZE 16 #define OPE_MAX_BATCH_SIZE 16
/** /**
@@ -83,6 +83,7 @@
* @x_init: X_init * @x_init: X_init
* @stripe_location: Stripe location (OPE_STRIPE_XXX) * @stripe_location: Stripe location (OPE_STRIPE_XXX)
* @width: Width of a stripe * @width: Width of a stripe
* @height: Height of a stripe
* @disable_bus: Flag to disable BUS master * @disable_bus: Flag to disable BUS master
* @reserved: Reserved * @reserved: Reserved
* *


@@ -83,6 +83,11 @@
#define CAM_ISP_TFE_USAGE_RIGHT_PX 2 #define CAM_ISP_TFE_USAGE_RIGHT_PX 2
#define CAM_ISP_TFE_USAGE_RDI 3 #define CAM_ISP_TFE_USAGE_RDI 3
/* Bus write master modes */
#define CAM_ISP_TFE_WM_FRAME_BASED_MODE 0
#define CAM_ISP_TFE_WM_LINE_BASED_MODE 1
#define CAM_ISP_TFE_WM_INDEX_BASED_MODE 2
/* Query devices */ /* Query devices */
/** /**
* struct cam_isp_tfe_dev_cap_info - A cap info for particular hw type * struct cam_isp_tfe_dev_cap_info - A cap info for particular hw type