diff --git a/drivers/cam_cdm/cam_cdm.h b/drivers/cam_cdm/cam_cdm.h index ab12ab52f2..525d54d68d 100644 --- a/drivers/cam_cdm/cam_cdm.h +++ b/drivers/cam_cdm/cam_cdm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _CAM_CDM_H_ @@ -26,24 +26,341 @@ #define CAM_CDM_INFLIGHT_WORKS 5 #define CAM_CDM_HW_RESET_TIMEOUT 300 +/* + * Macros to prepare and get information + * from client CDM handles. + */ + #define CAM_CDM_HW_ID_MASK 0xF -#define CAM_CDM_HW_ID_SHIFT 0x5 -#define CAM_CDM_CLIENTS_ID_MASK 0x1F +#define CAM_CDM_HW_ID_SHIFT 0x10 + +#define CAM_CDM_CLIENTS_ID_MASK 0xFF + +#define CAM_CDM_BL_FIFO_ID_MASK 0xF +#define CAM_CDM_BL_FIFO_ID_SHIFT 0x8 #define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \ CAM_CDM_HW_ID_MASK) -#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \ + +#define CAM_CDM_GET_BLFIFO_IDX(x) (((x) >> CAM_CDM_BL_FIFO_ID_SHIFT) & \ + CAM_CDM_BL_FIFO_ID_MASK) + +#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, priority, client_idx) \ ((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \ + (((priority) & CAM_CDM_BL_FIFO_ID_MASK) << CAM_CDM_BL_FIFO_ID_SHIFT)| \ ((client_idx) & CAM_CDM_CLIENTS_ID_MASK)) #define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK) #define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1) #define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1) -/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/ -enum cam_cdm_reg_attr { - CAM_REG_ATTR_READ, - CAM_REG_ATTR_WRITE, - CAM_REG_ATTR_READ_WRITE, +/* Number of FIFOs supported on CDM */ +#define CAM_CDM_NUM_BL_FIFO 0x4 + +/* Max number of register sets for different CDMs */ +#define CAM_CDM_BL_FIFO_REG_NUM 0x4 +#define CAM_CDM_BL_FIFO_IRQ_REG_NUM 0x4 +#define CAM_CDM_BL_FIFO_PENDING_REQ_REG_NUM 0x2 +#define CAM_CDM_SCRATCH_REG_NUM 0xc +#define CAM_CDM_COMP_WAIT_STATUS_REG_NUM 0x2 +#define CAM_CDM_PERF_MON_REG_NUM 0x2 + +/* BL_FIFO configurations */ +#define CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT 0x40 +#define CAM_CDM_BL_FIFO_LENGTH_CFG_SHIFT 0x10 + +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX 0x00 +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV2 0x01 +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV4 0x10 +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV8 0x11 + +/* CDM core status bitmap */ +#define CAM_CDM_HW_INIT_STATUS 0x0 +#define CAM_CDM_FIFO_0_BLDONE_STATUS 0x0 +#define CAM_CDM_FIFO_1_BLDONE_STATUS 0x1 +#define CAM_CDM_FIFO_2_BLDONE_STATUS 0x2 +#define CAM_CDM_FIFO_3_BLDONE_STATUS 0x3 +#define CAM_CDM_RESET_HW_STATUS 0x4 +#define CAM_CDM_ERROR_HW_STATUS 0x5 +#define CAM_CDM_FLUSH_HW_STATUS 0x6 + +/* Current BL command masks and shifts */ +#define CAM_CDM_CURRENT_BL_LEN 0xFFFFF +#define CAM_CDM_CURRENT_BL_ARB 0x100000 +#define CAM_CDM_CURRENT_BL_FIFO 0xC00000 +#define CAM_CDM_CURRENT_BL_TAG 0xFF000000 + +#define CAM_CDM_CURRENT_BL_ARB_SHIFT 0x14 +#define CAM_CDM_CURRENT_BL_FIFO_SHIFT 0x16 +#define CAM_CDM_CURRENT_BL_TAG_SHIFT 0x18 + +/* IRQ bit-masks */ +#define CAM_CDM_IRQ_STATUS_RST_DONE_MASK 0x1 +#define CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK 0x2 +#define CAM_CDM_IRQ_STATUS_BL_DONE_MASK 0x4 +#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000 +#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000 +#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000 + +#define CAM_CDM_IRQ_STATUS_ERRORS \ + (CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK | \ + CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK | \ +
CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) + +/* Structure to store hw version info */ +struct cam_version_reg { + uint32_t hw_version; +}; + +/** + * struct cam_cdm_irq_regs - CDM IRQ registers + * + * @irq_mask: register offset for irq_mask + * @irq_clear: register offset for irq_clear + * @irq_clear_cmd: register offset to initiate irq clear + * @irq_set: register offset to set irq + * @irq_set_cmd: register offset to issue set_irq from irq_set + * @irq_status: register offset to check which irq was received + */ +struct cam_cdm_irq_regs { + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_clear_cmd; + uint32_t irq_set; + uint32_t irq_set_cmd; + uint32_t irq_status; +}; + +/** + * struct cam_cdm_bl_fifo_regs - BL_FIFO registers + * + * @bl_fifo_base: register offset to write bl_cmd base address + * @bl_fifo_len: register offset to write bl_cmd length + * @bl_fifo_store: register offset to commit the BL cmd + * @bl_fifo_cfg: register offset to config BL_FIFO depth, etc. + */ +struct cam_cdm_bl_fifo_regs { + uint32_t bl_fifo_base; + uint32_t bl_fifo_len; + uint32_t bl_fifo_store; + uint32_t bl_fifo_cfg; +}; + +/** + * struct cam_cdm_bl_pending_req_reg_params - BL_FIFO pending registers + * + * @rb_offset: register offset for pending BL requests in BL_FIFO + * @rb_mask: mask to get number of pending BLs in BL_FIFO + * @rb_num_fifo: number of BL_FIFOs whose information is in the register + * @rb_next_fifo_shift: shift to get next fifo's pending BLs. + */ +struct cam_cdm_bl_pending_req_reg_params { + uint32_t rb_offset; + uint32_t rb_mask; + uint32_t rb_num_fifo; + uint32_t rb_next_fifo_shift; +}; + +/** + * struct cam_cdm_scratch_reg - scratch register + * + * @scratch_reg: offset of scratch register + */ +struct cam_cdm_scratch_reg { + uint32_t scratch_reg; +}; + +/* struct cam_cdm_perf_mon_regs - perf_mon registers */ +struct cam_cdm_perf_mon_regs { + uint32_t perf_mon_ctrl; + uint32_t perf_mon_0; + uint32_t perf_mon_1; + uint32_t perf_mon_2; +}; + +/** + * struct cam_cdm_perf_regs - perf mon counter registers + * + * @count_cfg_0: register offset to configure perf measures + * @always_count_val: register offset for always count value + * @busy_count_val: register offset to get busy count + * @stall_axi_count_val: register offset to get axi stall counts + * @count_status: register offset to know if count status finished + * for stall, busy and always. + */ +struct cam_cdm_perf_regs { + uint32_t count_cfg_0; + uint32_t always_count_val; + uint32_t busy_count_val; + uint32_t stall_axi_count_val; + uint32_t count_status; +}; + +/** + * struct cam_cdm_icl_data_regs - CDM icl data registers + * + * @icl_last_data_0: register offset to log last known good command + * @icl_last_data_1: register offset to log last known good command 1 + * @icl_last_data_2: register offset to log last known good command 2 + * @icl_inv_data: register offset to log CDM cmd that triggered + * invalid command. + */ +struct cam_cdm_icl_data_regs { + uint32_t icl_last_data_0; + uint32_t icl_last_data_1; + uint32_t icl_last_data_2; + uint32_t icl_inv_data; +}; + +/** + * struct cam_cdm_icl_misc_regs - CDM icl misc registers + * + * @icl_inv_bl_addr: register offset to give address of bl_cmd that + * gave invalid command + * @icl_status: register offset for context that gave good BL + * command and invalid command.
+ */ +struct cam_cdm_icl_misc_regs { + uint32_t icl_inv_bl_addr; + uint32_t icl_status; +}; + +/** + * struct cam_cdm_icl_regs - CDM icl registers + * + * @data_regs: structure with registers of all cdm good and invalid + * BL command information. + * @misc_regs: structure with registers for invalid command address + * and context + */ +struct cam_cdm_icl_regs { + struct cam_cdm_icl_data_regs *data_regs; + struct cam_cdm_icl_misc_regs *misc_regs; +}; + +/** + * struct cam_cdm_comp_wait_status - BL_FIFO comp_event status register + * + * @comp_wait_status: register offset to give information on whether the + * CDM is waiting for an event from another module + */ +struct cam_cdm_comp_wait_status { + uint32_t comp_wait_status; +}; + +/** + * struct cam_cdm_common_reg_data - structure for register data + * + * @num_bl_fifo: number of FIFO are there in CDM + * @num_bl_fifo_irq: number of FIFO irqs in CDM + * @num_bl_pending_req_reg: number of pending_requests register in CDM + * @num_scratch_reg: number of scratch registers in CDM + */ +struct cam_cdm_common_reg_data { + uint32_t num_bl_fifo; + uint32_t num_bl_fifo_irq; + uint32_t num_bl_pending_req_reg; + uint32_t num_scratch_reg; +}; + +/** + * struct cam_cdm_common_regs - common structure to get common registers + * of CDM + * + * @cdm_hw_version: offset to read cdm_hw_version + * @cam_version: offset to read the camera Titan architecture version + * @rst_cmd: offset to reset the CDM + * @cgc_cfg: offset to configure CDM CGC logic + * @core_cfg: offset to configure CDM core with ARB_SEL, implicit + * wait, etc. + * @core_en: offset to pause/enable CDM + * @fe_cfg: offset to configure CDM fetch engine + * @bl_fifo_rb: offset to set BL_FIFO read back + * @bl_fifo_base_rb: offset to read back base address on offset set by + * bl_fifo_rb + * @bl_fifo_len_rb: offset to read back base len and tag on offset set by + * bl_fifo_rb + * @usr_data: offset to read user data from GEN_IRQ commands + * @wait_status: offset to read status for last WAIT command + * @last_ahb_addr: offset to read back last AHB address generated by CDM + * @last_ahb_data: offset to read back last AHB data generated by CDM + * @core_debug: offset to configure CDM debug bus and debug features + * @last_ahb_err_addr: offset to read back last AHB Error address generated + * by CDM + * @last_ahb_err_data: offset to read back last AHB Error data generated + * by CDM + * @current_bl_base: offset to read back current command buffer BASE address + * value out of BL_FIFO + * @current_bl_len: offset to read back current command buffer len, TAG, + * context ID ARB value out of BL_FIFO + * @current_used_ahb_base: offset to read back current base address used by + * CDM to access camera register + * @debug_status: offset to read back current CDM status + * @bus_misr_cfg0: offset to enable bus MISR and configure sampling mode + * @bus_misr_cfg1: offset to select from one of the six MISR's for reading + * signature value + * @bus_misr_rd_val: offset to read MISR signature + * @pending_req: registers to read pending request in FIFO + * @comp_wait: registers to read comp_event CDM is waiting for + * @perf_mon: registers to read perf_mon information + * @scratch: registers to read scratch register value + * @perf_reg: registers to read performance counters value + * @icl_reg: registers to read information related to good + * and invalid commands in FIFO + * @spare: spare register + * + */ +struct cam_cdm_common_regs { + uint32_t cdm_hw_version; + const struct cam_version_reg *cam_version; + 
uint32_t rst_cmd; + uint32_t cgc_cfg; + uint32_t core_cfg; + uint32_t core_en; + uint32_t fe_cfg; + uint32_t bl_fifo_rb; + uint32_t bl_fifo_base_rb; + uint32_t bl_fifo_len_rb; + uint32_t usr_data; + uint32_t wait_status; + uint32_t last_ahb_addr; + uint32_t last_ahb_data; + uint32_t core_debug; + uint32_t last_ahb_err_addr; + uint32_t last_ahb_err_data; + uint32_t current_bl_base; + uint32_t current_bl_len; + uint32_t current_used_ahb_base; + uint32_t debug_status; + uint32_t bus_misr_cfg0; + uint32_t bus_misr_cfg1; + uint32_t bus_misr_rd_val; + const struct cam_cdm_bl_pending_req_reg_params + *pending_req[CAM_CDM_BL_FIFO_PENDING_REQ_REG_NUM]; + const struct cam_cdm_comp_wait_status + *comp_wait[CAM_CDM_COMP_WAIT_STATUS_REG_NUM]; + const struct cam_cdm_perf_mon_regs + *perf_mon[CAM_CDM_PERF_MON_REG_NUM]; + const struct cam_cdm_scratch_reg + *scratch[CAM_CDM_SCRATCH_REG_NUM]; + const struct cam_cdm_perf_regs *perf_reg; + const struct cam_cdm_icl_regs *icl_reg; + uint32_t spare; +}; + +/** + * struct cam_cdm_hw_reg_offset - BL_FIFO comp_event status register + * + * @cmn_reg: pointer to structure to get common registers of a CDM + * @bl_fifo_reg: pointer to structure to get BL_FIFO registers of a CDM + * @irq_reg: pointer to structure to get IRQ registers of a CDM + * @reg_data: pointer to structure to reg_data related to CDM + * registers + */ +struct cam_cdm_hw_reg_offset { + const struct cam_cdm_common_regs *cmn_reg; + const struct cam_cdm_bl_fifo_regs *bl_fifo_reg[CAM_CDM_BL_FIFO_REG_NUM]; + const struct cam_cdm_irq_regs *irq_reg[CAM_CDM_BL_FIFO_IRQ_REG_NUM]; + const struct cam_cdm_common_reg_data *reg_data; }; /* enum cam_cdm_hw_process_intf_cmd - interface commands.*/ @@ -52,83 +369,11 @@ enum cam_cdm_hw_process_intf_cmd { CAM_CDM_HW_INTF_CMD_RELEASE, CAM_CDM_HW_INTF_CMD_SUBMIT_BL, CAM_CDM_HW_INTF_CMD_RESET_HW, + CAM_CDM_HW_INTF_CMD_FLUSH_HW, + CAM_CDM_HW_INTF_CMD_HANDLE_ERROR, CAM_CDM_HW_INTF_CMD_INVALID, }; -/* enum cam_cdm_regs - CDM driver offset enums.*/ -enum cam_cdm_regs { - /*cfg_offsets 0*/ - CDM_CFG_HW_VERSION, - CDM_CFG_TITAN_VERSION, - CDM_CFG_RST_CMD, - CDM_CFG_CGC_CFG, - CDM_CFG_CORE_CFG, - CDM_CFG_CORE_EN, - CDM_CFG_FE_CFG, - /*irq_offsets 7*/ - CDM_IRQ_MASK, - CDM_IRQ_CLEAR, - CDM_IRQ_CLEAR_CMD, - CDM_IRQ_SET, - CDM_IRQ_SET_CMD, - CDM_IRQ_STATUS, - CDM_IRQ_USR_DATA, - /*BL FIFO Registers 14*/ - CDM_BL_FIFO_BASE_REG, - CDM_BL_FIFO_LEN_REG, - CDM_BL_FIFO_STORE_REG, - CDM_BL_FIFO_CFG, - CDM_BL_FIFO_RB, - CDM_BL_FIFO_BASE_RB, - CDM_BL_FIFO_LEN_RB, - CDM_BL_FIFO_PENDING_REQ_RB, - /*CDM System Debug Registers 22*/ - CDM_DBG_WAIT_STATUS, - CDM_DBG_SCRATCH_0_REG, - CDM_DBG_SCRATCH_1_REG, - CDM_DBG_SCRATCH_2_REG, - CDM_DBG_SCRATCH_3_REG, - CDM_DBG_SCRATCH_4_REG, - CDM_DBG_SCRATCH_5_REG, - CDM_DBG_SCRATCH_6_REG, - CDM_DBG_SCRATCH_7_REG, - CDM_DBG_LAST_AHB_ADDR, - CDM_DBG_LAST_AHB_DATA, - CDM_DBG_CORE_DBUG, - CDM_DBG_LAST_AHB_ERR_ADDR, - CDM_DBG_LAST_AHB_ERR_DATA, - CDM_DBG_CURRENT_BL_BASE, - CDM_DBG_CURRENT_BL_LEN, - CDM_DBG_CURRENT_USED_AHB_BASE, - CDM_DBG_DEBUG_STATUS, - /*FE Bus Miser Registers 40*/ - CDM_BUS_MISR_CFG_0, - CDM_BUS_MISR_CFG_1, - CDM_BUS_MISR_RD_VAL, - /*Performance Counter registers 43*/ - CDM_PERF_MON_CTRL, - CDM_PERF_MON_0, - CDM_PERF_MON_1, - CDM_PERF_MON_2, - /*Spare registers 47*/ - CDM_SPARE, -}; - -/* struct cam_cdm_reg_offset - struct for offset with attribute.*/ -struct cam_cdm_reg_offset { - uint32_t offset; - enum cam_cdm_reg_attr attribute; -}; - -/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/ -struct cam_cdm_reg_offset_table 
{ - uint32_t first_offset; - uint32_t last_offset; - uint32_t reg_count; - const struct cam_cdm_reg_offset *offsets; - uint32_t offset_max_size; -}; - /* enum cam_cdm_flags - Bit fields for CDM flags used */ enum cam_cdm_flags { CAM_CDM_FLAG_SHARED_CDM, @@ -147,6 +392,29 @@ enum cam_cdm_mem_base_index { CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK, }; +/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */ +enum cam_cdm_bl_cb_type { + CAM_HW_CDM_BL_CB_CLIENT = 1, + CAM_HW_CDM_BL_CB_INTERNAL, +}; + +/* enum cam_cdm_arbitration - Enum type of arbitration */ +enum cam_cdm_arbitration { + CAM_CDM_ARBITRATION_NONE, + CAM_CDM_ARBITRATION_ROUND_ROBIN, + CAM_CDM_ARBITRATION_PRIORITY_BASED, + CAM_CDM_ARBITRATION_MAX, +}; + +enum cam_cdm_hw_version { + CAM_CDM_VERSION = 0, + CAM_CDM_VERSION_1_0 = 0x10000000, + CAM_CDM_VERSION_1_1 = 0x10010000, + CAM_CDM_VERSION_1_2 = 0x10020000, + CAM_CDM_VERSION_2_0 = 0x20000000, + CAM_CDM_VERSION_MAX, +}; + /* struct cam_cdm_client - struct for cdm clients data.*/ struct cam_cdm_client { struct cam_cdm_acquire_data data; @@ -162,15 +430,10 @@ struct cam_cdm_work_payload { struct cam_hw_info *hw; uint32_t irq_status; uint32_t irq_data; + int fifo_idx; struct work_struct work; }; -/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */ -enum cam_cdm_bl_cb_type { - CAM_HW_CDM_BL_CB_CLIENT = 1, - CAM_HW_CDM_BL_CB_INTERNAL, -}; - /* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/ struct cam_cdm_bl_cb_request_entry { uint8_t bl_tag; @@ -195,28 +458,63 @@ struct cam_cdm_hw_mem { size_t size; }; -/* struct cam_cdm - CDM hw device struct */ +/* struct cam_cdm_bl_fifo - CDM BL_FIFO struct */ +struct cam_cdm_bl_fifo { + struct completion bl_complete; + struct workqueue_struct *work_queue; + struct list_head bl_request_list; + struct mutex fifo_lock; + uint8_t bl_tag; + uint32_t bl_depth; +}; + +/** + * struct cam_cdm - CDM hw device struct + * + * @index: index of CDM hardware + * @name: cdm_name + * @id: enum for possible CDM hardware + * @flags: enum to tell if CDM is private or shared + * @reset_complete: completion event to make CDM wait for reset + * @work_queue: workqueue to schedule work for virtual CDM + * @bl_request_list: bl_request list for submitted commands in + * virtual CDM + * @version: CDM version with major, minor, incr and reserved + * @hw_version: CDM version as read from the cdm_version register + * @hw_family_version: version of hw family the CDM belongs to + * @iommu_hdl: CDM iommu handle + * @offsets: pointer to structure of CDM registers + * @ops: CDM ops for generating cdm commands + * @clients: CDM clients array currently active on CDM + * @bl_fifo: structure with per fifo related attributes + * @cdm_status: bitfield with bits assigned for different cdm status + * @bl_tag: slot value at which the next bl cmd will be written + * in case of virtual CDM + * @gen_irq: memory region in which gen_irq command will be written + * @cpas_handle: handle for cpas driver + * @arbitration: type of arbitration to be used for the CDM + */ struct cam_cdm { uint32_t index; char name[128]; enum cam_cdm_id id; enum cam_cdm_flags flags; struct completion reset_complete; - struct completion bl_complete; struct workqueue_struct *work_queue; struct list_head bl_request_list; struct cam_hw_version version; uint32_t hw_version; uint32_t hw_family_version; struct cam_iommu_handle iommu_hdl; - struct cam_cdm_reg_offset_table *offset_tbl; + struct cam_cdm_hw_reg_offset *offsets; struct cam_cdm_utils_ops *ops;
struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS]; + struct cam_cdm_bl_fifo bl_fifo[CAM_CDM_BL_FIFO_MAX]; + unsigned long cdm_status; uint8_t bl_tag; - atomic_t error; - atomic_t bl_done; - struct cam_cdm_hw_mem gen_irq; + struct cam_cdm_hw_mem gen_irq[CAM_CDM_BL_FIFO_MAX]; uint32_t cpas_handle; + enum cam_cdm_arbitration arbitration; }; /* struct cam_cdm_private_dt_data - CDM hw custom dt data */ @@ -224,6 +522,8 @@ struct cam_cdm_private_dt_data { bool dt_cdm_shared; uint32_t dt_num_supported_clients; const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS]; + bool config_fifo; + uint32_t fifo_depth[CAM_CDM_BL_FIFO_MAX]; }; /* struct cam_cdm_intf_devices - CDM mgr interface devices */ diff --git a/drivers/cam_cdm/cam_cdm_core_common.c b/drivers/cam_cdm/cam_cdm_core_common.c index e903dc805e..034fdb5fa4 100644 --- a/drivers/cam_cdm/cam_cdm_core_common.c +++ b/drivers/cam_cdm/cam_cdm_core_common.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #include @@ -45,9 +45,10 @@ bool cam_cdm_set_cam_hw_version( uint32_t ver, struct cam_hw_version *cam_version) { switch (ver) { - case CAM_CDM170_VERSION: - case CAM_CDM175_VERSION: - case CAM_CDM480_VERSION: + case CAM_CDM100_VERSION: + case CAM_CDM110_VERSION: + case CAM_CDM120_VERSION: + case CAM_CDM200_VERSION: cam_version->major = (ver & 0xF0000000); cam_version->minor = (ver & 0xFFF0000); cam_version->incr = (ver & 0xFFFF); @@ -76,9 +77,10 @@ struct cam_cdm_utils_ops *cam_cdm_get_ops( { if (by_cam_version == false) { switch (ver) { - case CAM_CDM170_VERSION: - case CAM_CDM175_VERSION: - case CAM_CDM480_VERSION: + case CAM_CDM100_VERSION: + case CAM_CDM110_VERSION: + case CAM_CDM120_VERSION: + case CAM_CDM200_VERSION: return &CDM170_ops; default: CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", @@ -183,6 +185,7 @@ void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, return; } cam_cdm_get_client_refcount(client); + mutex_lock(&client->lock); if (client->data.cam_cdm_callback) { CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d", client->data.identifier, node->cookie); @@ -195,6 +198,38 @@ void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x", node->client_hdl); } + mutex_unlock(&client->lock); + cam_cdm_put_client_refcount(client); + return; + } else if (status == CAM_CDM_CB_STATUS_HW_RESET_DONE || + status == CAM_CDM_CB_STATUS_HW_FLUSH || + status == CAM_CDM_CB_STATUS_HW_RESUBMIT || + status == CAM_CDM_CB_STATUS_HW_ERROR) { + int client_idx; + struct cam_cdm_bl_cb_request_entry *node = + (struct cam_cdm_bl_cb_request_entry *)data; + + client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl); + client = core->clients[client_idx]; + if ((!client) || (client->handle != node->client_hdl)) { + CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, + node->client_hdl); + return; + } + cam_cdm_get_client_refcount(client); + mutex_lock(&client->lock); + if (client->data.cam_cdm_callback) { + client->data.cam_cdm_callback( + client->handle, + client->data.userdata, + status, + node->cookie); + } else { + CAM_ERR(CAM_CDM, + "No cb registered for client: name %s, hdl=%x", + client->data.identifier, client->handle); + } + mutex_unlock(&client->lock); cam_cdm_put_client_refcount(client); return; } @@ -202,6 +237,7 @@ void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, for (i = 0; i < 
CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) { if (core->clients[i] != NULL) { client = core->clients[i]; + cam_cdm_get_client_refcount(client); mutex_lock(&client->lock); CAM_DBG(CAM_CDM, "Found client slot %d", i); if (client->data.cam_cdm_callback) { @@ -221,6 +257,7 @@ void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, client->handle); } mutex_unlock(&client->lock); + cam_cdm_put_client_refcount(client); } } } @@ -479,6 +516,27 @@ int cam_cdm_process_cmd(void *hw_priv, data = (struct cam_cdm_acquire_data *)cmd_args; CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d", data->identifier, core->index); + + if (data->priority >= CAM_CDM_BL_FIFO_MAX) { + mutex_unlock(&cdm_hw->hw_mutex); + CAM_ERR(CAM_CDM, + "Invalid priority requested %d", + data->priority); + rc = -EINVAL; + break; + } + + if (core->id != CAM_CDM_VIRTUAL && + core->bl_fifo[data->priority].bl_depth == 0) { + mutex_unlock(&cdm_hw->hw_mutex); + CAM_ERR(CAM_CDM, + "FIFO %d not supported for core %d", + data->priority, + core->id); + rc = -EINVAL; + break; + } + idx = cam_cdm_find_free_client_slot(core); if ((idx < 0) || (core->clients[idx])) { mutex_unlock(&cdm_hw->hw_mutex); @@ -527,6 +585,7 @@ int cam_cdm_process_cmd(void *hw_priv, sizeof(struct cam_cdm_acquire_data)); client->handle = CAM_CDM_CREATE_CLIENT_HANDLE( core->index, + data->priority, idx); client->stream_on = false; data->handle = client->handle; @@ -575,8 +634,133 @@ int cam_cdm_process_cmd(void *hw_priv, break; } case CAM_CDM_HW_INTF_CMD_RESET_HW: { - CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x", - *((uint32_t *)cmd_args)); + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + rc = cam_hw_cdm_reset_hw(cdm_hw, *handle); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM HW reset failed for handle 0x%x rc = %d", + *handle, rc); + } else { + CAM_INFO_RATE_LIMIT(CAM_CDM, + "CDM HW reset done for handle 0x%x", + *handle); + } + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + case CAM_CDM_HW_INTF_CMD_FLUSH_HW: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + rc = cam_hw_cdm_flush_hw(cdm_hw, *handle); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM HW flush failed for handle 0x%x rc = %d", + *handle, rc); + } else { + CAM_INFO_RATE_LIMIT(CAM_CDM, + "CDM HW flush done for handle 0x%x", + *handle); + } + mutex_unlock(&cdm_hw->hw_mutex); + 
break; + } + case CAM_CDM_HW_INTF_CMD_HANDLE_ERROR: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + rc = cam_hw_cdm_handle_error(cdm_hw, *handle); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM HW handle error failed for handle 0x%x rc = %d", + *handle, rc); + } else { + CAM_INFO_RATE_LIMIT(CAM_CDM, + "CDM HW handle error done for handle 0x%x", + *handle); + } + + mutex_unlock(&cdm_hw->hw_mutex); break; } default: diff --git a/drivers/cam_cdm/cam_cdm_core_common.h b/drivers/cam_cdm/cam_cdm_core_common.h index 8dcbe8ed19..055c7cf1fe 100644 --- a/drivers/cam_cdm/cam_cdm_core_common.h +++ b/drivers/cam_cdm/cam_cdm_core_common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _CAM_CDM_CORE_COMMON_H_ @@ -8,9 +8,19 @@ #include "cam_mem_mgr.h" -#define CAM_CDM170_VERSION 0x10000000 -#define CAM_CDM175_VERSION 0x10010000 -#define CAM_CDM480_VERSION 0x10020000 +#define CAM_CDM100_VERSION 0x10000000 +#define CAM_CDM110_VERSION 0x10010000 +#define CAM_CDM120_VERSION 0x10020000 +#define CAM_CDM200_VERSION 0x20000000 + +#define CAM_CDM_AHB_BURST_LEN_1 (BIT(1) - 1) +#define CAM_CDM_AHB_BURST_LEN_4 (BIT(2) - 1) +#define CAM_CDM_AHB_BURST_LEN_8 (BIT(3) - 1) +#define CAM_CDM_AHB_BURST_LEN_16 (BIT(4) - 1) +#define CAM_CDM_AHB_BURST_EN BIT(5) +#define CAM_CDM_AHB_STOP_ON_ERROR BIT(8) +#define CAM_CDM_ARB_SEL_RR BIT(16) +#define CAM_CDM_IMPLICIT_WAIT_EN BIT(17) extern struct cam_cdm_utils_ops CDM170_ops; @@ -37,6 +47,9 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw, int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, struct cam_cdm_hw_intf_cmd_submit_bl *req, struct cam_cdm_client *client); +int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle); +int cam_hw_cdm_flush_hw(struct cam_hw_info *cdm_hw, uint32_t handle); +int cam_hw_cdm_handle_error(struct cam_hw_info *cdm_hw, uint32_t handle); struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag( uint32_t tag, struct list_head *bl_list); void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, diff --git a/drivers/cam_cdm/cam_cdm_hw_core.c b/drivers/cam_cdm/cam_cdm_hw_core.c index 5a59e4c611..1142f2776f 100644 --- a/drivers/cam_cdm/cam_cdm_hw_core.c +++ b/drivers/cam_cdm/cam_cdm_hw_core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -18,14 +18,13 @@ #include "cam_cdm_core_common.h" #include "cam_cdm_soc.h" #include "cam_io_util.h" -#include "cam_hw_cdm170_reg.h" - -#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0" -#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm" -#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm" -#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm" +#include "cam_cdm_hw_reg_1_0.h" +#include "cam_cdm_hw_reg_1_1.h" +#include "cam_cdm_hw_reg_1_2.h" +#include "cam_cdm_hw_reg_2_0.h" #define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000 +#define CAM_CDM_DBG_GEN_IRQ_USR_DATA 0xff static void cam_hw_cdm_work(struct work_struct *work); @@ -33,65 +32,96 @@ static void cam_hw_cdm_work(struct work_struct *work); static const struct of_device_id msm_cam_hw_cdm_dt_match[] = { { .compatible = CAM_HW_CDM_CPAS_0_NAME, - .data = &cam170_cpas_cdm_offset_table, + .data = &cam_cdm_1_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_1_0, + .data = &cam_cdm_1_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_1_1, + .data = &cam_cdm_1_1_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_1_2, + .data = &cam_cdm_1_2_reg_offset, + }, + { + .compatible = CAM_HW_CDM_IFE_NAME_1_2, + .data = &cam_cdm_1_2_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_2_0, + .data = &cam_cdm_2_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_OPE_NAME_2_0, + .data = &cam_cdm_2_0_reg_offset, }, - {} }; static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name) { - if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name)) - return CAM_CDM_CPAS_0; + if (strnstr(name, CAM_HW_CDM_CPAS_0_NAME, + strlen(CAM_HW_CDM_CPAS_0_NAME))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_1_0, + strlen(CAM_HW_CDM_CPAS_NAME_1_0))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_1_1, + strlen(CAM_HW_CDM_CPAS_NAME_1_1))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_1_2, + strlen(CAM_HW_CDM_CPAS_NAME_1_2))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_IFE_NAME_1_2, + strlen(CAM_HW_CDM_CPAS_NAME_1_2))) + return CAM_CDM_IFE; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_2_0, + strlen(CAM_HW_CDM_CPAS_NAME_2_0))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_OPE_NAME_2_0, + strlen(CAM_HW_CDM_CPAS_NAME_2_0))) + return CAM_CDM_OPE; return CAM_CDM_MAX; } -int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw, - uint32_t *pending_bl) -{ - int rc = 0; - - if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB, - pending_bl)) { - CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's"); - rc = -EIO; - } - - return rc; -} - static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw, - bool enable) + bool enable, uint32_t fifo_idx) { int rc = -EIO; uint32_t irq_mask = 0; struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; - if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK, - &irq_mask)) { + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[fifo_idx]->irq_mask, + &irq_mask)) { CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask"); return rc; } if (enable == true) { - if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, - (irq_mask | 0x4))) { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->irq_reg[fifo_idx]->irq_mask, + (irq_mask | 0x4))) { CAM_ERR(CAM_CDM, "Write failed to enable BL done irq"); } else { - atomic_inc(&core->bl_done); + set_bit(fifo_idx, &core->cdm_status); rc = 0; CAM_DBG(CAM_CDM, "BL done irq enabled =%d", - atomic_read(&core->bl_done)); + test_bit(fifo_idx, &core->cdm_status)); } } else { - if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, - (irq_mask 
& 0x70003))) { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->irq_reg[fifo_idx]->irq_mask, + (irq_mask & 0x70003))) { CAM_ERR(CAM_CDM, "Write failed to disable BL done irq"); } else { - atomic_dec(&core->bl_done); + clear_bit(fifo_idx, &core->cdm_status); rc = 0; CAM_DBG(CAM_CDM, "BL done irq disable =%d", - atomic_read(&core->bl_done)); + test_bit(fifo_idx, &core->cdm_status)); } } return rc; @@ -100,14 +130,19 @@ static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw, static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable) { int rc = 0; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; if (enable == true) { - if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x01)) { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, + 0x01)) { CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable"); rc = -EIO; } } else { - if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x02)) { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, + 0x02)) { CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable"); rc = -EIO; } @@ -118,8 +153,11 @@ static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable) int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw) { int rc = 0; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; - if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_debug, + 0x10100)) { CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug"); rc = -EIO; } @@ -130,8 +168,10 @@ int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw) int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw) { int rc = 0; + struct cam_cdm *cdm_core = (struct cam_cdm *)cdm_hw->core_info; - if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) { + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->core_debug, 0)) { CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug"); rc = -EIO; } @@ -142,129 +182,362 @@ int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw) void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw) { uint32_t dump_reg = 0; + int i; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; - cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, &dump_reg); CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg); - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg); + for (i = 0; i < core->offsets->reg_data->num_scratch_reg; i++) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->scratch[i]->scratch_reg, + &dump_reg); + CAM_ERR(CAM_CDM, "dump scratch%d=%x", i, dump_reg); + } +} - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg); +int cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo( + struct cam_hw_info *cdm_hw, + uint32_t fifo_idx, + uint32_t *pending_bl_req) +{ + int rc = 0; + uint32_t fifo_reg; + uint32_t fifo_id; - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg); + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg); + if (fifo_idx >= CAM_CDM_BL_FIFO_REG_NUM) { + CAM_ERR(CAM_CDM, + "BL_FIFO index is wrong. 
fifo_idx %d", + fifo_idx); + rc = -EINVAL; + goto end; + } - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg); + fifo_reg = fifo_idx / 2; + fifo_id = fifo_idx % 2; - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg); + if (core->offsets->cmn_reg->pending_req[fifo_reg]) { + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->pending_req + [fifo_reg]->rb_offset, + pending_bl_req)) { + CAM_ERR(CAM_CDM, "Error reading CDM register"); + rc = -EIO; + goto end; + } - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg); + *pending_bl_req = (*pending_bl_req >> ( + core->offsets->cmn_reg->pending_req + [fifo_reg]->rb_next_fifo_shift * + fifo_id)) & core->offsets->cmn_reg->pending_req + [fifo_reg]->rb_mask; + rc = 0; + } - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg); - CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg); + CAM_DBG(CAM_CDM, "pending_bl_req %d fifo_reg %d, fifo_id %d", + *pending_bl_req, fifo_reg, fifo_id); +end: + return rc; +} + +int cam_hw_cdm_enable_core_dbg_per_fifo( + struct cam_hw_info *cdm_hw, + uint32_t fifo_idx) +{ + int rc = 0; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_debug, + (0x10100 | fifo_idx << 20))) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug"); + rc = -EIO; + } + + return rc; +} + +void cam_hw_cdm_dump_bl_fifo_data(struct cam_hw_info *cdm_hw) +{ + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + int i, j; + uint32_t num_pending_req = 0, dump_reg; + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo(cdm_hw, + i, &num_pending_req); + + if (cam_hw_cdm_enable_core_dbg_per_fifo(cdm_hw, i)) { + CAM_ERR(CAM_CDM, + "Problem in selecting the fifo for readback"); + continue; + } + for (j = 0 ; j < num_pending_req ; j++) { + cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->bl_fifo_rb, j); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->bl_fifo_base_rb, + &dump_reg); + CAM_INFO(CAM_CDM, "BL(%d) base addr =%x", j, dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->bl_fifo_len_rb, + &dump_reg); + CAM_INFO(CAM_CDM, + "CDM HW current BL len=%d ARB %d tag=%d, ", + (dump_reg & CAM_CDM_CURRENT_BL_LEN), + (dump_reg & CAM_CDM_CURRENT_BL_ARB) >> + CAM_CDM_CURRENT_BL_ARB_SHIFT, + (dump_reg & CAM_CDM_CURRENT_BL_TAG) >> + CAM_CDM_CURRENT_BL_TAG_SHIFT); + } + } } void cam_hw_cdm_dump_core_debug_registers( struct cam_hw_info *cdm_hw) { - uint32_t dump_reg, core_dbg, loop_cnt; + uint32_t dump_reg, core_dbg; + int i; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; - mutex_lock(&cdm_hw->hw_mutex); - cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg); + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->core_en, &dump_reg); CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg); + /* First pause CDM, If it fails still proceed to dump debug info */ cam_hw_cdm_enable_core(cdm_hw, false); - cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg); - loop_cnt = dump_reg; - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg); - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg); + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->debug_status, + 
&dump_reg); + CAM_INFO(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_debug, + &core_dbg); if (core_dbg & 0x100) { - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg); - CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg); - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg); - CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->last_ahb_addr, + &dump_reg); + CAM_INFO(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->last_ahb_data, + &dump_reg); + CAM_INFO(CAM_CDM, "AHB dump reglastdata=%x", dump_reg); } else { - CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable"); + CAM_INFO(CAM_CDM, "CDM HW AHB dump not enable"); } - if (core_dbg & 0x10000) { - int i; + cam_hw_cdm_dump_bl_fifo_data(cdm_hw); - CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d", - loop_cnt); - for (i = 0 ; i < loop_cnt ; i++) { - cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i); - cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB, - &dump_reg); - CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg); - cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB, - &dump_reg); - CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i, - (dump_reg & 0xFFFFF), (dump_reg & 0xFF000000)); - } - } else { - CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable"); + CAM_INFO(CAM_CDM, "CDM HW default dump"); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_cfg, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW core cfg=%x", dump_reg); + + for (i = 0; i < + core->offsets->reg_data->num_bl_fifo_irq; + i++) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_status, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW irq status%d=%x", i, dump_reg); + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_set, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW irq set%d=%x", i, dump_reg); + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_mask, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW irq mask%d=%x", i, dump_reg); + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_clear, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW irq clear%d=%x", i, dump_reg); } - CAM_ERR(CAM_CDM, "CDM HW default dump"); - cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->current_bl_base, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW current BL base=%x", dump_reg); - cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->current_bl_len, &dump_reg); + CAM_INFO(CAM_CDM, + "CDM HW current BL len=%d ARB %d FIFO %d tag=%d, ", + (dump_reg & CAM_CDM_CURRENT_BL_LEN), + (dump_reg & CAM_CDM_CURRENT_BL_ARB) >> + CAM_CDM_CURRENT_BL_ARB_SHIFT, + (dump_reg & CAM_CDM_CURRENT_BL_FIFO) >> + CAM_CDM_CURRENT_BL_FIFO_SHIFT, + (dump_reg & CAM_CDM_CURRENT_BL_TAG) >> + CAM_CDM_CURRENT_BL_TAG_SHIFT); - cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg); - - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg); - - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d", - (dump_reg & 0xFFFFF), (dump_reg & 0xFF000000)); - - cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg); - 
CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg); - - cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg); - CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->current_used_ahb_base, &dump_reg); + CAM_INFO(CAM_CDM, "CDM HW current AHB base=%x", dump_reg); /* Enable CDM back */ cam_hw_cdm_enable_core(cdm_hw, true); - mutex_unlock(&cdm_hw->hw_mutex); - } -int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw, - uint32_t bl_count) +enum cam_cdm_arbitration cam_cdm_get_arbitration_type( + uint32_t cdm_version, + enum cam_cdm_id id) +{ + enum cam_cdm_arbitration arbitration; + + if (cdm_version < CAM_CDM_VERSION_2_0) { + arbitration = CAM_CDM_ARBITRATION_NONE; + goto end; + } + + switch (id) { + case CAM_CDM_CPAS: + arbitration = CAM_CDM_ARBITRATION_ROUND_ROBIN; + break; + default: + arbitration = CAM_CDM_ARBITRATION_PRIORITY_BASED; + break; + } +end: + return arbitration; +} + +int cam_hw_cdm_set_cdm_blfifo_cfg(struct cam_hw_info *cdm_hw) +{ + uint32_t blfifo_cfg_mask = 0; + int rc = 0, i; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + blfifo_cfg_mask = blfifo_cfg_mask | + CAM_CDM_BL_FIFO_REQ_SIZE_MAX; + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo_irq; i++) { + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_mask, 0x70003); + if (rc) { + CAM_ERR(CAM_CDM, + "Unable to write to cdm irq mask register"); + rc = -EIO; + goto end; + } + } + + if (core->hw_version >= CAM_CDM_VERSION_2_0) { + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + blfifo_cfg_mask = blfifo_cfg_mask | + (core->bl_fifo[i].bl_depth + << CAM_CDM_BL_FIFO_LENGTH_CFG_SHIFT); + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->bl_fifo_reg[i]->bl_fifo_cfg, + blfifo_cfg_mask); + if (rc) { + CAM_ERR(CAM_CDM, + "Unable to write to cdm irq mask register"); + rc = -EIO; + goto end; + } + } + } else { + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->bl_fifo_reg[i]->bl_fifo_cfg, + blfifo_cfg_mask); + if (rc) { + CAM_ERR(CAM_CDM, + "Unable to write to cdm irq mask register"); + rc = -EIO; + goto end; + } + } + } +end: + return rc; +} + +int cam_hw_cdm_set_cdm_core_cfg(struct cam_hw_info *cdm_hw) +{ + uint32_t cdm_version; + uint32_t cfg_mask = 0; + int rc; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + cfg_mask = cfg_mask | + CAM_CDM_AHB_STOP_ON_ERROR| + CAM_CDM_AHB_BURST_EN| + CAM_CDM_AHB_BURST_LEN_16; + + /* use version from cdm_core structure. 
*/ + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cdm_hw_version, + &cdm_version)) { + CAM_ERR(CAM_CDM, "Error reading CDM register"); + rc = -EIO; + goto end; + } + + if (cdm_version < CAM_CDM_VERSION_2_0) { + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_cfg, cfg_mask); + if (rc) { + CAM_ERR(CAM_CDM, "Error writing cdm core cfg"); + rc = -EIO; + goto end; + } + } else { + if (core->id != CAM_CDM_CPAS) + cfg_mask = cfg_mask | CAM_CDM_IMPLICIT_WAIT_EN; + + if (core->arbitration == CAM_CDM_ARBITRATION_ROUND_ROBIN) + cfg_mask = cfg_mask | CAM_CDM_ARB_SEL_RR; + + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_cfg, cfg_mask); + if (rc) { + CAM_ERR(CAM_CDM, "Error writing cdm core cfg"); + rc = -EIO; + goto end; + } + } + +end: + return rc; +} + +int cam_hw_cdm_wait_for_bl_fifo( + struct cam_hw_info *cdm_hw, + uint32_t bl_count, + uint32_t fifo_idx) { uint32_t pending_bl = 0; int32_t available_bl_slots = 0; int rc = -EIO; long time_left; struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + struct cam_cdm_bl_fifo *bl_fifo = NULL; + + if (fifo_idx >= CAM_CDM_BL_FIFO_MAX) { + rc = -EINVAL; + CAM_ERR(CAM_CDM, + "Invalid fifo index %d rc = %d", + fifo_idx, rc); + goto end; + } + + bl_fifo = &core->bl_fifo[fifo_idx]; do { - if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB, - &pending_bl)) { + if (cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo( + cdm_hw, fifo_idx, &pending_bl)) { CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's"); rc = -EIO; break; } - available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl; + available_bl_slots = bl_fifo->bl_depth - pending_bl; if (available_bl_slots < 0) { CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d", - available_bl_slots, CAM_CDM_HWFIFO_SIZE, + available_bl_slots, bl_fifo->bl_depth, pending_bl); break; } @@ -275,25 +548,28 @@ int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw, rc = bl_count; break; } else if (0 == (available_bl_slots - 1)) { - rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true); + rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, + true, fifo_idx); if (rc) { CAM_ERR(CAM_CDM, "Enable BL done irq failed"); break; } time_left = wait_for_completion_timeout( - &core->bl_complete, msecs_to_jiffies( + &core->bl_fifo[fifo_idx].bl_complete, + msecs_to_jiffies( CAM_CDM_BL_FIFO_WAIT_TIMEOUT)); if (time_left <= 0) { CAM_ERR(CAM_CDM, "CDM HW BL Wait timed out failed"); if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, - false)) + false, fifo_idx)) CAM_ERR(CAM_CDM, "Disable BL done irq failed"); rc = -EIO; break; } - if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false)) + if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, + false, fifo_idx)) CAM_ERR(CAM_CDM, "Disable BL done irq failed"); rc = 0; CAM_DBG(CAM_CDM, "CDM HW is ready for data"); @@ -303,78 +579,110 @@ int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw, } } while (1); +end: + return rc; } -bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src, - uint32_t len, uint32_t tag) +bool cam_hw_cdm_bl_write( + struct cam_hw_info *cdm_hw, uint32_t src, + uint32_t len, uint32_t tag, bool set_arb, + uint32_t fifo_idx) { - if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) { + struct cam_cdm *cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->bl_fifo_reg[fifo_idx]->bl_fifo_base, + src)) { CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base"); return true; } - if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG, - ((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) { + if 
(cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->bl_fifo_reg[fifo_idx]->bl_fifo_len, + ((len & 0xFFFFF) | ((tag & 0xFF) << 24)) | + ((set_arb) ? (1 << 20) : (0)))) { CAM_ERR(CAM_CDM, "Failed to write CDM BL len"); return true; } return false; } -bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw) +bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw, uint32_t fifo_idx) { - if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) { + struct cam_cdm *cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->bl_fifo_reg[fifo_idx]->bl_fifo_store, + 1)) { CAM_ERR(CAM_CDM, "Failed to write CDM commit BL"); return true; } return false; } -int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw, - struct cam_cdm_hw_intf_cmd_submit_bl *req) +int cam_hw_cdm_submit_gen_irq( + struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + uint32_t fifo_idx, bool set_arb) { struct cam_cdm_bl_cb_request_entry *node; struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; uint32_t len; int rc; + bool bit_wr_enable = false; - if (core->bl_tag > 63) { - CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag); + if (core->bl_fifo[fifo_idx].bl_tag > 63) { + CAM_ERR(CAM_CDM, + "bl_tag invalid =%d", + core->bl_fifo[fifo_idx].bl_tag); rc = -EINVAL; goto end; } CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d", - core->bl_tag, req->data->cmd_arrary_count, req->data->cookie); + core->bl_fifo[fifo_idx].bl_tag, + req->data->cmd_arrary_count, + req->data->cookie); + node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry), GFP_KERNEL); if (!node) { rc = -ENOMEM; goto end; } + + if (core->offsets->reg_data->num_bl_fifo > 1) + bit_wr_enable = true; + node->request_type = CAM_HW_CDM_BL_CB_CLIENT; node->client_hdl = req->handle; node->cookie = req->data->cookie; - node->bl_tag = core->bl_tag; + node->bl_tag = core->bl_fifo[fifo_idx].bl_tag; node->userdata = req->data->userdata; - list_add_tail(&node->entry, &core->bl_request_list); - len = core->ops->cdm_required_size_genirq() * core->bl_tag; - core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len), - core->bl_tag); - rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)), + list_add_tail(&node->entry, &core->bl_fifo[fifo_idx].bl_request_list); + len = core->ops->cdm_required_size_genirq() * + core->bl_fifo[fifo_idx].bl_tag; + core->ops->cdm_write_genirq( + ((uint32_t *)core->gen_irq[fifo_idx].kmdvaddr + len), + core->bl_fifo[fifo_idx].bl_tag, + bit_wr_enable, fifo_idx); + rc = cam_hw_cdm_bl_write(cdm_hw, + (core->gen_irq[fifo_idx].vaddr + (4*len)), ((4 * core->ops->cdm_required_size_genirq()) - 1), - core->bl_tag); + core->bl_fifo[fifo_idx].bl_tag, + set_arb, fifo_idx); if (rc) { CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d", - core->bl_tag); + core->bl_fifo[fifo_idx].bl_tag); list_del_init(&node->entry); kfree(node); rc = -EIO; goto end; } - if (cam_hw_cdm_commit_bl_write(cdm_hw)) { - CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d", - core->bl_tag); + if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { + CAM_ERR(CAM_CDM, + "Cannot commit the genirq BL with tag tag=%d", + core->bl_fifo[fifo_idx].bl_tag); list_del_init(&node->entry); kfree(node); rc = -EIO; @@ -384,6 +692,52 @@ end: return rc; } +int cam_hw_cdm_submit_debug_gen_irq( + struct cam_hw_info *cdm_hw, + uint32_t fifo_idx) +{ + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + uint32_t len; + int rc; + bool bit_wr_enable = false; + + 
CAM_DBG(CAM_CDM, + "CDM write BL last cmd tag=0x%x", + core->bl_fifo[fifo_idx].bl_tag); + + if (core->offsets->reg_data->num_bl_fifo > 1) + bit_wr_enable = true; + + len = core->ops->cdm_required_size_genirq() * + core->bl_fifo[fifo_idx].bl_tag; + core->ops->cdm_write_genirq( + ((uint32_t *)core->gen_irq[fifo_idx].kmdvaddr + len), + CAM_CDM_DBG_GEN_IRQ_USR_DATA, bit_wr_enable, fifo_idx); + rc = cam_hw_cdm_bl_write(cdm_hw, + (core->gen_irq[fifo_idx].vaddr + (4*len)), + ((4 * core->ops->cdm_required_size_genirq()) - 1), + core->bl_fifo[fifo_idx].bl_tag, + false, fifo_idx); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM hw bl write failed for dbggenirq USRdata=%d tag 0x%x", + CAM_CDM_DBG_GEN_IRQ_USR_DATA, + core->bl_fifo[fifo_idx].bl_tag); + rc = -EIO; + goto end; + } + if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { + CAM_ERR(CAM_CDM, + "Cannot commit the dbggenirq BL with tag tag=0x%x", + core->bl_fifo[fifo_idx].bl_tag); + rc = -EIO; + goto end; + } + +end: + return rc; +} + int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, struct cam_cdm_hw_intf_cmd_submit_bl *req, struct cam_cdm_client *client) @@ -391,24 +745,42 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, int i, rc; struct cam_cdm_bl_request *cdm_cmd = req->data; struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; - uint32_t pending_bl = 0; + struct cam_cdm_bl_fifo *bl_fifo = NULL; + uint32_t pending_bl = 0, fifo_idx = 0; int write_count = 0; - if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) { - pr_info("requested BL more than max size, cnt=%d max=%d", - req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE); + fifo_idx = CAM_CDM_GET_BLFIFO_IDX(client->handle); + + if (fifo_idx >= CAM_CDM_BL_FIFO_MAX) { + rc = -EINVAL; + CAM_ERR(CAM_CDM, "Invalid handle 0x%x, rc = %d", + client->handle, rc); + goto end; } - if (atomic_read(&core->error)) - return -EIO; + bl_fifo = &core->bl_fifo[fifo_idx]; - mutex_lock(&cdm_hw->hw_mutex); + if (req->data->cmd_arrary_count > bl_fifo->bl_depth) { + CAM_INFO(CAM_CDM, + "requested BL more than max size, cnt=%d max=%d", + req->data->cmd_arrary_count, + bl_fifo->bl_depth); + } + + if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status)) + return -EAGAIN; + + mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock); mutex_lock(&client->lock); - rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl); + + rc = cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo(cdm_hw, + fifo_idx, &pending_bl); + if (rc) { CAM_ERR(CAM_CDM, "Cannot read the current BL depth"); mutex_unlock(&client->lock); - mutex_unlock(&cdm_hw->hw_mutex); + mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock); return rc; } @@ -425,16 +797,19 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, rc = -EINVAL; break; } - if (atomic_read(&core->error)) { + if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_RESET_HW_STATUS, + &core->cdm_status)) { CAM_ERR_RATE_LIMIT(CAM_CDM, - "In error state cnt=%d total cnt=%d\n", - i, req->data->cmd_arrary_count); - rc = -EIO; + "In error/reset state cnt=%d total cnt=%d cdm_status 0x%x", + i, req->data->cmd_arrary_count, + core->cdm_status); + rc = -EAGAIN; break; } if (write_count == 0) { write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw, - (req->data->cmd_arrary_count - i)); + (req->data->cmd_arrary_count - i), fifo_idx); if (write_count < 0) { CAM_ERR(CAM_CDM, "wait for bl fifo failed %d:%d", @@ -485,13 +860,16 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, } CAM_DBG(CAM_CDM, "Got the HW VA"); - if (core->bl_tag >= - 
(CAM_CDM_HWFIFO_SIZE - 1)) - core->bl_tag = 0; + if (core->bl_fifo[fifo_idx].bl_tag >= + (bl_fifo->bl_depth - 1)) + core->bl_fifo[fifo_idx].bl_tag = 0; rc = cam_hw_cdm_bl_write(cdm_hw, ((uint32_t)hw_vaddr_ptr + cdm_cmd->cmd[i].offset), - (cdm_cmd->cmd[i].len - 1), core->bl_tag); + (cdm_cmd->cmd[i].len - 1), + core->bl_fifo[fifo_idx].bl_tag, + cdm_cmd->cmd[i].arbitrate, + fifo_idx); if (rc) { CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d", i, req->data->cmd_arrary_count); @@ -512,40 +890,92 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, if (!rc) { CAM_DBG(CAM_CDM, "write BL success for cnt=%d with tag=%d total_cnt=%d", - i, core->bl_tag, req->data->cmd_arrary_count); + i, core->bl_fifo[fifo_idx].bl_tag, + req->data->cmd_arrary_count); CAM_DBG(CAM_CDM, "Now commit the BL"); - if (cam_hw_cdm_commit_bl_write(cdm_hw)) { + if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { CAM_ERR(CAM_CDM, "Cannot commit the BL %d tag=%d", - i, core->bl_tag); + i, core->bl_fifo[fifo_idx].bl_tag); rc = -EIO; break; } CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i, - core->bl_tag); - core->bl_tag++; + core->bl_fifo[fifo_idx].bl_tag); + core->bl_fifo[fifo_idx].bl_tag++; + + if (cdm_cmd->cmd[i].enable_debug_gen_irq) { + rc = cam_hw_cdm_submit_debug_gen_irq(cdm_hw, + fifo_idx); + if (rc == 0) + core->bl_fifo[fifo_idx].bl_tag++; + if (core->bl_fifo[fifo_idx].bl_tag >= + (bl_fifo->bl_depth - + 1)) + core->bl_fifo[fifo_idx].bl_tag = 0; + } + if ((req->data->flag == true) && (i == (req->data->cmd_arrary_count - 1))) { rc = cam_hw_cdm_submit_gen_irq( - cdm_hw, req); + cdm_hw, req, fifo_idx, + cdm_cmd->gen_irq_arb); if (rc == 0) - core->bl_tag++; + core->bl_fifo[fifo_idx].bl_tag++; } } } mutex_unlock(&client->lock); - mutex_unlock(&cdm_hw->hw_mutex); + mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock); + +end: return rc; } +static void cam_hw_cdm_reset_cleanup( + struct cam_hw_info *cdm_hw, + uint32_t handle) +{ + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + int i; + struct cam_cdm_bl_cb_request_entry *node, *tnode; + bool flush_hw = false; + + if (test_bit(CAM_CDM_FLUSH_HW_STATUS, &core->cdm_status)) + flush_hw = true; + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + list_for_each_entry_safe(node, tnode, + &core->bl_fifo[i].bl_request_list, entry) { + if (node->request_type == + CAM_HW_CDM_BL_CB_CLIENT) { + if (flush_hw) + cam_cdm_notify_clients(cdm_hw, + (node->client_hdl == handle) ? 
+ CAM_CDM_CB_STATUS_HW_FLUSH : + CAM_CDM_CB_STATUS_HW_RESUBMIT, + (void *)node); + else + cam_cdm_notify_clients(cdm_hw, + CAM_CDM_CB_STATUS_HW_RESET_DONE, + (void *)node); + } + list_del_init(&node->entry); + kfree(node); + } + core->bl_fifo[i].bl_tag = 0; + } +} + static void cam_hw_cdm_work(struct work_struct *work) { struct cam_cdm_work_payload *payload; struct cam_hw_info *cdm_hw; struct cam_cdm *core; + int i; payload = container_of(work, struct cam_cdm_work_payload, work); if (payload) { @@ -554,14 +984,24 @@ static void cam_hw_cdm_work(struct work_struct *work) CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status); if (payload->irq_status & - CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) { + CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) { struct cam_cdm_bl_cb_request_entry *node, *tnode; CAM_DBG(CAM_CDM, "inline IRQ data=0x%x", payload->irq_data); - mutex_lock(&cdm_hw->hw_mutex); + + if (payload->irq_data == 0xff) { + CAM_INFO(CAM_CDM, "Debug genirq received"); + kfree(payload); + return; + } + + mutex_lock(&core->bl_fifo[payload->fifo_idx] + .fifo_lock); list_for_each_entry_safe(node, tnode, - &core->bl_request_list, entry) { + &core->bl_fifo[payload->fifo_idx] + .bl_request_list, + entry) { if (node->request_type == CAM_HW_CDM_BL_CB_CLIENT) { cam_cdm_notify_clients(cdm_hw, @@ -580,41 +1020,42 @@ static void cam_hw_cdm_work(struct work_struct *work) } kfree(node); } - mutex_unlock(&cdm_hw->hw_mutex); + mutex_unlock(&core->bl_fifo[payload->fifo_idx] + .fifo_lock); } if (payload->irq_status & - CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) { + CAM_CDM_IRQ_STATUS_RST_DONE_MASK) { CAM_DBG(CAM_CDM, "CDM HW reset done IRQ"); complete(&core->reset_complete); } if (payload->irq_status & - CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) { - if (atomic_read(&core->bl_done)) { + CAM_CDM_IRQ_STATUS_BL_DONE_MASK) { + if (test_bit(payload->fifo_idx, &core->cdm_status)) { CAM_DBG(CAM_CDM, "CDM HW BL done IRQ"); - complete(&core->bl_complete); + complete(&core->bl_fifo[payload->fifo_idx] + .bl_complete); } } if (payload->irq_status & - CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) { + CAM_CDM_IRQ_STATUS_ERRORS) { CAM_ERR_RATE_LIMIT(CAM_CDM, - "Invalid command IRQ, Need HW reset\n"); - atomic_inc(&core->error); + "CDM Error IRQ status %d\n", + payload->irq_status); + set_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status); + mutex_lock(&cdm_hw->hw_mutex); + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; + i++) + mutex_lock(&core->bl_fifo[i].fifo_lock); cam_hw_cdm_dump_core_debug_registers(cdm_hw); - } - if (payload->irq_status & - CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) { - CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n"); - atomic_inc(&core->error); - cam_hw_cdm_dump_core_debug_registers(cdm_hw); - atomic_dec(&core->error); - } - if (payload->irq_status & - CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) { - CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n"); - atomic_inc(&core->error); - cam_hw_cdm_dump_core_debug_registers(cdm_hw); - atomic_dec(&core->error); + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; + i++) + mutex_unlock(&core->bl_fifo[i].fifo_lock); + mutex_unlock(&cdm_hw->hw_mutex); + if (!(payload->irq_status & + CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK)) + clear_bit(CAM_CDM_ERROR_HW_STATUS, + &core->cdm_status); } kfree(payload); } else { @@ -629,17 +1070,24 @@ static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain, { struct cam_hw_info *cdm_hw = NULL; struct cam_cdm *core = NULL; + int i; if (token) { cdm_hw = (struct cam_hw_info *)token; core = (struct cam_cdm *)cdm_hw->core_info; - 
atomic_inc(&core->error); + set_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status); + mutex_lock(&cdm_hw->hw_mutex); + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&core->bl_fifo[i].fifo_lock); cam_hw_cdm_dump_core_debug_registers(cdm_hw); + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&core->bl_fifo[i].fifo_lock); + mutex_unlock(&cdm_hw->hw_mutex); CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n", (void *)iova); cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT, (void *)iova); - atomic_dec(&core->error); + clear_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status); } else { CAM_ERR(CAM_CDM, "Invalid token"); } @@ -650,46 +1098,88 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data) { struct cam_hw_info *cdm_hw = data; struct cam_cdm *cdm_core = cdm_hw->core_info; - struct cam_cdm_work_payload *payload; + struct cam_cdm_work_payload *payload[CAM_CDM_BL_FIFO_MAX] = {0}; + uint32_t user_data = 0; + uint32_t irq_status[CAM_CDM_BL_FIFO_MAX] = {0}; bool work_status; + int i; CAM_DBG(CAM_CDM, "Got irq"); - payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC); - if (payload) { - if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, - &payload->irq_status)) { + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) { + if (cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_status, + &irq_status[i])) { CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status"); } - if (!payload->irq_status) { - CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n"); - kfree(payload); - return IRQ_HANDLED; + } + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) { + if (!irq_status[i]) { + cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_clear, + irq_status[i]); + continue; } - if (payload->irq_status & - CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) { - if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA, - &payload->irq_data)) { + + payload[i] = kzalloc(sizeof(struct cam_cdm_work_payload), + GFP_ATOMIC); + + if (!payload[i]) + continue; + + if (irq_status[i] & + CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) { + if (cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->usr_data, + &user_data)) { CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ data"); + kfree(payload[i]); + return IRQ_HANDLED; } + + payload[i]->irq_data = user_data >> (i * 0x8); + + if (payload[i]->irq_data == + CAM_CDM_DBG_GEN_IRQ_USR_DATA) + CAM_INFO(CAM_CDM, + "Debug gen_irq received"); } - CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status); - payload->hw = cdm_hw; - INIT_WORK((struct work_struct *)&payload->work, + + payload[i]->fifo_idx = i; + payload[i]->irq_status = irq_status[i]; + payload[i]->hw = cdm_hw; + + INIT_WORK((struct work_struct *)&payload[i]->work, cam_hw_cdm_work); - if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR, - payload->irq_status)) - CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear"); - if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01)) - CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd"); - work_status = queue_work(cdm_core->work_queue, &payload->work); + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_clear, + payload[i]->irq_status)) { + CAM_ERR(CAM_CDM, + "Failed to Write CDM HW IRQ Clear"); + kfree(payload[i]); + return IRQ_HANDLED; + } + + work_status = queue_work( + cdm_core->bl_fifo[i].work_queue, + &payload[i]->work); + if (work_status == false) { - CAM_ERR(CAM_CDM, "Failed to queue work for irq=0x%x", - payload->irq_status); - kfree(payload); + CAM_ERR(CAM_CDM, + 
"Failed to queue work for irq=0x%x", + payload[i]->irq_status); + kfree(payload[i]); } } + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[0]->irq_clear_cmd, + 0x01)) + CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd 0"); + return IRQ_HANDLED; } @@ -699,27 +1189,32 @@ int cam_hw_cdm_alloc_genirq_mem(void *hw_priv) struct cam_mem_mgr_request_desc genirq_alloc_cmd; struct cam_mem_mgr_memory_desc genirq_alloc_out; struct cam_cdm *cdm_core = NULL; - int rc = -EINVAL; + int rc = -EINVAL, i; if (!hw_priv) return rc; cdm_core = (struct cam_cdm *)cdm_hw->core_info; genirq_alloc_cmd.align = 0; - genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE); genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure; genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE; - rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd, - &genirq_alloc_out); - if (rc) { - CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc); - goto end; + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + genirq_alloc_cmd.size = (8 * + cdm_core->bl_fifo[i].bl_depth); + rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd, + &genirq_alloc_out); + if (rc) { + CAM_ERR(CAM_CDM, + "Failed to get genirq cmd space rc=%d", + rc); + goto end; + } + cdm_core->gen_irq[i].handle = genirq_alloc_out.mem_handle; + cdm_core->gen_irq[i].vaddr = (genirq_alloc_out.iova & + 0xFFFFFFFF); + cdm_core->gen_irq[i].kmdvaddr = genirq_alloc_out.kva; + cdm_core->gen_irq[i].size = genirq_alloc_out.len; } - cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle; - cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF); - cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva; - cdm_core->gen_irq.size = genirq_alloc_out.len; - end: return rc; } @@ -729,28 +1224,292 @@ int cam_hw_cdm_release_genirq_mem(void *hw_priv) struct cam_hw_info *cdm_hw = hw_priv; struct cam_cdm *cdm_core = NULL; struct cam_mem_mgr_memory_desc genirq_release_cmd; - int rc = -EINVAL; + int rc = -EINVAL, i; if (!hw_priv) return rc; cdm_core = (struct cam_cdm *)cdm_hw->core_info; - genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle; - rc = cam_mem_mgr_release_mem(&genirq_release_cmd); - if (rc) - CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw"); + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + genirq_release_cmd.mem_handle = cdm_core->gen_irq[i].handle; + rc = cam_mem_mgr_release_mem(&genirq_release_cmd); + if (rc) + CAM_ERR(CAM_CDM, + "Failed to put genirq cmd space for hw rc %d", + rc); + } return rc; } +int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle) +{ + struct cam_cdm *cdm_core = NULL; + long time_left; + int i, rc = -EIO; + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + reinit_completion(&cdm_core->reset_complete); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_mask, + 0x70003)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask"); + goto end; + } + } + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->rst_cmd, 0x9)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); + goto end; + } + + CAM_DBG(CAM_CDM, "Waiting for CDM HW reset done"); + time_left = wait_for_completion_timeout(&cdm_core->reset_complete, + msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT)); + + if (time_left <= 0) { + rc = -ETIMEDOUT; + CAM_ERR(CAM_CDM, "CDM HW 
reset Wait failed rc=%d", rc); + goto end; + } + + rc = cam_hw_cdm_set_cdm_core_cfg(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure CDM rc=%d", rc); + goto end; + } + + rc = cam_hw_cdm_set_cdm_blfifo_cfg(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure CDM fifo rc=%d", rc); + goto end; + } + + cam_hw_cdm_reset_cleanup(cdm_hw, handle); +end: + clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock); + + return rc; +} + +int cam_hw_cdm_handle_error_info( + struct cam_hw_info *cdm_hw, + uint32_t handle) +{ + struct cam_cdm *cdm_core = NULL; + struct cam_cdm_bl_cb_request_entry *node = NULL; + long time_left; + int i, rc = -EIO, reset_hw_hdl = 0x0; + uint32_t current_bl_data = 0, current_fifo = 0, current_tag = 0; + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + set_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status); + reinit_completion(&cdm_core->reset_complete); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); + + rc = cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->current_bl_len, + &current_bl_data); + + current_fifo = ((CAM_CDM_CURRENT_BL_FIFO & current_bl_data) + >> CAM_CDM_CURRENT_BL_FIFO_SHIFT); + current_tag = ((CAM_CDM_CURRENT_BL_TAG & current_bl_data) + >> CAM_CDM_CURRENT_BL_TAG_SHIFT); + + if (current_fifo >= CAM_CDM_BL_FIFO_MAX) { + rc = -EFAULT; + goto end; + } + + CAM_ERR(CAM_CDM, "Hang detected for fifo %d with tag 0x%x", + current_fifo, current_tag); + + /* dump cdm registers for further debug */ + cam_hw_cdm_dump_core_debug_registers(cdm_hw); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_mask, + 0x70003)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask"); + goto end; + } + } + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->rst_cmd, 0x9)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); + goto end; + } + + CAM_DBG(CAM_CDM, "Waiting for CDM HW reset done"); + time_left = wait_for_completion_timeout(&cdm_core->reset_complete, + msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT)); + + if (time_left <= 0) { + rc = -ETIMEDOUT; + CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc); + goto end; + } + + rc = cam_hw_cdm_set_cdm_core_cfg(cdm_hw); + + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure CDM rc=%d", rc); + goto end; + } + + rc = cam_hw_cdm_set_cdm_blfifo_cfg(cdm_hw); + + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure CDM fifo rc=%d", rc); + goto end; + } + + node = list_first_entry_or_null( + &cdm_core->bl_fifo[current_fifo].bl_request_list, + struct cam_cdm_bl_cb_request_entry, entry); + + if (node != NULL) { + if (node->request_type == CAM_HW_CDM_BL_CB_CLIENT) { + cam_cdm_notify_clients(cdm_hw, + CAM_CDM_CB_STATUS_HW_ERROR, + (void *)node); + } else if (node->request_type == CAM_HW_CDM_BL_CB_INTERNAL) { + CAM_ERR(CAM_CDM, "Invalid node=%pK %d", node, + node->request_type); + } + list_del_init(&node->entry); + kfree(node); + } + + cam_hw_cdm_reset_cleanup(cdm_hw, reset_hw_hdl); +end: + clear_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status); + clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock); + + return rc; +} + +int cam_hw_cdm_flush_hw(struct cam_hw_info
*cdm_hw, uint32_t handle) +{ + struct cam_cdm *cdm_core = NULL; + int rc = 0; + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + set_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status); + rc = cam_hw_cdm_reset_hw(cdm_hw, handle); + clear_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status); + + return rc; +} + +int cam_hw_cdm_handle_error( + struct cam_hw_info *cdm_hw, + uint32_t handle) +{ + struct cam_cdm *cdm_core = NULL; + int rc = 0; + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + /* First pause CDM, If it fails still proceed to dump debug info */ + cam_hw_cdm_enable_core(cdm_hw, false); + + rc = cam_hw_cdm_handle_error_info(cdm_hw, handle); + + return rc; +} + +int cam_hw_cdm_get_cdm_config(struct cam_hw_info *cdm_hw) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cdm *core = NULL; + int rc = 0; + + core = (struct cam_cdm *)cdm_hw->core_info; + soc_info = &cdm_hw->soc_info; + rc = cam_soc_util_enable_platform_resource(soc_info, true, + CAM_SVS_VOTE, true); + if (rc) { + CAM_ERR(CAM_CDM, "Enable platform failed for dev %s", + soc_info->dev_name); + goto end; + } else { + CAM_DBG(CAM_CDM, "CDM init success"); + cdm_hw->hw_state = CAM_HW_STATE_POWER_UP; + } + + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cdm_hw_version, + &core->hw_version)) { + CAM_ERR(CAM_CDM, "Failed to read CDM HW Version"); + rc = -EIO; + goto disable_platform_resource; + } + + if (core->offsets->cmn_reg->cam_version) { + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cam_version->hw_version, + &core->hw_family_version)) { + CAM_ERR(CAM_CDM, "Failed to read CDM family Version"); + rc = -EIO; + goto disable_platform_resource; + } + } + + CAM_DBG(CAM_CDM, + "CDM Hw version read success family =%x hw =%x", + core->hw_family_version, core->hw_version); + + core->ops = cam_cdm_get_ops(core->hw_version, NULL, + false); + + if (!core->ops) { + CAM_ERR(CAM_CDM, "Failed to util ops for cdm hw name %s", + core->name); + rc = -EINVAL; + goto disable_platform_resource; + } + +disable_platform_resource: + rc = cam_soc_util_disable_platform_resource(soc_info, true, true); + + if (rc) { + CAM_ERR(CAM_CDM, "disable platform failed for dev %s", + soc_info->dev_name); + } else { + CAM_DBG(CAM_CDM, "CDM Deinit success"); + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + } +end: + return rc; +} + int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size) { struct cam_hw_info *cdm_hw = hw_priv; struct cam_hw_soc_info *soc_info = NULL; struct cam_cdm *cdm_core = NULL; - int rc; - long time_left; + int rc, i, reset_hw_hdl = 0x0; if (!hw_priv) return -EINVAL; @@ -768,31 +1527,25 @@ int cam_hw_cdm_init(void *hw_priv, CAM_DBG(CAM_CDM, "Enable soc done"); /* Before triggering the reset to HW, clear the reset complete */ - atomic_set(&cdm_core->error, 0); - atomic_set(&cdm_core->bl_done, 0); - reinit_completion(&cdm_core->reset_complete); - reinit_completion(&cdm_core->bl_complete); + clear_bit(CAM_CDM_ERROR_HW_STATUS, &cdm_core->cdm_status); - if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) { - CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask"); - goto disable_return; - } - if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) { - CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); - goto disable_return; + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + clear_bit(i, &cdm_core->cdm_status); + reinit_completion(&cdm_core->bl_fifo[i].bl_complete); } - CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone"); - time_left = wait_for_completion_timeout(&cdm_core->reset_complete, - 
msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT)); + rc = cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl); - if (time_left <= 0) { + if (rc) { CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc); goto disable_return; } else { CAM_DBG(CAM_CDM, "CDM Init success"); cdm_hw->hw_state = CAM_HW_STATE_POWER_UP; - cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003); + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_mask, + 0x70003); rc = 0; goto end; } @@ -830,7 +1583,7 @@ int cam_hw_cdm_deinit(void *hw_priv, int cam_hw_cdm_probe(struct platform_device *pdev) { - int rc; + int rc, len = 0, i, j; struct cam_hw_info *cdm_hw = NULL; struct cam_hw_intf *cdm_hw_intf = NULL; struct cam_cdm *cdm_core = NULL; @@ -838,6 +1591,7 @@ int cam_hw_cdm_probe(struct platform_device *pdev) struct cam_cpas_register_params cpas_parms; struct cam_ahb_vote ahb_vote; struct cam_axi_vote axi_vote = {0}; + char cdm_name[128], work_q_name[128]; cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL); if (!cdm_hw_intf) @@ -880,16 +1634,17 @@ int cam_hw_cdm_probe(struct platform_device *pdev) else cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM; - cdm_core->bl_tag = 0; cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name); + + CAM_DBG(CAM_CDM, "cdm_name %s", cdm_core->name); + if (cdm_core->id >= CAM_CDM_MAX) { CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s", cdm_core->name); goto release_private_mem; } - INIT_LIST_HEAD(&cdm_core->bl_request_list); + init_completion(&cdm_core->reset_complete); - init_completion(&cdm_core->bl_complete); cdm_hw_intf->hw_priv = cdm_hw; cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps; cdm_hw_intf->hw_ops.init = cam_hw_cdm_init; @@ -906,19 +1661,47 @@ int cam_hw_cdm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cdm_hw_intf); - rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure); + snprintf(cdm_name, sizeof(cdm_name), "%s%d", + cdm_hw->soc_info.label_name, cdm_hw->soc_info.index); + + rc = cam_smmu_get_handle(cdm_name, &cdm_core->iommu_hdl.non_secure); if (rc < 0) { - CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed"); - goto unlock_release_mem; + if (rc != -EALREADY) { + CAM_ERR(CAM_CDM, + "%s get iommu handle failed, rc = %d", + cdm_name, rc); + goto unlock_release_mem; + } + rc = 0; } + cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, cam_hw_cdm_iommu_fault_handler, cdm_hw); cdm_core->iommu_hdl.secure = -1; - cdm_core->work_queue = alloc_workqueue(cdm_core->name, - WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, - CAM_CDM_INFLIGHT_WORKS); + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + INIT_LIST_HEAD(&cdm_core->bl_fifo[i].bl_request_list); + + mutex_init(&cdm_core->bl_fifo[i].fifo_lock); + + init_completion(&cdm_core->bl_fifo[i].bl_complete); + + len = strlcpy(work_q_name, cdm_core->name, + sizeof(cdm_core->name)); + snprintf(work_q_name + len, sizeof(work_q_name) - len, "%d", i); + cdm_core->bl_fifo[i].work_queue = alloc_workqueue(work_q_name, + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, + CAM_CDM_INFLIGHT_WORKS); + if (!cdm_core->bl_fifo[i].work_queue) { + CAM_ERR(CAM_CDM, + "Workqueue allocation failed for FIFO %d, cdm %s", + i, cdm_core->name); + goto failed_workq_create; + } + + CAM_DBG(CAM_CDM, "wq %s", work_q_name); + } rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info, cam_hw_cdm_irq, cdm_hw); @@ -926,12 +1709,12 @@ int cam_hw_cdm_probe(struct platform_device *pdev) CAM_ERR(CAM_CDM, "Failed to request platform resource"); goto 
destroy_non_secure_hdl; } - cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb; cpas_parms.cell_index = cdm_hw->soc_info.index; cpas_parms.dev = &pdev->dev; cpas_parms.userdata = cdm_hw_intf; - strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH); + strlcpy(cpas_parms.identifier, cdm_hw->soc_info.label_name, + CAM_HW_IDENTIFIER_LENGTH); rc = cam_cpas_register_client(&cpas_parms); if (rc) { CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed"); @@ -956,6 +1739,33 @@ int cam_hw_cdm_probe(struct platform_device *pdev) goto cpas_unregister; } + rc = cam_hw_cdm_get_cdm_config(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to get cdm configuration rc = %d", rc); + goto cpas_stop; + } + + if (cdm_core->hw_version < CAM_CDM_VERSION_2_0) { + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + cdm_core->bl_fifo[i].bl_depth = + CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT; + CAM_DBG(CAM_CDM, "Setting FIFO%d length to %d", + i, cdm_core->bl_fifo[i].bl_depth); + } + } else { + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + cdm_core->bl_fifo[i].bl_depth = + soc_private->fifo_depth[i]; + CAM_DBG(CAM_CDM, "Setting FIFO%d length to %d", + i, cdm_core->bl_fifo[i].bl_depth); + } + } + + cdm_core->arbitration = cam_cdm_get_arbitration_type( + cdm_core->hw_version, cdm_core->id); + + cdm_core->cdm_status = CAM_CDM_HW_INIT_STATUS; + rc = cam_hw_cdm_init(cdm_hw, NULL, 0); if (rc) { CAM_ERR(CAM_CDM, "Failed to Init CDM HW"); @@ -963,30 +1773,10 @@ int cam_hw_cdm_probe(struct platform_device *pdev) } cdm_hw->open_count++; - if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION, - &cdm_core->hw_version)) { - CAM_ERR(CAM_CDM, "Failed to read CDM HW Version"); - goto deinit; - } - - if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION, - &cdm_core->hw_family_version)) { - CAM_ERR(CAM_CDM, "Failed to read CDM family Version"); - goto deinit; - } - - CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x", - cdm_core->hw_family_version, cdm_core->hw_version); - cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL, - false); - if (!cdm_core->ops) { - CAM_ERR(CAM_CDM, "Failed to util ops for hw"); - goto deinit; - } - if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version, &cdm_core->version)) { - CAM_ERR(CAM_CDM, "Failed to set cam he version for hw"); + CAM_ERR(CAM_CDM, "Failed to set cam hw version for hw"); + rc = -EINVAL; goto deinit; } @@ -1014,7 +1804,7 @@ int cam_hw_cdm_probe(struct platform_device *pdev) cdm_hw->open_count--; mutex_unlock(&cdm_hw->hw_mutex); - CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx); + CAM_DBG(CAM_CDM, "%s probe successful", cdm_core->name); return rc; @@ -1031,9 +1821,11 @@ cpas_unregister: release_platform_resource: if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info)) CAM_ERR(CAM_CDM, "Release platform resource failed"); - - flush_workqueue(cdm_core->work_queue); - destroy_workqueue(cdm_core->work_queue); +failed_workq_create: + for (j = 0; j < i; j++) { + flush_workqueue(cdm_core->bl_fifo[j].work_queue); + destroy_workqueue(cdm_core->bl_fifo[j].work_queue); + } destroy_non_secure_hdl: cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, NULL, cdm_hw); @@ -1053,7 +1845,7 @@ release_mem: int cam_hw_cdm_remove(struct platform_device *pdev) { - int rc = -EBUSY; + int rc = -EBUSY, i; struct cam_hw_info *cdm_hw = NULL; struct cam_hw_intf *cdm_hw_intf = NULL; struct cam_cdm *cdm_core = NULL; @@ -1102,8 +1894,10 @@ int cam_hw_cdm_remove(struct platform_device *pdev) if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info)) 
CAM_ERR(CAM_CDM, "Release platform resource failed"); - flush_workqueue(cdm_core->work_queue); - destroy_workqueue(cdm_core->work_queue); + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + flush_workqueue(cdm_core->bl_fifo[i].work_queue); + destroy_workqueue(cdm_core->bl_fifo[i].work_queue); + } if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure)) CAM_ERR(CAM_CDM, "Release iommu secure hdl failed"); diff --git a/drivers/cam_cdm/cam_cdm_hw_reg_1_0.h b/drivers/cam_cdm/cam_cdm_hw_reg_1_0.h new file mode 100644 index 0000000000..f1564dfafc --- /dev/null +++ b/drivers/cam_cdm/cam_cdm_hw_reg_1_0.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include "cam_cdm.h" + +static struct cam_version_reg cdm_hw_1_0_titan_version = { + .hw_version = 0x4, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_1_0_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x7F, + .rb_num_fifo = 0x1, + .rb_next_fifo_shift = 0x0, +}; + +static struct cam_cdm_irq_regs cdm_hw_1_0_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_1_0_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_perf_mon_regs cdm_1_0_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_common_regs cdm_hw_1_0_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = &cdm_hw_1_0_titan_version, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_1_0_bl_pending_req0, + NULL, + }, + .comp_wait = { NULL, NULL }, + .perf_mon = { + &cdm_1_0_perf_mon0, + NULL, + }, + .scratch = { + &cdm_1_0_scratch_reg0, + &cdm_1_0_scratch_reg1, + &cdm_1_0_scratch_reg2, + &cdm_1_0_scratch_reg3, + &cdm_1_0_scratch_reg4, + &cdm_1_0_scratch_reg5, + &cdm_1_0_scratch_reg6, + &cdm_1_0_scratch_reg7, + NULL, + NULL, + NULL, + NULL, + }, + .perf_reg = NULL, + .icl_reg = NULL, + .spare = 0x200, +}; + +static struct cam_cdm_common_reg_data cdm_hw_1_0_cmn_reg_data = { + .num_bl_fifo = 0x1, + .num_bl_fifo_irq = 0x1, + .num_bl_pending_req_reg = 0x1, + .num_scratch_reg = 0x8, +}; + 
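+/*
+ * CDM 1.0 exposes a single BL FIFO and a single IRQ register block, so the
+ * unused bl_fifo_reg/irq_reg slots in the table below stay NULL and
+ * reg_data->num_bl_fifo bounds every per-FIFO loop in the core driver.
+ * The common offsets are consumed indirectly, e.g. (illustrative sketch,
+ * mirroring cam_hw_cdm_get_cdm_config() above):
+ *   cam_cdm_read_hw_reg(cdm_hw,
+ *           core->offsets->cmn_reg->cdm_hw_version, &core->hw_version);
+ */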
+static struct cam_cdm_hw_reg_offset cam_cdm_1_0_reg_offset = { + .cmn_reg = &cdm_hw_1_0_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_1_0_bl_fifo0, + NULL, + NULL, + NULL, + }, + .irq_reg = { + &cdm_hw_1_0_irq0, + NULL, + NULL, + NULL, + }, + .reg_data = &cdm_hw_1_0_cmn_reg_data, +}; diff --git a/drivers/cam_cdm/cam_cdm_hw_reg_1_1.h b/drivers/cam_cdm/cam_cdm_hw_reg_1_1.h new file mode 100644 index 0000000000..d5a0b10633 --- /dev/null +++ b/drivers/cam_cdm/cam_cdm_hw_reg_1_1.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include "cam_cdm.h" + +static struct cam_version_reg cdm_hw_1_1_titan_version = { + .hw_version = 0x4, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_1_1_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x7f, + .rb_num_fifo = 0x1, + .rb_next_fifo_shift = 0x0, +}; + +static struct cam_cdm_irq_regs cdm_hw_1_1_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_1_1_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_perf_mon_regs cdm_1_1_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_comp_wait_status cdm_1_1_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_1_1_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_common_regs cdm_hw_1_1_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = &cdm_hw_1_1_titan_version, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_1_1_bl_pending_req0, + NULL, + }, + .comp_wait = { + &cdm_1_1_comp_wait_status0, + &cdm_1_1_comp_wait_status1, + }, + .perf_mon = { + &cdm_1_1_perf_mon0, + NULL, + }, + .scratch = { + &cdm_1_1_scratch_reg0, + &cdm_1_1_scratch_reg1, + &cdm_1_1_scratch_reg2, + &cdm_1_1_scratch_reg3, + &cdm_1_1_scratch_reg4, + &cdm_1_1_scratch_reg5, + &cdm_1_1_scratch_reg6, + &cdm_1_1_scratch_reg7, + NULL, + NULL, + NULL, + NULL, + }, + .perf_reg = NULL, + .icl_reg = NULL, + .spare = 0x1fc, +}; + +static struct cam_cdm_common_reg_data 
cdm_hw_1_1_cmn_reg_data = { + .num_bl_fifo = 0x1, + .num_bl_fifo_irq = 0x1, + .num_bl_pending_req_reg = 0x1, + .num_scratch_reg = 0x8, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_1_1_reg_offset = { + .cmn_reg = &cdm_hw_1_1_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_1_1_bl_fifo0, + NULL, + NULL, + NULL, + }, + .irq_reg = { + &cdm_hw_1_1_irq0, + NULL, + NULL, + NULL, + }, + .reg_data = &cdm_hw_1_1_cmn_reg_data, +}; diff --git a/drivers/cam_cdm/cam_cdm_hw_reg_1_2.h b/drivers/cam_cdm/cam_cdm_hw_reg_1_2.h new file mode 100644 index 0000000000..08949b7dae --- /dev/null +++ b/drivers/cam_cdm/cam_cdm_hw_reg_1_2.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include "cam_cdm.h" + +static struct cam_version_reg cdm_hw_1_2_titan_version = { + .hw_version = 0x4, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_1_2_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x7f, + .rb_num_fifo = 0x1, + .rb_next_fifo_shift = 0x0, +}; + +static struct cam_cdm_irq_regs cdm_hw_1_2_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_1_2_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_perf_mon_regs cdm_1_2_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_comp_wait_status cdm_1_2_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_1_2_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_perf_regs cdm_1_2_perf = { + .count_cfg_0 = 0x180, + .always_count_val = 0x184, + .busy_count_val = 0x188, + .stall_axi_count_val = 0x18c, + .count_status = 0x190, +}; + +static struct cam_cdm_icl_data_regs cdm_1_2_icl_data = { + .icl_last_data_0 = 0x1c0, + .icl_last_data_1 = 0x1c4, + .icl_last_data_2 = 0x1c8, + .icl_inv_data = 0x1cc, +}; + +static struct cam_cdm_icl_regs cdm_1_2_icl = { + .data_regs = &cdm_1_2_icl_data, + .misc_regs = NULL, +}; + +static struct cam_cdm_common_regs cdm_hw_1_2_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = &cdm_hw_1_2_titan_version, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, 
+ .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_1_2_bl_pending_req0, + NULL, + }, + .comp_wait = { + &cdm_1_2_comp_wait_status0, + &cdm_1_2_comp_wait_status1, + }, + .perf_mon = { + &cdm_1_2_perf_mon0, + NULL, + }, + .scratch = { + &cdm_1_2_scratch_reg0, + &cdm_1_2_scratch_reg1, + &cdm_1_2_scratch_reg2, + &cdm_1_2_scratch_reg3, + &cdm_1_2_scratch_reg4, + &cdm_1_2_scratch_reg5, + &cdm_1_2_scratch_reg6, + &cdm_1_2_scratch_reg7, + NULL, + NULL, + NULL, + NULL, + }, + .perf_reg = &cdm_1_2_perf, + .icl_reg = &cdm_1_2_icl, + .spare = 0x1fc, +}; + +static struct cam_cdm_common_reg_data cdm_hw_1_2_cmn_reg_data = { + .num_bl_fifo = 0x1, + .num_bl_fifo_irq = 0x1, + .num_bl_pending_req_reg = 0x1, + .num_scratch_reg = 0x8, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_1_2_reg_offset = { + .cmn_reg = &cdm_hw_1_2_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_1_2_bl_fifo0, + NULL, + NULL, + NULL, + }, + .irq_reg = { + &cdm_hw_1_2_irq0, + NULL, + NULL, + NULL, + }, + .reg_data = &cdm_hw_1_2_cmn_reg_data, +}; diff --git a/drivers/cam_cdm/cam_cdm_hw_reg_2_0.h b/drivers/cam_cdm/cam_cdm_hw_reg_2_0.h new file mode 100644 index 0000000000..1e224c6d9c --- /dev/null +++ b/drivers/cam_cdm/cam_cdm_hw_reg_2_0.h @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include "cam_cdm.h" + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_0_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_0_bl_pending_req1 = { + .rb_offset = 0x70, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq1 = { + .irq_mask = 0x130, + .irq_clear = 0x134, + .irq_clear_cmd = 0x138, + .irq_set = 0x13c, + .irq_set_cmd = 0x140, + .irq_status = 0x144, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq2 = { + .irq_mask = 0x230, + .irq_clear = 0x234, + .irq_clear_cmd = 0x238, + .irq_set = 0x23c, + .irq_set_cmd = 0x240, + .irq_status = 0x244, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq3 = { + .irq_mask = 0x330, + .irq_clear = 0x334, + .irq_clear_cmd = 0x338, + .irq_set = 0x33c, + .irq_set_cmd = 0x340, + .irq_status = 0x344, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo1 = { + .bl_fifo_base = 0x150, + .bl_fifo_len = 0x154, + .bl_fifo_store = 0x158, + .bl_fifo_cfg = 0x15c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo2 = { + .bl_fifo_base = 0x250, + .bl_fifo_len = 0x254, + .bl_fifo_store = 0x258, + .bl_fifo_cfg = 0x25c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo3 = { + .bl_fifo_base = 0x350, + .bl_fifo_len = 0x354, + .bl_fifo_store = 0x358, + .bl_fifo_cfg = 0x35c, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct 
cam_cdm_scratch_reg cdm_2_0_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg8 = { + .scratch_reg = 0xb0, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg9 = { + .scratch_reg = 0xb4, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg10 = { + .scratch_reg = 0xb8, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg11 = { + .scratch_reg = 0xbc, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_0_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_0_perf_mon1 = { + .perf_mon_ctrl = 0x120, + .perf_mon_0 = 0x124, + .perf_mon_1 = 0x128, + .perf_mon_2 = 0x12c, +}; + +static struct cam_cdm_comp_wait_status cdm_2_0_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_2_0_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_icl_data_regs cdm_2_0_icl_data = { + .icl_last_data_0 = 0x1c0, + .icl_last_data_1 = 0x1c4, + .icl_last_data_2 = 0x1c8, + .icl_inv_data = 0x1cc, +}; + +static struct cam_cdm_icl_misc_regs cdm_2_0_icl_misc = { + .icl_inv_bl_addr = 0x1d0, + .icl_status = 0x1d4, +}; + +static struct cam_cdm_icl_regs cdm_2_0_icl = { + .data_regs = &cdm_2_0_icl_data, + .misc_regs = &cdm_2_0_icl_misc, +}; + +static struct cam_cdm_common_regs cdm_hw_2_0_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = NULL, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_2_0_bl_pending_req0, + &cdm_hw_2_0_bl_pending_req1, + }, + .comp_wait = { + &cdm_2_0_comp_wait_status0, + &cdm_2_0_comp_wait_status1, + }, + .perf_mon = { + &cdm_2_0_perf_mon0, + &cdm_2_0_perf_mon1, + }, + .scratch = { + &cdm_2_0_scratch_reg0, + &cdm_2_0_scratch_reg1, + &cdm_2_0_scratch_reg2, + &cdm_2_0_scratch_reg3, + &cdm_2_0_scratch_reg4, + &cdm_2_0_scratch_reg5, + &cdm_2_0_scratch_reg6, + &cdm_2_0_scratch_reg7, + &cdm_2_0_scratch_reg8, + &cdm_2_0_scratch_reg9, + &cdm_2_0_scratch_reg10, + &cdm_2_0_scratch_reg11, + }, + .perf_reg = NULL, + .icl_reg = &cdm_2_0_icl, + .spare = 0x1fc, +}; + +static struct cam_cdm_common_reg_data cdm_hw_2_0_cmn_reg_data = { + .num_bl_fifo = 0x4, + .num_bl_fifo_irq = 0x4, + .num_bl_pending_req_reg = 0x2, + .num_scratch_reg = 0xc, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_2_0_reg_offset = { + .cmn_reg = &cdm_hw_2_0_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_2_0_bl_fifo0, + &cdm_hw_2_0_bl_fifo1, + &cdm_hw_2_0_bl_fifo2, + &cdm_hw_2_0_bl_fifo3, + }, + .irq_reg = { + &cdm_hw_2_0_irq0, + &cdm_hw_2_0_irq1, + &cdm_hw_2_0_irq2, + &cdm_hw_2_0_irq3, + }, + .reg_data = &cdm_hw_2_0_cmn_reg_data, +}; diff --git a/drivers/cam_cdm/cam_cdm_intf.c b/drivers/cam_cdm/cam_cdm_intf.c index 
990ee39f00..bbb97d21c1 100644 --- a/drivers/cam_cdm/cam_cdm_intf.c +++ b/drivers/cam_cdm/cam_cdm_intf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #include @@ -15,6 +15,7 @@ #include "cam_cdm_virtual.h" #include "cam_soc_util.h" #include "cam_cdm_soc.h" +#include "cam_cdm_core_common.h" static struct cam_cdm_intf_mgr cdm_mgr; static DEFINE_MUTEX(cam_cdm_mgr_lock); @@ -77,13 +78,15 @@ static int get_cdm_index_by_id(char *identifier, uint32_t cell_index, uint32_t *hw_index) { int rc = -EPERM, i, j; - char client_name[128]; + char client_name[128], name_index[160]; - CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d", - identifier, cell_index); snprintf(client_name, sizeof(client_name), "%s", identifier); - CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name, - cdm_mgr.cdm_count); + snprintf(name_index, sizeof(name_index), "%s%d", + identifier, cell_index); + + CAM_DBG(CAM_CDM, + "Looking for HW id of =%s or %s and index=%d cdm_count %d", + identifier, name_index, cell_index, cdm_mgr.cdm_count); mutex_lock(&cam_cdm_mgr_lock); for (i = 0; i < cdm_mgr.cdm_count; i++) { mutex_lock(&cdm_mgr.nodes[i].lock); @@ -92,11 +95,14 @@ static int get_cdm_index_by_id(char *identifier, for (j = 0; j < cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) { - CAM_DBG(CAM_CDM, "client name:%s", - cdm_mgr.nodes[i].data->dt_cdm_client_name[j]); + CAM_DBG(CAM_CDM, "client name:%s dev Index: %d", + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + i); if (!strcmp( cdm_mgr.nodes[i].data->dt_cdm_client_name[j], - client_name)) { + client_name) || !strcmp( + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + name_index)) { rc = 0; *hw_index = i; break; @@ -131,9 +137,14 @@ int cam_cdm_get_iommu_handle(char *identifier, mutex_unlock(&cdm_mgr.nodes[i].lock); continue; } + CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d", + cdm_mgr.nodes[i].data->dt_num_supported_clients); for (j = 0; j < cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) { + CAM_DBG(CAM_CDM, "client name:%s dev Index: %d", + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + i); if (!strcmp( cdm_mgr.nodes[i].data->dt_cdm_client_name[j], identifier)) { @@ -155,6 +166,8 @@ int cam_cdm_acquire(struct cam_cdm_acquire_data *data) { int rc = -EPERM; struct cam_hw_intf *hw; + struct cam_hw_info *cdm_hw; + struct cam_cdm *core = NULL; uint32_t hw_index = 0; if ((!data) || (!data->base_array_cnt)) @@ -177,12 +190,17 @@ int cam_cdm_acquire(struct cam_cdm_acquire_data *data) CAM_ERR(CAM_CDM, "Failed to identify associated hw id"); goto end; } else { - CAM_DBG(CAM_CDM, "hw_index:%d", hw_index); hw = cdm_mgr.nodes[hw_index].device; if (hw && hw->hw_ops.process_cmd) { + cdm_hw = hw->hw_priv; + core = (struct cam_cdm *)cdm_hw->core_info; + data->id = core->id; + CAM_DBG(CAM_CDM, + "Device = %s, hw_index = %d, CDM id = %d", + data->identifier, hw_index, data->id); rc = hw->hw_ops.process_cmd(hw->hw_priv, - CAM_CDM_HW_INTF_CMD_ACQUIRE, data, - sizeof(struct cam_cdm_acquire_data)); + CAM_CDM_HW_INTF_CMD_ACQUIRE, data, + sizeof(struct cam_cdm_acquire_data)); if (rc < 0) { CAM_ERR(CAM_CDM, "CDM hw acquire failed"); goto end; @@ -203,6 +221,19 @@ end: } EXPORT_SYMBOL(cam_cdm_acquire); +struct cam_cdm_utils_ops *cam_cdm_publish_ops(void) +{ + struct cam_hw_version cdm_version; + + cdm_version.major = 1; + cdm_version.minor = 0; + cdm_version.incr = 0; + cdm_version.reserved = 0; + + return 
cam_cdm_get_ops(0, &cdm_version, true); +} +EXPORT_SYMBOL(cam_cdm_publish_ops); + int cam_cdm_release(uint32_t handle) { uint32_t hw_index; @@ -379,6 +410,75 @@ int cam_cdm_reset_hw(uint32_t handle) } EXPORT_SYMBOL(cam_cdm_reset_hw); +int cam_cdm_flush_hw(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_FLUSH_HW, &handle, + sizeof(handle)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "CDM hw release failed for handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_flush_hw); + +int cam_cdm_handle_error(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_HANDLE_ERROR, + &handle, + sizeof(handle)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "CDM hw release failed for handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_handle_error); + int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw, struct cam_cdm_private_dt_data *data, enum cam_cdm_type type, uint32_t *index) diff --git a/drivers/cam_cdm/cam_cdm_intf_api.h b/drivers/cam_cdm/cam_cdm_intf_api.h index 9340435371..0c9508db41 100644 --- a/drivers/cam_cdm/cam_cdm_intf_api.h +++ b/drivers/cam_cdm/cam_cdm_intf_api.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _CAM_CDM_API_H_ @@ -14,7 +14,10 @@ enum cam_cdm_id { CAM_CDM_VIRTUAL, CAM_CDM_HW_ANY, - CAM_CDM_CPAS_0, + CAM_CDM_CPAS, + CAM_CDM_IFE, + CAM_CDM_TFE, + CAM_CDM_OPE, CAM_CDM_IPE0, CAM_CDM_IPE1, CAM_CDM_BPS, @@ -29,6 +32,9 @@ enum cam_cdm_cb_status { CAM_CDM_CB_STATUS_PAGEFAULT, CAM_CDM_CB_STATUS_HW_RESET_ONGOING, CAM_CDM_CB_STATUS_HW_RESET_DONE, + CAM_CDM_CB_STATUS_HW_FLUSH, + CAM_CDM_CB_STATUS_HW_RESUBMIT, + CAM_CDM_CB_STATUS_HW_ERROR, CAM_CDM_CB_STATUS_UNKNOWN_ERROR, }; @@ -39,17 +45,26 @@ enum cam_cdm_bl_cmd_addr_type { CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA, }; +/* enum cam_cdm_bl_fifo - interface commands.*/ +enum cam_cdm_bl_fifo_queue { + CAM_CDM_BL_FIFO_0, + CAM_CDM_BL_FIFO_1, + CAM_CDM_BL_FIFO_2, + CAM_CDM_BL_FIFO_3, + CAM_CDM_BL_FIFO_MAX, +}; + /** * struct cam_cdm_acquire_data - Cam CDM acquire data structure * * @identifier : Input identifier string which is the device label from dt - * like vfe, ife, jpeg etc + * like vfe, ife, jpeg etc * @cell_index : Input integer identifier pointing to the cell index from dt * of the device. This can be used to form a unique string * with @identifier like vfe0, ife1, jpeg0 etc * @id : ID of a specific or any CDM HW which needs to be acquired. 
* @userdata : Input private data which will be returned as part - * of callback. + * of callback. * @cam_cdm_callback : Input callback pointer for triggering the * callbacks from CDM driver * @handle : CDM Client handle @@ -57,12 +72,14 @@ enum cam_cdm_bl_cmd_addr_type { * @status : Callback status * @cookie : Cookie if the callback is gen irq status * @base_array_cnt : Input number of ioremapped address pair pointing - * in base_array, needed only if selected cdm is a virtual. + * in base_array, needed only if selected cdm is a virtual. * @base_array : Input pointer to ioremapped address pair arrary - * needed only if selected cdm is a virtual. + * needed only if selected cdm is a virtual. + * @priority : Priority of the client. * @cdm_version : CDM version is output while acquiring HW cdm and - * it is Input while acquiring virtual cdm, Currently fixing it - * to one version below acquire API. + * it is Input while acquiring virtual cdm. + * Currently fixing it to one version below + * acquire API. * @ops : Output pointer updated by cdm driver to the CDM * util ops for this HW version of CDM acquired. * @handle : Output Unique handle generated for this acquire @@ -77,6 +94,7 @@ struct cam_cdm_acquire_data { enum cam_cdm_cb_status status, uint64_t cookie); uint32_t base_array_cnt; struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK]; + enum cam_cdm_bl_fifo_queue priority; struct cam_hw_version cdm_version; struct cam_cdm_utils_ops *ops; uint32_t handle; @@ -92,7 +110,8 @@ struct cam_cdm_acquire_data { * @len : Input length of the BL command, Cannot be more than 1MB and * this is will be validated with offset+size of the memory pointed * by mem_handle - * + * @enable_debug_gen_irq : bool flag to submit extra gen_irq afteR bl_command + * @arbitrate : bool flag to arbitrate on submitted BL boundary */ struct cam_cdm_bl_cmd { union { @@ -102,6 +121,8 @@ struct cam_cdm_bl_cmd { } bl_addr; uint32_t offset; uint32_t len; + bool enable_debug_gen_irq; + bool arbitrate; }; /** @@ -114,6 +135,7 @@ struct cam_cdm_bl_cmd { * @cookie : Cookie if the callback is gen irq status * @type : type of the submitted bl cmd address. * @cmd_arrary_count : Input number of BL commands to be submitted to CDM + * @gen_irq_arb : enum for setting arbitration in gen_irq * @bl_cmd_array : Input payload holding the BL cmd's arrary * to be sumbitted. * @@ -124,6 +146,7 @@ struct cam_cdm_bl_request { uint64_t cookie; enum cam_cdm_bl_cmd_addr_type type; uint32_t cmd_arrary_count; + bool gen_irq_arb; struct cam_cdm_bl_cmd cmd[1]; }; @@ -191,7 +214,7 @@ int cam_cdm_stream_off(uint32_t handle); /** * @brief : API to reset previously acquired CDM, - * this can be only performed only the CDM is private. + * this should be only performed only if the CDM is private. * * @handle : Input handle of the CDM to reset * @@ -220,4 +243,33 @@ void cam_cdm_intf_exit_module(void); * @brief : API to remove CDM hw from platform framework. */ void cam_hw_cdm_exit_module(void); + +/** + * @brief : API to flush previously acquired CDM, + * this should be only performed only if the CDM is private. + * + * @handle : Input handle of the CDM to reset + * + * @return 0 on success + */ +int cam_cdm_flush_hw(uint32_t handle); + +/** + * @brief : API to detect culprit bl_tag in previously acquired CDM, + * this should be only performed only if the CDM is private. 
+ * + * @handle : Input handle of the CDM to reset + * + * @return 0 on success + */ +int cam_cdm_handle_error(uint32_t handle); + +/** + * @brief : API get CDM ops + * + * @return : CDM operations + * + */ +struct cam_cdm_utils_ops *cam_cdm_publish_ops(void); + #endif /* _CAM_CDM_API_H_ */ diff --git a/drivers/cam_cdm/cam_cdm_soc.c b/drivers/cam_cdm/cam_cdm_soc.c index 2fb5d5fe97..3d81270c11 100644 --- a/drivers/cam_cdm/cam_cdm_soc.c +++ b/drivers/cam_cdm/cam_cdm_soc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #include @@ -15,45 +15,35 @@ #include "cam_cdm.h" #include "cam_soc_util.h" #include "cam_io_util.h" +#include "cam_cdm_soc.h" #define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset) #define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute) bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw, - enum cam_cdm_regs reg, uint32_t *value) + uint32_t reg, uint32_t *value) { void __iomem *reg_addr; - struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info; void __iomem *base = cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base; resource_size_t mem_len = cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size; - CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base, - (int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, - reg))); - CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x", (void __iomem *)base, - reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg))); + CAM_DBG(CAM_CDM, "E: b=%pK blen=%d off=%x", (void __iomem *)base, + (int)mem_len, reg); - if ((reg > cdm->offset_tbl->offset_max_size) || - (reg > cdm->offset_tbl->last_offset)) { - CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg); + reg_addr = (base + reg); + if (reg_addr > (base + mem_len)) { + CAM_ERR_RATE_LIMIT(CAM_CDM, + "Invalid mapped region %d", reg); goto permission_error; - } else { - reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG( - cdm->offset_tbl, reg))); - if (reg_addr > (base + mem_len)) { - CAM_ERR_RATE_LIMIT(CAM_CDM, - "Invalid mapped region %d", reg); - goto permission_error; - } - *value = cam_io_r_mb(reg_addr); - CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x", - (void __iomem *)base, reg, - (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), - *value); - return false; } + *value = cam_io_r_mb(reg_addr); + CAM_DBG(CAM_CDM, "X b=%pK off=%x val=%x", + (void __iomem *)base, reg, + *value); + return false; + permission_error: *value = 0; return true; @@ -61,36 +51,27 @@ permission_error: } bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw, - enum cam_cdm_regs reg, uint32_t value) + uint32_t reg, uint32_t value) { void __iomem *reg_addr; - struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info; void __iomem *base = cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base; resource_size_t mem_len = cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size; - CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base, - reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value); + CAM_DBG(CAM_CDM, "E: b=%pK off=%x val=%x", (void __iomem *)base, + reg, value); - if ((reg > cdm->offset_tbl->offset_max_size) || - (reg > cdm->offset_tbl->last_offset)) { - CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n", + reg_addr = (base + reg); + if (reg_addr > (base + mem_len)) { + CAM_ERR_RATE_LIMIT(CAM_CDM, + "Accessing invalid region:%d\n", reg); goto permission_error; - } else { - reg_addr = (base + 
CAM_CDM_OFFSET_FROM_REG( - cdm->offset_tbl, reg)); - if (reg_addr > (base + mem_len)) { - CAM_ERR_RATE_LIMIT(CAM_CDM, - "Accessing invalid region %d:%d\n", - reg, (CAM_CDM_OFFSET_FROM_REG( - cdm->offset_tbl, reg))); - goto permission_error; - } - cam_io_w_mb(value, reg_addr); - return false; } + cam_io_w_mb(value, reg_addr); + return false; + permission_error: return true; @@ -99,7 +80,7 @@ permission_error: int cam_cdm_soc_load_dt_private(struct platform_device *pdev, struct cam_cdm_private_dt_data *ptr) { - int i, rc = -EINVAL; + int i, rc = -EINVAL, num_fifo_entries = 0; ptr->dt_num_supported_clients = of_property_count_strings( pdev->dev.of_node, @@ -111,7 +92,7 @@ int cam_cdm_soc_load_dt_private(struct platform_device *pdev, CAM_ERR(CAM_CDM, "Invalid count of client names count=%d", ptr->dt_num_supported_clients); rc = -EINVAL; - return rc; + goto end; } if (ptr->dt_num_supported_clients < 0) { CAM_DBG(CAM_CDM, "No cdm client names found"); @@ -127,10 +108,43 @@ int cam_cdm_soc_load_dt_private(struct platform_device *pdev, ptr->dt_cdm_client_name[i]); if (rc < 0) { CAM_ERR(CAM_CDM, "Reading cdm-client-names failed"); - break; + goto end; } } + ptr->config_fifo = of_property_read_bool(pdev->dev.of_node, + "config-fifo"); + if (ptr->config_fifo) { + num_fifo_entries = of_property_count_u32_elems( + pdev->dev.of_node, + "fifo-depths"); + if (num_fifo_entries != CAM_CDM_NUM_BL_FIFO) { + CAM_ERR(CAM_CDM, + "Wrong number of configurable FIFOs %d", + num_fifo_entries); + rc = -EINVAL; + goto end; + } + for (i = 0; i < num_fifo_entries; i++) { + rc = of_property_read_u32_index(pdev->dev.of_node, + "fifo-depths", i, &ptr->fifo_depth[i]); + if (rc < 0) { + CAM_ERR(CAM_CDM, + "Unable to read fifo-depth rc %d", + rc); + goto end; + } + CAM_DBG(CAM_CDM, "FIFO%d depth is %d", + i, ptr->fifo_depth[i]); + } + } else { + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + ptr->fifo_depth[i] = CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT; + CAM_DBG(CAM_CDM, "FIFO%d depth is %d", + i, ptr->fifo_depth[i]); + } + } +end: return rc; } @@ -140,6 +154,7 @@ int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw, int rc; struct cam_hw_soc_info *soc_ptr; const struct of_device_id *id; + struct cam_cdm *cdm_core = cdm_hw->core_info; if (!cdm_hw || (cdm_hw->soc_info.soc_private) || !(cdm_hw->soc_info.pdev)) @@ -150,38 +165,44 @@ int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw, rc = cam_soc_util_get_dt_properties(soc_ptr); if (rc != 0) { CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties"); - } else { - soc_ptr->soc_private = kzalloc( - sizeof(struct cam_cdm_private_dt_data), - GFP_KERNEL); - if (!soc_ptr->soc_private) - return -ENOMEM; - - rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev, - soc_ptr->soc_private); - if (rc != 0) { - CAM_ERR(CAM_CDM, "Failed to load CDM dt private data"); - goto error; - } - id = of_match_node(table, soc_ptr->pdev->dev.of_node); - if ((!id) || !(id->data)) { - CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table"); - goto error; - } - CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible); - ((struct cam_cdm *)cdm_hw->core_info)->offset_tbl = - (struct cam_cdm_reg_offset_table *)id->data; - strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name, - id->compatible, - sizeof(((struct cam_cdm *)cdm_hw->core_info)->name)); + goto end; } - return rc; + soc_ptr->soc_private = kzalloc( + sizeof(struct cam_cdm_private_dt_data), + GFP_KERNEL); + if (!soc_ptr->soc_private) + return -ENOMEM; + + rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev, + soc_ptr->soc_private); + if 
(rc != 0) { + CAM_ERR(CAM_CDM, "Failed to load CDM dt private data"); + goto error; + } + + id = of_match_node(table, soc_ptr->pdev->dev.of_node); + if ((!id) || !(id->data)) { + CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table"); + goto error; + } + cdm_core->offsets = + (struct cam_cdm_hw_reg_offset *)id->data; + + CAM_DBG(CAM_CDM, "name %s", cdm_core->name); + + snprintf(cdm_core->name, sizeof(cdm_core->name) + 1, "%s%d", + id->compatible, soc_ptr->index); + + CAM_DBG(CAM_CDM, "name %s", cdm_core->name); + + goto end; error: rc = -EINVAL; kfree(soc_ptr->soc_private); soc_ptr->soc_private = NULL; +end: return rc; } diff --git a/drivers/cam_cdm/cam_cdm_soc.h b/drivers/cam_cdm/cam_cdm_soc.h index b422b34f24..25b5a5dc02 100644 --- a/drivers/cam_cdm/cam_cdm_soc.h +++ b/drivers/cam_cdm/cam_cdm_soc.h @@ -1,17 +1,25 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _CAM_CDM_SOC_H_ #define _CAM_CDM_SOC_H_ +#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0" +#define CAM_HW_CDM_CPAS_NAME_1_0 "qcom,cam-cpas-cdm1_0" +#define CAM_HW_CDM_CPAS_NAME_1_1 "qcom,cam-cpas-cdm1_1" +#define CAM_HW_CDM_CPAS_NAME_1_2 "qcom,cam-cpas-cdm1_2" +#define CAM_HW_CDM_IFE_NAME_1_2 "qcom,cam-ife-cdm1_2" +#define CAM_HW_CDM_CPAS_NAME_2_0 "qcom,cam-cpas-cdm2_0" +#define CAM_HW_CDM_OPE_NAME_2_0 "qcom,cam-ope-cdm2_0" + int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw, const struct of_device_id *table); bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw, - enum cam_cdm_regs reg, uint32_t *value); + uint32_t reg, uint32_t *value); bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw, - enum cam_cdm_regs reg, uint32_t value); + uint32_t reg, uint32_t value); int cam_cdm_intf_mgr_soc_get_dt_properties( struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr); diff --git a/drivers/cam_cdm/cam_cdm_util.c b/drivers/cam_cdm/cam_cdm_util.c index 278dadb18d..89951d24d0 100644 --- a/drivers/cam_cdm/cam_cdm_util.c +++ b/drivers/cam_cdm/cam_cdm_util.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
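For reference, a minimal caller-side sketch of the reworked register helpers prototyped above; they now take raw uint32_t register offsets instead of enum indices and return true on failure. The offset argument and the bit being set are placeholders for illustration only.

/* Illustrative only; not part of the patch. */
static int cam_hw_cdm_set_bit_example(struct cam_hw_info *cdm_hw,
	uint32_t reg_offset)
{
	uint32_t val = 0;

	/*
	 * Both helpers return true when the offset falls outside the
	 * mapped region, false on success.
	 */
	if (cam_cdm_read_hw_reg(cdm_hw, reg_offset, &val))
		return -EIO;

	if (cam_cdm_write_hw_reg(cdm_hw, reg_offset, val | 0x1))
		return -EIO;

	return 0;
}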
*/ #include @@ -38,6 +38,9 @@ static unsigned int CDMCmdHeaderSizes[ 1, /* PERF_CONTROL*/ 3, /* DMI32*/ 3, /* DMI64*/ + 3, /* WaitCompEvent*/ + 3, /* ClearCompEvent*/ + 3, /* WaitPrefetchDisable*/ }; /** @@ -156,11 +159,34 @@ struct cdm_perf_ctrl_cmd { unsigned int cmd : 8; } __attribute__((__packed__)); +struct cdm_wait_comp_event_cmd { + unsigned int reserved : 8; + unsigned int id : 8; + unsigned int id_reserved: 8; + unsigned int cmd : 8; + unsigned int mask1; + unsigned int mask2; +} __attribute__((__packed__)); + +struct cdm_prefetch_disable_event_cmd { + unsigned int reserved : 8; + unsigned int id : 8; + unsigned int id_reserved: 8; + unsigned int cmd : 8; + unsigned int mask1; + unsigned int mask2; +} __attribute__((__packed__)); + uint32_t cdm_get_cmd_header_size(unsigned int command) { return CDMCmdHeaderSizes[command]; } +uint32_t cdm_required_size_dmi(void) +{ + return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); +} + uint32_t cdm_required_size_reg_continuous(uint32_t numVals) { return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals; @@ -172,9 +198,9 @@ uint32_t cdm_required_size_reg_random(uint32_t numRegVals) (2 * numRegVals); } -uint32_t cdm_required_size_dmi(void) +uint32_t cdm_required_size_indirect(void) { - return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); + return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT); } uint32_t cdm_required_size_genirq(void) @@ -182,9 +208,9 @@ uint32_t cdm_required_size_genirq(void) return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ); } -uint32_t cdm_required_size_indirect(void) +uint32_t cdm_required_size_wait_event(void) { - return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT); + return cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT); } uint32_t cdm_required_size_changebase(void) @@ -192,6 +218,16 @@ uint32_t cdm_required_size_changebase(void) return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE); } +uint32_t cdm_required_size_comp_wait(void) +{ + return cdm_get_cmd_header_size(CAM_CDM_COMP_WAIT); +} + +uint32_t cdm_required_size_prefetch_disable(void) +{ + return cdm_get_cmd_header_size(CAM_CDM_WAIT_PREFETCH_DISABLE); +} + uint32_t cdm_offsetof_dmi_addr(void) { return offsetof(struct cdm_dmi_cmd, addr); @@ -202,6 +238,23 @@ uint32_t cdm_offsetof_indirect_addr(void) return offsetof(struct cdm_indirect_cmd, addr); } +uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd, + uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr, + uint32_t length) +{ + struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_CMD_DMI; + pHeader->addr = dmiBufferAddr; + pHeader->length = length; + pHeader->DMIAddr = DMIAddr; + pHeader->DMISel = DMISel; + + pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); + + return pCmdBuffer; +} + uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg, uint32_t numVals, uint32_t *pVals) { @@ -248,23 +301,6 @@ uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals, return dst; } -uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd, - uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr, - uint32_t length) -{ - struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer; - - pHeader->cmd = dmiCmd; - pHeader->addr = dmiBufferAddr; - pHeader->length = length - 1; - pHeader->DMIAddr = DMIAddr; - pHeader->DMISel = DMISel; - - pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); - - return pCmdBuffer; -} - uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr, uint32_t length) { @@ -280,11 +316,50 
@@ uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr, return pCmdBuffer; } +void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata, + bool bit_wr_enable, uint32_t fifo_idx) +{ + struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer; + + CAM_DBG(CAM_CDM, "userdata 0x%x, fifo_idx %d", + userdata, fifo_idx); + + if (bit_wr_enable) + pHeader->reserved = (unsigned int)((fifo_idx << 1) + | (unsigned int)(bit_wr_enable)); + + pHeader->cmd = CAM_CDM_CMD_GEN_IRQ; + pHeader->userdata = (userdata << (8 * fifo_idx)); +} + +uint32_t *cdm_write_wait_event(uint32_t *pcmdbuffer, uint32_t iw, + uint32_t id, uint32_t mask, + uint32_t offset, uint32_t data) +{ + struct cdm_wait_event_cmd *pheader = + (struct cdm_wait_event_cmd *)pcmdbuffer; + + pheader->cmd = CAM_CDM_CMD_WAIT_EVENT; + pheader->mask = mask; + pheader->data = data; + pheader->id = id; + pheader->iw = iw; + pheader->offset = offset; + pheader->iw_reserved = 0; + pheader->offset_reserved = 0; + + pcmdbuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT); + + return pcmdbuffer; +} + uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base) { struct cdm_changebase_cmd *pHeader = (struct cdm_changebase_cmd *)pCmdBuffer; + CAM_DBG(CAM_CDM, "Change to base 0x%x", base); + pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE; pHeader->base = base; pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE); @@ -292,30 +367,63 @@ uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base) return pCmdBuffer; } -void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata) +uint32_t *cdm_write_wait_comp_event( + uint32_t *pCmdBuffer, uint32_t mask1, uint32_t mask2) { - struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer; + struct cdm_wait_comp_event_cmd *pHeader = + (struct cdm_wait_comp_event_cmd *)pCmdBuffer; - pHeader->cmd = CAM_CDM_CMD_GEN_IRQ; - pHeader->userdata = userdata; + pHeader->cmd = CAM_CDM_COMP_WAIT; + pHeader->mask1 = mask1; + pHeader->mask2 = mask2; + + pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_COMP_WAIT); + + return pCmdBuffer; } +uint32_t *cdm_write_wait_prefetch_disable( + uint32_t *pCmdBuffer, + uint32_t id, + uint32_t mask1, + uint32_t mask2) +{ + struct cdm_prefetch_disable_event_cmd *pHeader = + (struct cdm_prefetch_disable_event_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_WAIT_PREFETCH_DISABLE; + pHeader->id = id; + pHeader->mask1 = mask1; + pHeader->mask2 = mask2; + + pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_WAIT_PREFETCH_DISABLE); + + return pCmdBuffer; +} + + struct cam_cdm_utils_ops CDM170_ops = { cdm_get_cmd_header_size, + cdm_required_size_dmi, cdm_required_size_reg_continuous, cdm_required_size_reg_random, - cdm_required_size_dmi, - cdm_required_size_genirq, cdm_required_size_indirect, + cdm_required_size_genirq, + cdm_required_size_wait_event, cdm_required_size_changebase, + cdm_required_size_comp_wait, + cdm_required_size_prefetch_disable, cdm_offsetof_dmi_addr, cdm_offsetof_indirect_addr, + cdm_write_dmi, cdm_write_regcontinuous, cdm_write_regrandom, - cdm_write_dmi, cdm_write_indirect, - cdm_write_changebase, cdm_write_genirq, + cdm_write_wait_event, + cdm_write_changebase, + cdm_write_wait_comp_event, + cdm_write_wait_prefetch_disable, }; int cam_cdm_get_ioremap_from_base(uint32_t hw_base, @@ -672,7 +780,7 @@ void cam_cdm_util_dump_cmd_buf( uint32_t cmd = 0; if (!cmd_buf_start || !cmd_buf_end) { - CAM_INFO(CAM_CDM, "Invalid args"); + CAM_ERR(CAM_CDM, "Invalid args"); return; } @@ -708,7 +816,7 @@ void cam_cdm_util_dump_cmd_buf( 
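For reference, a minimal sketch of building commands through the reordered CDM170_ops table published by cam_cdm_publish_ops(); the buffer, userdata and event masks are placeholder values, and the caller is assumed to have sized the buffer with the cdm_required_size_* helpers.

/* Illustrative only; not part of the patch. */
static uint32_t *cdm_build_cmds_example(uint32_t *buf)
{
	struct cam_cdm_utils_ops *ops = cam_cdm_publish_ops();
	uint32_t *dst = buf;

	/*
	 * Needed length in dwords:
	 * cdm_required_size_changebase() + cdm_required_size_comp_wait() +
	 * cdm_required_size_genirq()
	 */
	dst = ops->cdm_write_changebase(dst, 0x0);

	/* wait on completion events 0 and 1 (mask1), none from 32 up */
	dst = ops->cdm_write_wait_comp_event(dst, 0x3, 0x0);

	/* gen-irq now carries a BL FIFO index and a write-enable bit;
	 * it returns void, so the caller advances the pointer itself.
	 */
	ops->cdm_write_genirq(dst, 0x12, true, 0);
	dst += ops->cdm_required_size_genirq();

	return dst;
}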
buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now); break; default: - CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x", + CAM_ERR(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x", cmd, *buf_now); buf_now++; break; diff --git a/drivers/cam_cdm/cam_cdm_util.h b/drivers/cam_cdm/cam_cdm_util.h index 663eca92a5..4d4095072e 100644 --- a/drivers/cam_cdm/cam_cdm_util.h +++ b/drivers/cam_cdm/cam_cdm_util.h @@ -1,11 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _CAM_CDM_UTIL_H_ #define _CAM_CDM_UTIL_H_ +#include + enum cam_cdm_command { CAM_CDM_CMD_UNUSED = 0x0, CAM_CDM_CMD_DMI = 0x1, @@ -19,10 +21,13 @@ enum cam_cdm_command { CAM_CDM_CMD_PERF_CTRL = 0x9, CAM_CDM_CMD_DMI_32 = 0xa, CAM_CDM_CMD_DMI_64 = 0xb, - CAM_CDM_CMD_PRIVATE_BASE = 0xc, + CAM_CDM_COMP_WAIT = 0xc, + CAM_CDM_CLEAR_COMP_WAIT = 0xd, + CAM_CDM_WAIT_PREFETCH_DISABLE = 0xe, + CAM_CDM_CMD_PRIVATE_BASE = 0xf, CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64), CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65), - CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F + CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F, }; /** @@ -53,6 +58,10 @@ enum cam_cdm_command { * in dwords. * @return Size in dwords * + * @cdm_required_size_comp_wait: Calculates the size of a comp-wait command + * in dwords. + * @return Size in dwords + * * @cdm_required_size_changebase: Calculates the size of a change-base command * in dwords. * @return Size in dwords @@ -102,46 +111,73 @@ enum cam_cdm_command { * @base: New base (device) address * @return Pointer in command buffer pointing past the written commands * - * @cdm_write_genirq: Writes a gen irq command into the command buffer. + * @cdm_write_genirq: Writes a gen irq command into the command buffer. * @pCmdBuffer: Pointer to command buffer * @userdata: userdata or cookie return by hardware during irq. + * + * @cdm_write_wait_comp_event: Writes a wait comp event cmd into the + * command buffer. + * @pCmdBuffer: Pointer to command buffer + * @mask1: This value decides which comp events to wait (0 - 31). + * @mask2: This value decides which comp events to wait (32 - 65). 
*/ struct cam_cdm_utils_ops { uint32_t (*cdm_get_cmd_header_size)(unsigned int command); +uint32_t (*cdm_required_size_dmi)(void); uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals); uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals); -uint32_t (*cdm_required_size_dmi)(void); -uint32_t (*cdm_required_size_genirq)(void); uint32_t (*cdm_required_size_indirect)(void); +uint32_t (*cdm_required_size_genirq)(void); +uint32_t (*cdm_required_size_wait_event)(void); uint32_t (*cdm_required_size_changebase)(void); +uint32_t (*cdm_required_size_comp_wait)(void); +uint32_t (*cdm_required_size_prefetch_disable)(void); uint32_t (*cdm_offsetof_dmi_addr)(void); uint32_t (*cdm_offsetof_indirect_addr)(void); +uint32_t *(*cdm_write_dmi)( + uint32_t *pCmdBuffer, + uint8_t dmiCmd, + uint32_t DMIAddr, + uint8_t DMISel, + uint32_t dmiBufferAddr, + uint32_t length); uint32_t* (*cdm_write_regcontinuous)( uint32_t *pCmdBuffer, - uint32_t reg, - uint32_t numVals, + uint32_t reg, + uint32_t numVals, uint32_t *pVals); uint32_t *(*cdm_write_regrandom)( uint32_t *pCmdBuffer, - uint32_t numRegVals, + uint32_t numRegVals, uint32_t *pRegVals); -uint32_t *(*cdm_write_dmi)( - uint32_t *pCmdBuffer, - uint8_t dmiCmd, - uint32_t DMIAddr, - uint8_t DMISel, - uint32_t dmiBufferAddr, - uint32_t length); uint32_t *(*cdm_write_indirect)( uint32_t *pCmdBuffer, - uint32_t indirectBufferAddr, - uint32_t length); -uint32_t *(*cdm_write_changebase)( - uint32_t *pCmdBuffer, - uint32_t base); + uint32_t indirectBufferAddr, + uint32_t length); void (*cdm_write_genirq)( uint32_t *pCmdBuffer, - uint32_t userdata); + uint32_t userdata, + bool bit_wr_enable, + uint32_t fifo_idx); +uint32_t *(*cdm_write_wait_event)( + uint32_t *pCmdBuffer, + uint32_t iw, + uint32_t id, + uint32_t mask, + uint32_t offset, + uint32_t data); +uint32_t *(*cdm_write_changebase)( + uint32_t *pCmdBuffer, + uint32_t base); +uint32_t *(*cdm_write_wait_comp_event)( + uint32_t *pCmdBuffer, + uint32_t mask1, + uint32_t mask2); +uint32_t *(*cdm_write_wait_prefetch_disable)( + uint32_t *pCmdBuffer, + uint32_t id, + uint32_t mask1, + uint32_t mask2); }; /** diff --git a/drivers/cam_cdm/cam_hw_cdm170_reg.h b/drivers/cam_cdm/cam_hw_cdm170_reg.h deleted file mode 100644 index 4a0fbda825..0000000000 --- a/drivers/cam_cdm/cam_hw_cdm170_reg.h +++ /dev/null @@ -1,135 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
- */ - -#ifndef _CAM_HW_CDM170_REG_H_ -#define _CAM_HW_CDM170_REG_H_ - -#define CAM_CDM_REG_OFFSET_FIRST 0x0 -#define CAM_CDM_REG_OFFSET_LAST 0x200 -#define CAM_CDM_REGS_COUNT 0x30 -#define CAM_CDM_HWFIFO_SIZE 0x40 - -#define CAM_CDM_OFFSET_HW_VERSION 0x0 -#define CAM_CDM_OFFSET_TITAN_VERSION 0x4 -#define CAM_CDM_OFFSET_RST_CMD 0x10 -#define CAM_CDM_OFFSET_CGC_CFG 0x14 -#define CAM_CDM_OFFSET_CORE_CFG 0x18 -#define CAM_CDM_OFFSET_CORE_EN 0x1c -#define CAM_CDM_OFFSET_FE_CFG 0x20 -#define CAM_CDM_OFFSET_IRQ_MASK 0x30 -#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34 -#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38 -#define CAM_CDM_OFFSET_IRQ_SET 0x3c -#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40 - -#define CAM_CDM_OFFSET_IRQ_STATUS 0x44 -#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1 -#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2 -#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4 -#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000 -#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000 -#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000 - -#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50 -#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54 -#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58 -#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c -#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60 -#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64 -#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68 -#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c -#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80 -#define CAM_CDM_OFFSET_WAIT_STATUS 0x84 -#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90 -#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94 -#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98 -#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c -#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0 -#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4 -#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8 -#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac -#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0 -#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4 -#define CAM_CDM_OFFSET_CORE_DBUG 0xd8 -#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0 -#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4 -#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8 -#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec -#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0 -#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4 -#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100 -#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104 -#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108 -#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110 -#define CAM_CDM_OFFSET_PERF_MON_0 0x114 -#define CAM_CDM_OFFSET_PERF_MON_1 0x118 -#define CAM_CDM_OFFSET_PERF_MON_2 0x11c -#define CAM_CDM_OFFSET_SPARE 0x200 - -/* - * Always make sure below register offsets are aligned with - * enum cam_cdm_regs offsets - */ -struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = { - { CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE }, - { CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE }, - { CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE }, - { CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BL_FIFO_BASE_REG, 
CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE }, - { CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE }, - { CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ }, - { CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE } -}; - -struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = { - .first_offset = 0x0, - .last_offset = 0x200, - .reg_count = 0x30, - .offsets = cam170_cpas_cdm_register_offsets, - .offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/ - sizeof(struct cam_cdm_reg_offset)), -}; - -#endif /* _CAM_HW_CDM170_REG_H_ */ diff --git a/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c index c28fcdf3ef..bfb9fdba0a 100644 --- a/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c +++ b/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ #include "cam_fd_hw_core.h" @@ -903,12 +903,14 @@ int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size) cdm_cmd->flag = false; cdm_cmd->userdata = NULL; cdm_cmd->cookie = 0; + cdm_cmd->gen_irq_arb = false; for (i = 0 ; i <= start_args->num_hw_update_entries; i++) { cmd = (start_args->hw_update_entries + i); cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle; cdm_cmd->cmd[i].offset = cmd->offset; cdm_cmd->cmd[i].len = cmd->len; + cdm_cmd->cmd[i].arbitrate = false; } rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd); @@ -1032,6 +1034,7 @@ int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size) cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback; cdm_acquire.id = CAM_CDM_VIRTUAL; cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; for (i = 0; i < fd_hw->soc_info.num_reg_map; i++) cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i]; diff --git a/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c index 05a166cd01..6fdc18b053 100644 --- a/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c +++ b/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c @@ -2697,8 +2697,9 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) cdm_acquire.base_array[j++] = ife_hw_mgr->cdm_reg_map[i]; } - cdm_acquire.base_array_cnt = j; + cdm_acquire.base_array_cnt = j; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; cdm_acquire.id = CAM_CDM_VIRTUAL; cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback; rc = cam_cdm_acquire(&cdm_acquire); @@ -2885,9 +2886,9 @@ static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args) cdm_acquire.base_array[j++] = ife_hw_mgr->cdm_reg_map[i]; } + cdm_acquire.base_array_cnt = j; - - + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; cdm_acquire.id = CAM_CDM_VIRTUAL; cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback; rc = cam_cdm_acquire(&cdm_acquire); @@ -3412,6 +3413,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv, cdm_cmd->flag = true; cdm_cmd->userdata = hw_update_data; cdm_cmd->cookie = cfg->request_id; + cdm_cmd->gen_irq_arb = false; for (i = 0 ; i < cfg->num_hw_update_entries; i++) { cmd = (cfg->hw_update_entries + i); @@ -3430,6 +3432,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv, cdm_cmd->cmd[i - skip].bl_addr.mem_handle = cmd->handle; cdm_cmd->cmd[i - skip].offset = cmd->offset; cdm_cmd->cmd[i - skip].len = cmd->len; + cdm_cmd->cmd[i - skip].arbitrate = false; } cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries - skip; diff --git a/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c index 09b4b8d3e0..793415c924 100644 --- a/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c +++ b/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -303,6 +303,7 @@ static int cam_jpeg_insert_cdm_change_base( config_args->hw_update_entries[CAM_JPEG_CHBASE].offset; cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len = size * sizeof(uint32_t); cdm_cmd->cmd_arrary_count++; + cdm_cmd->gen_irq_arb = false; ch_base_iova_addr += size; *ch_base_iova_addr = 0; @@ -439,6 +440,7 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data) cdm_cmd->userdata = NULL; cdm_cmd->cookie = 0; cdm_cmd->cmd_arrary_count = 0; + cdm_cmd->gen_irq_arb = false; rc = cam_jpeg_insert_cdm_change_base(config_args, ctx_data, hw_mgr); @@ -457,6 +459,8 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data) cmd->offset; cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len = cmd->len; + cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].arbitrate = + false; CAM_DBG(CAM_JPEG, "i %d entry h %d o %d l %d", i, cmd->handle, cmd->offset, cmd->len); cdm_cmd->cmd_arrary_count++; @@ -1186,6 +1190,7 @@ static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) cdm_acquire.base_array_cnt = 1; cdm_acquire.id = CAM_CDM_VIRTUAL; cdm_acquire.cam_cdm_callback = NULL; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; rc = cam_cdm_acquire(&cdm_acquire); if (rc) { diff --git a/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c index b736708fa9..7f34de1d23 100644 --- a/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c +++ b/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #include "cam_lrme_hw_core.h" @@ -416,12 +416,14 @@ static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core, cdm_cmd->flag = false; cdm_cmd->userdata = NULL; cdm_cmd->cookie = 0; + cdm_cmd->gen_irq_arb = false; for (i = 0; i <= frame_req->num_hw_update_entries; i++) { cmd = (frame_req->hw_update_entries + i); cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle; cdm_cmd->cmd[i].offset = cmd->offset; cdm_cmd->cmd[i].len = cmd->len; + cdm_cmd->cmd[i].arbitrate = false; } rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd); diff --git a/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c index 6ea5d343ec..e42591dc90 100644 --- a/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c +++ b/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #include @@ -53,6 +53,7 @@ static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core, cdm_acquire.cam_cdm_callback = NULL; cdm_acquire.id = CAM_CDM_VIRTUAL; cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++) cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i]; diff --git a/drivers/cam_smmu/cam_smmu_api.c b/drivers/cam_smmu/cam_smmu_api.c index 78d1b2cee9..ae237c41a0 100644 --- a/drivers/cam_smmu/cam_smmu_api.c +++ b/drivers/cam_smmu/cam_smmu_api.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
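For reference, a minimal submit sketch showing where the two new fields sit (gen_irq_arb on the request, arbitrate on each BL entry), mirroring the JPEG and LRME call sites above; the request and hw-update-entry struct names are assumed from those call sites.

/* Illustrative only; not part of the patch. */
static int cdm_submit_example(uint32_t cdm_handle,
	struct cam_cdm_bl_request *cdm_cmd,
	struct cam_hw_update_entry *entries, uint32_t num)
{
	uint32_t i;

	cdm_cmd->flag = false;
	cdm_cmd->userdata = NULL;
	cdm_cmd->cookie = 0;
	/* New: no gen-irq arbitration for this request */
	cdm_cmd->gen_irq_arb = false;

	for (i = 0; i < num; i++) {
		cdm_cmd->cmd[i].bl_addr.mem_handle = entries[i].handle;
		cdm_cmd->cmd[i].offset = entries[i].offset;
		cdm_cmd->cmd[i].len = entries[i].len;
		/* New: per-BL arbitration flag */
		cdm_cmd->cmd[i].arbitrate = false;
	}
	cdm_cmd->cmd_arrary_count = num;

	return cam_cdm_submit_bls(cdm_handle, cdm_cmd);
}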
*/ #include @@ -31,7 +31,8 @@ #define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE) #define COOKIE_MASK ((1<> COOKIE_SIZE) & COOKIE_MASK) @@ -93,7 +94,7 @@ struct cam_context_bank_info { struct iommu_domain *domain; dma_addr_t va_start; size_t va_len; - const char *name; + const char *name[CAM_SMMU_SHARED_HDL_MAX]; bool is_secure; uint8_t scratch_buf_support; uint8_t firmware_support; @@ -128,9 +129,11 @@ struct cam_context_bank_info { int cb_count; int secure_count; int pf_count; - size_t io_mapping_size; size_t shared_mapping_size; + bool is_mul_client; + int device_count; + int num_shared_hdl; }; struct cam_iommu_cb_set { @@ -369,12 +372,16 @@ static void cam_smmu_print_kernel_list(int idx) static void cam_smmu_print_table(void) { - int i; + int i, j; for (i = 0; i < iommu_cb_set.cb_num; i++) { - CAM_ERR(CAM_SMMU, "i= %d, handle= %d, name_addr=%pK", i, - (int)iommu_cb_set.cb_info[i].handle, - (void *)iommu_cb_set.cb_info[i].name); + for (j = 0; j < iommu_cb_set.cb_info[i].num_shared_hdl; j++) { + CAM_ERR(CAM_SMMU, + "i= %d, handle= %d, name_addr=%pK name %s", + i, (int)iommu_cb_set.cb_info[i].handle, + (void *)iommu_cb_set.cb_info[i].name[j], + iommu_cb_set.cb_info[i].name[j]); + } CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev); } } @@ -399,7 +406,7 @@ static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr) "Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s", current_addr, start_addr, end_addr, mapping->ion_fd, - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); goto end; } else { if (start_addr > current_addr) @@ -429,9 +436,9 @@ end: closest_mapping->buf, buf_handle); } else - CAM_INFO(CAM_SMMU, + CAM_ERR(CAM_SMMU, "Cannot find vaddr:%lx in SMMU %s virt address", - current_addr, iommu_cb_set.cb_info[idx].name); + current_addr, iommu_cb_set.cb_info[idx].name[0]); return buf_handle; } @@ -467,7 +474,7 @@ void cam_smmu_set_client_page_fault_handler(int handle, if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) { CAM_ERR(CAM_SMMU, "%s Should not regiester more handlers", - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); mutex_unlock(&iommu_cb_set.cb_info[idx].lock); return; } @@ -495,7 +502,7 @@ void cam_smmu_set_client_page_fault_handler(int handle, if (i == CAM_SMMU_CB_MAX) CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s", - handle, iommu_cb_set.cb_info[idx].name); + handle, iommu_cb_set.cb_info[idx].name[0]); } mutex_unlock(&iommu_cb_set.cb_info[idx].lock); } @@ -537,7 +544,7 @@ void cam_smmu_unset_client_page_fault_handler(int handle, void *token) } if (i == CAM_SMMU_CB_MAX) CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s", - handle, iommu_cb_set.cb_info[idx].name); + handle, iommu_cb_set.cb_info[idx].name[0]); mutex_unlock(&iommu_cb_set.cb_info[idx].lock); } @@ -560,7 +567,7 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain, cb_name = (char *)token; /* Check whether it is in the table */ for (idx = 0; idx < iommu_cb_set.cb_num; idx++) { - if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name)) + if (!strcmp(iommu_cb_set.cb_info[idx].name[0], cb_name)) break; } @@ -714,45 +721,69 @@ static int cam_smmu_attach_device(int idx) static int cam_smmu_create_add_handle_in_table(char *name, int *hdl) { - int i; + int i, j; int handle; + bool valid = false; /* create handle and add in the iommu hardware table */ for (i = 0; i < iommu_cb_set.cb_num; i++) { - if (!strcmp(iommu_cb_set.cb_info[i].name, name)) { - mutex_lock(&iommu_cb_set.cb_info[i].lock); - if (iommu_cb_set.cb_info[i].handle != 
HANDLE_INIT) { - if (iommu_cb_set.cb_info[i].is_secure) - iommu_cb_set.cb_info[i].secure_count++; + for (j = 0; j < iommu_cb_set.cb_info[i].num_shared_hdl; j++) { + if (!strcmp(iommu_cb_set.cb_info[i].name[j], name)) + valid = true; - mutex_unlock(&iommu_cb_set.cb_info[i].lock); + if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT && + valid) { + mutex_lock(&iommu_cb_set.cb_info[i].lock); if (iommu_cb_set.cb_info[i].is_secure) { + iommu_cb_set.cb_info[i].secure_count++; *hdl = iommu_cb_set.cb_info[i].handle; + mutex_unlock( + &iommu_cb_set.cb_info[i].lock); + return 0; + } + + if (iommu_cb_set.cb_info[i].is_mul_client) { + iommu_cb_set.cb_info[i].device_count++; + *hdl = iommu_cb_set.cb_info[i].handle; + mutex_unlock( + &iommu_cb_set.cb_info[i].lock); + CAM_INFO(CAM_SMMU, + "%s already got handle 0x%x", + name, + iommu_cb_set.cb_info[i].handle); return 0; } CAM_ERR(CAM_SMMU, "Error: %s already got handle 0x%x", name, iommu_cb_set.cb_info[i].handle); - - return -EINVAL; + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + return -EALREADY; } - /* make sure handle is unique */ - do { - handle = cam_smmu_create_iommu_handle(i); - } while (cam_smmu_check_handle_unique(handle)); + if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT && + valid) { + /* make sure handle is unique */ + do { + handle = + cam_smmu_create_iommu_handle(i); + } while (cam_smmu_check_handle_unique(handle)); - /* put handle in the table */ - iommu_cb_set.cb_info[i].handle = handle; - iommu_cb_set.cb_info[i].cb_count = 0; - if (iommu_cb_set.cb_info[i].is_secure) - iommu_cb_set.cb_info[i].secure_count++; - *hdl = handle; - CAM_DBG(CAM_SMMU, "%s creates handle 0x%x", - name, handle); - mutex_unlock(&iommu_cb_set.cb_info[i].lock); - return 0; + /* put handle in the table */ + iommu_cb_set.cb_info[i].handle = handle; + iommu_cb_set.cb_info[i].cb_count = 0; + if (iommu_cb_set.cb_info[i].is_secure) + iommu_cb_set.cb_info[i].secure_count++; + + if (iommu_cb_set.cb_info[i].is_mul_client) + iommu_cb_set.cb_info[i].device_count++; + + *hdl = handle; + CAM_DBG(CAM_SMMU, "%s creates handle 0x%x", + name, handle); + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + return 0; + } } } @@ -2038,7 +2069,7 @@ static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx, int cam_smmu_get_handle(char *identifier, int *handle_ptr) { - int ret = 0; + int rc = 0; if (!identifier) { CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL"); @@ -2051,11 +2082,12 @@ int cam_smmu_get_handle(char *identifier, int *handle_ptr) } /* create and put handle in the table */ - ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr); - if (ret < 0) - CAM_ERR(CAM_SMMU, "Error: %s get handle fail", identifier); + rc = cam_smmu_create_add_handle_in_table(identifier, handle_ptr); + if (rc < 0) + CAM_ERR(CAM_SMMU, "Error: %s get handle fail, rc %d", + identifier, rc); - return ret; + return rc; } EXPORT_SYMBOL(cam_smmu_get_handle); @@ -2328,7 +2360,7 @@ int cam_smmu_get_scratch_iova(int handle, if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { CAM_ERR(CAM_SMMU, "Err:Dev %s should call SMMU attach before map buffer", - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); rc = -EINVAL; goto error; } @@ -2736,7 +2768,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap, if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { CAM_ERR(CAM_SMMU, "Err:Dev %s should call SMMU attach before map buffer", - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); rc = -EINVAL; goto get_addr_end; 
} @@ -2800,7 +2832,7 @@ int cam_smmu_map_kernel_iova(int handle, struct dma_buf *buf, if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { CAM_ERR(CAM_SMMU, "Err:Dev %s should call SMMU attach before map buffer", - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); rc = -EINVAL; goto get_addr_end; } @@ -3139,7 +3171,7 @@ int cam_smmu_destroy_handle(int handle) if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) { CAM_ERR(CAM_SMMU, "UMD %s buffer list is not clean", - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); cam_smmu_print_user_list(idx); cam_smmu_clean_user_buffer_list(idx); } @@ -3147,7 +3179,7 @@ int cam_smmu_destroy_handle(int handle) if (!list_empty_careful( &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list)) { CAM_ERR(CAM_SMMU, "KMD %s buffer list is not clean", - iommu_cb_set.cb_info[idx].name); + iommu_cb_set.cb_info[idx].name[0]); cam_smmu_print_kernel_list(idx); cam_smmu_clean_kernel_buffer_list(idx); } @@ -3168,6 +3200,19 @@ int cam_smmu_destroy_handle(int handle) return 0; } + if (iommu_cb_set.cb_info[idx].is_mul_client && + iommu_cb_set.cb_info[idx].device_count) { + iommu_cb_set.cb_info[idx].device_count--; + + if (!iommu_cb_set.cb_info[idx].device_count) { + iommu_cb_set.cb_info[idx].cb_count = 0; + iommu_cb_set.cb_info[idx].handle = HANDLE_INIT; + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return 0; + } + + iommu_cb_set.cb_info[idx].device_count = 0; iommu_cb_set.cb_info[idx].cb_count = 0; iommu_cb_set.cb_info[idx].handle = HANDLE_INIT; mutex_unlock(&iommu_cb_set.cb_info[idx].lock); @@ -3445,7 +3490,7 @@ static int cam_smmu_get_memory_regions_info(struct device_node *of_node, region_id); } - CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name); + CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name[0]); CAM_DBG(CAM_SMMU, "Found region -> %s", region_name); CAM_DBG(CAM_SMMU, "region_start -> %X", region_start); CAM_DBG(CAM_SMMU, "region_len -> %X", region_len); @@ -3468,6 +3513,7 @@ static int cam_populate_smmu_context_banks(struct device *dev, int rc = 0; struct cam_context_bank_info *cb; struct device *ctx = NULL; + int i = 0; if (!dev) { CAM_ERR(CAM_SMMU, "Error: Invalid device"); @@ -3484,8 +3530,24 @@ static int cam_populate_smmu_context_banks(struct device *dev, /* read the context bank from cb set */ cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count]; + cb->is_mul_client = + of_property_read_bool(dev->of_node, "multiple-client-devices"); + + cb->num_shared_hdl = of_property_count_strings(dev->of_node, + "label"); + + if (cb->num_shared_hdl > + CAM_SMMU_SHARED_HDL_MAX) { + CAM_ERR(CAM_CDM, "Invalid count of client names count=%d", + cb->num_shared_hdl); + rc = -EINVAL; + return rc; + } + /* set the name of the context bank */ - rc = of_property_read_string(dev->of_node, "label", &cb->name); + for (i = 0; i < cb->num_shared_hdl; i++) + rc = of_property_read_string_index(dev->of_node, + "label", i, &cb->name[i]); if (rc < 0) { CAM_ERR(CAM_SMMU, "Error: failed to read label from sub device"); @@ -3509,22 +3571,23 @@ static int cam_populate_smmu_context_banks(struct device *dev, /* set up the iommu mapping for the context bank */ if (type == CAM_QSMMU) { CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s", - cb->name); + cb->name[0]); return -ENODEV; } ctx = dev; - CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name); + CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name[0]); rc = cam_smmu_setup_cb(cb, ctx); if (rc < 0) { - CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name); 
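For reference, a minimal sketch of the shared-handle behaviour added above for context banks marked multiple-client-devices: a second client asking for the same label gets the existing handle back while device_count is bumped, and the bank is reset to HANDLE_INIT only after every client has destroyed its handle. The label string below is a placeholder.

/* Illustrative only; not part of the patch. */
static int smmu_shared_handle_example(void)
{
	char label[] = "cam-shared-cb";	/* hypothetical DT label */
	int hdl_a = 0, hdl_b = 0, rc;

	rc = cam_smmu_get_handle(label, &hdl_a);
	if (rc < 0)
		return rc;

	/* second client on the same bank: the same handle is returned */
	rc = cam_smmu_get_handle(label, &hdl_b);
	if (rc < 0)
		return rc;

	/* first destroy only drops device_count ... */
	cam_smmu_destroy_handle(hdl_b);
	/* ... the last destroy resets the handle to HANDLE_INIT */
	cam_smmu_destroy_handle(hdl_a);

	return 0;
}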
+ CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", + cb->name[0]); goto cb_init_fail; } if (cb->io_support && cb->domain) iommu_set_fault_handler(cb->domain, cam_smmu_iommu_fault_handler, - (void *)cb->name); + (void *)cb->name[0]); if (!dev->dma_parms) dev->dma_parms = devm_kzalloc(dev, diff --git a/drivers/cam_utils/cam_soc_util.c b/drivers/cam_utils/cam_soc_util.c index 4e3dacaa82..a6220d1566 100644 --- a/drivers/cam_utils/cam_soc_util.c +++ b/drivers/cam_utils/cam_soc_util.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ #include @@ -1280,6 +1280,10 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info) } } + rc = of_property_read_string(of_node, "label", &soc_info->label_name); + if (rc) + CAM_DBG(CAM_UTIL, "Label is not available in the node: %d", rc); + if (soc_info->num_mem_block > 0) { rc = of_property_read_u32_array(of_node, "reg-cam-base", soc_info->mem_block_cam_base, soc_info->num_mem_block); diff --git a/drivers/cam_utils/cam_soc_util.h b/drivers/cam_utils/cam_soc_util.h index 25c08fc189..5e51fa8f1b 100644 --- a/drivers/cam_utils/cam_soc_util.h +++ b/drivers/cam_utils/cam_soc_util.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ #ifndef _CAM_SOC_UTIL_H_ @@ -123,6 +123,7 @@ struct cam_soc_gpio_data { * @index: Instance id for the camera device * @dev_name: Device Name * @irq_name: Name of the irq associated with the device + * @label_name: label name * @irq_line: Irq resource * @irq_data: Private data that is passed when IRQ is requested * @compatible: Compatible string associated with the device @@ -171,6 +172,7 @@ struct cam_hw_soc_info { uint32_t index; const char *dev_name; const char *irq_name; + const char *label_name; struct resource *irq_line; void *irq_data; const char *compatible;