msm: camera: icp: Teach A5 to power resume/collapse via its hw_ops

The LX7 processor will require a new mechanism to resume/collapse.
Make the current mechanism transparent to the ICP HW manager in
preparation for the new proc. By going through the hw_ops we let
the device interface decide which mechanism to use.

CRs-Fixed: 2722486
Change-Id: I719314b3f505270a33892cb247082e43dad2e92d
Signed-off-by: Fernando Pacheco <fpacheco@codeaurora.org>
This commit is contained in:
Fernando Pacheco
2020-05-19 17:56:17 -07:00
committed by Gerrit - the friendly Code Review server
parent 8fe045c35a
commit ecd191e638
6 changed files with 162 additions and 172 deletions

View File

@@ -73,12 +73,11 @@ int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
* @event_driven_mode: event mode
* @hfi_mem: hfi memory info
* @icp_base: icp base address
* @debug: debug flag
*
* Returns success(zero)/failure(non zero)
*/
int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
void *__iomem icp_base, bool debug);
void *__iomem icp_base);
/**
* hfi_get_hw_caps() - hardware capabilities from firmware
@@ -96,18 +95,6 @@ int hfi_get_hw_caps(void *query_caps);
*/
void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
/**
* cam_hfi_enable_cpu() - enable A5 CPU
* @icp_base: icp base address
*/
void cam_hfi_enable_cpu(void __iomem *icp_base);
/**
* cam_hfi_disable_cpu() - disable A5 CPU
* @icp_base: icp base address
*/
void cam_hfi_disable_cpu(void __iomem *icp_base);
/**
* cam_hfi_deinit() - cleanup HFI
*/
@@ -154,12 +141,10 @@ int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
* cam_hfi_resume() - function to resume
* @hfi_mem: hfi memory info
* @icp_base: icp base address
* @debug: debug flag
*
* Returns success(zero)/failure(non zero)
*/
int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
void __iomem *icp_base, bool debug);
int cam_hfi_resume(struct hfi_mem_info *hfi_mem, void __iomem *icp_base);
/**
* cam_hfi_queue_dump() - utility function to dump hfi queues

View File

@@ -9,21 +9,10 @@
#include <linux/types.h>
#include "hfi_intf.h"
/* start of ICP CSR registers */
#define HFI_REG_A5_HW_VERSION 0x0
#define HFI_REG_A5_CSR_NSEC_RESET 0x4
#define HFI_REG_A5_CSR_A5_CONTROL 0x8
#define HFI_REG_A5_CSR_ETM 0xC
#define HFI_REG_A5_CSR_A2HOSTINTEN 0x10
#define HFI_REG_A5_CSR_A2HOSTINT 0x14
#define HFI_REG_A5_CSR_A2HOSTINTCLR 0x18
#define HFI_REG_A5_CSR_A2HOSTINTSTATUS 0x1C
#define HFI_REG_A5_CSR_A2HOSTINTSET 0x20
#define HFI_REG_A5_CSR_HOST2ICPINT 0x30
#define HFI_REG_A5_CSR_A5_STATUS 0x200
#define HFI_REG_A5_QGIC2_LM_ID 0x204
#define HFI_REG_A5_SPARE 0x400
/* general purpose registers from */
#define HFI_REG_FW_VERSION 0x44
@@ -44,23 +33,6 @@
/* end of ICP CSR registers */
/* flags for ICP CSR registers */
#define ICP_FLAG_CSR_WAKE_UP_EN (1 << 4)
#define ICP_FLAG_CSR_A5_EN (1 << 9)
#define ICP_CSR_EN_CLKGATE_WFI (1 << 12)
#define ICP_CSR_EDBGRQ (1 << 14)
#define ICP_CSR_DBGSWENABLE (1 << 22)
#define ICP_CSR_A5_STATUS_WFI (1 << 7)
#define ICP_FLAG_A5_CTRL_DBG_EN (ICP_FLAG_CSR_WAKE_UP_EN|\
ICP_FLAG_CSR_A5_EN|\
ICP_CSR_EDBGRQ|\
ICP_CSR_DBGSWENABLE)
#define ICP_FLAG_A5_CTRL_EN (ICP_FLAG_CSR_WAKE_UP_EN|\
ICP_FLAG_CSR_A5_EN|\
ICP_CSR_EN_CLKGATE_WFI)
/* start of Queue table and queues */
#define MAX_ICP_HFI_QUEUES 4
#define ICP_QHDR_TX_TYPE_MASK 0xFF000000

View File

@@ -34,9 +34,6 @@
#define HFI_POLL_DELAY_US 100
#define HFI_POLL_TIMEOUT_US 10000
#define HFI_MAX_PC_POLL_TRY 150
#define HFI_POLL_TRY_SLEEP 1
static struct hfi_info *g_hfi;
unsigned int g_icp_mmu_hdl;
static DEFINE_MUTEX(hfi_cmd_q_mutex);
@@ -542,75 +539,13 @@ int hfi_get_hw_caps(void *query_buf)
return 0;
}
/*
 * cam_hfi_disable_cpu() - halt the A5 CPU and quiesce the HFI interface.
 * @icp_base: ioremapped base of the ICP CSR block.
 *
 * Polls the A5 status register until the core reports WFI (i.e. the
 * firmware has parked itself), then clears the CPU-enable/wake-up bits,
 * writes back the NSEC reset register, posts a reset request to the
 * host->ICP init mailbox and masks the A2HOST interrupt.
 */
void cam_hfi_disable_cpu(void __iomem *icp_base)
{
uint32_t data;
uint32_t val;
uint32_t try = 0;
while (try < HFI_MAX_PC_POLL_TRY) {
data = cam_io_r_mb(icp_base + HFI_REG_A5_CSR_A5_STATUS);
CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
if (data & ICP_CSR_A5_STATUS_WFI)
break;
/* Poll here to confirm that the FW has triggered WFI and the
 * host can then proceed. No interrupt is expected from the FW
 * at this time.
 */
usleep_range(HFI_POLL_TRY_SLEEP * 1000,
(HFI_POLL_TRY_SLEEP * 1000) + 1000);
try++;
}
/* Clear only CPU-enable and wake-up; other control bits are kept */
val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
/* NOTE(review): reads NSEC_RESET and writes the same value back —
 * presumably the write itself latches the reset; confirm with HW docs.
 */
val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
cam_io_w_mb((uint32_t)INTR_DISABLE,
icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
}
/*
 * cam_hfi_enable_cpu() - enable the A5 CPU and release it from reset.
 * @icp_base: ioremapped base of the ICP CSR block.
 */
void cam_hfi_enable_cpu(void __iomem *icp_base)
{
cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
/* NOTE(review): magic 0x10 matches A5_CSR_FUNC_RESET (1 << 4) in the
 * new a5_reg.h — presumably the functional reset bit; confirm.
 */
cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
}
int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
void __iomem *icp_base, bool debug)
int cam_hfi_resume(struct hfi_mem_info *hfi_mem, void __iomem *icp_base)
{
int rc = 0;
uint32_t data;
uint32_t fw_version, status = 0;
cam_hfi_enable_cpu(icp_base);
g_hfi->csr_base = icp_base;
if (debug) {
cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
/* Barrier needed as the next write should be done only after the
* previous write completed successfully. The next write enables
* clock gating
*/
wmb();
cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
} else {
cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
}
if (readl_poll_timeout(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE,
status, status == ICP_INIT_RESP_SUCCESS,
HFI_POLL_DELAY_US, HFI_POLL_TIMEOUT_US)) {
@@ -625,9 +560,6 @@ int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);
cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
icp_base + HFI_REG_SFR_PTR);
@@ -661,7 +593,7 @@ int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
}
int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
void __iomem *icp_base, bool debug)
void __iomem *icp_base)
{
int rc = 0;
struct hfi_qtbl *qtbl;
@@ -688,27 +620,6 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
g_hfi->hfi_state = HFI_DEINIT;
if (debug) {
cam_io_w_mb(
(uint32_t)(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN |
ICP_CSR_EDBGRQ | ICP_CSR_DBGSWENABLE),
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
msleep(100);
cam_io_w_mb((uint32_t)(ICP_FLAG_CSR_A5_EN |
ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI),
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
} else {
/* Due to hardware bug in V1 ICP clock gating has to be
* disabled, this is supposed to be fixed in V-2. But enabling
* the clock gating is causing the firmware hang, hence
* disabling the clock gating on both V1 and V2 until the
* hardware team root causes this
*/
cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
ICP_FLAG_CSR_WAKE_UP_EN |
ICP_CSR_EN_CLKGATE_WFI,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
}
qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
qtbl_hdr = &qtbl->q_tbl_hdr;

View File

@@ -13,12 +13,14 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/elf.h>
#include <linux/iopoll.h>
#include <media/cam_icp.h>
#include "cam_io_util.h"
#include "cam_a5_hw_intf.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "a5_core.h"
#include "a5_reg.h"
#include "a5_soc.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
@@ -28,6 +30,9 @@
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
#define PC_POLL_DELAY_US 100
#define PC_POLL_TIMEOUT_US 10000
static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
{
@@ -388,6 +393,60 @@ int cam_a5_deinit_hw(void *device_priv,
return rc;
}
/*
 * cam_a5_power_resume() - bring the A5 CPU out of reset and enable it.
 * @a5_info: A5 hw device info providing the Sierra register mapping.
 * @debug_enabled: when true, also set the external-debug control bits.
 *
 * Returns 0 on success, -EINVAL when @a5_info is NULL.
 */
static int cam_a5_power_resume(struct cam_hw_info *a5_info, bool debug_enabled)
{
uint32_t val = A5_CSR_FULL_CPU_EN;
void __iomem *base;
if (!a5_info) {
CAM_ERR(CAM_ICP, "invalid A5 device info");
return -EINVAL;
}
base = a5_info->soc_info.reg_map[A5_SIERRA_BASE].mem_base;
/* Enable the CPU first, then pulse the functional reset */
cam_io_w_mb(A5_CSR_A5_CPU_EN, base + ICP_SIERRA_A5_CSR_A5_CONTROL);
cam_io_w_mb(A5_CSR_FUNC_RESET, base + ICP_SIERRA_A5_CSR_NSEC_RESET);
if (debug_enabled)
val |= A5_CSR_FULL_DBG_EN;
/* Full enable: CPU + wake-up + WFI clock gating (+ debug if requested) */
cam_io_w_mb(val, base + ICP_SIERRA_A5_CSR_A5_CONTROL);
return 0;
}
/*
 * cam_a5_power_collapse() - gate the A5 CPU once firmware has parked.
 * @a5_info: A5 hw device info providing the Sierra register mapping.
 *
 * Waits for the core to signal STANDBYWFI, then clears the CPU-enable
 * and wake-up bits in the A5 control register.
 *
 * Returns 0 on success, -EINVAL when @a5_info is NULL, -ETIMEDOUT if
 * the core never reaches WFI within the poll window.
 */
static int cam_a5_power_collapse(struct cam_hw_info *a5_info)
{
uint32_t val, status = 0;
void __iomem *base;
if (!a5_info) {
CAM_ERR(CAM_ICP, "invalid A5 device info");
return -EINVAL;
}
base = a5_info->soc_info.reg_map[A5_SIERRA_BASE].mem_base;
/*
 * Need to poll here to confirm that FW has triggered WFI
 * and Host can then proceed. No interrupt is expected
 * from FW at this time. (Plain comment, not kernel-doc.)
 */
if (readl_poll_timeout(base + ICP_SIERRA_A5_CSR_A5_STATUS,
status, status & A5_CSR_A5_STANDBYWFI,
PC_POLL_DELAY_US, PC_POLL_TIMEOUT_US)) {
CAM_ERR(CAM_ICP, "WFI poll timed out: status=0x%08x", status);
return -ETIMEDOUT;
}
/* Clear only CPU-enable and wake-up; other control bits are kept */
val = cam_io_r(base + ICP_SIERRA_A5_CSR_A5_CONTROL);
val &= ~(A5_CSR_A5_CPU_EN | A5_CSR_WAKE_UP_EN);
cam_io_w_mb(val, base + ICP_SIERRA_A5_CSR_A5_CONTROL);
return 0;
}
irqreturn_t cam_a5_irq(int irq_num, void *data)
{
struct cam_hw_info *a5_dev = data;
@@ -458,6 +517,12 @@ int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
case CAM_ICP_A5_CMD_FW_DOWNLOAD:
rc = cam_a5_download_fw(device_priv);
break;
case CAM_ICP_A5_CMD_POWER_COLLAPSE:
rc = cam_a5_power_collapse(a5_dev);
break;
case CAM_ICP_A5_CMD_POWER_RESUME:
rc = cam_a5_power_resume(a5_dev, *((bool *)cmd_args));
break;
case CAM_ICP_A5_CMD_SET_FW_BUF: {
struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;

View File

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_A5_REG_H_
#define _CAM_A5_REG_H_
/* Sierra A5 CSR register offsets and bit definitions */
/* Non-secure reset register and its reset-select bits */
#define ICP_SIERRA_A5_CSR_NSEC_RESET 0x4
#define A5_CSR_FUNC_RESET (1 << 4)
#define A5_CSR_DBG_RESET (1 << 3)
#define A5_CSR_CPU_RESET (1 << 2)
/* CPU control register: enable, wake-up, clock gating and debug bits */
#define ICP_SIERRA_A5_CSR_A5_CONTROL 0x8
#define A5_CSR_DBGSWENABLE (1 << 22)
#define A5_CSR_EDBGRQ (1 << 14)
#define A5_CSR_EN_CLKGATE_WFI (1 << 12)
#define A5_CSR_A5_CPU_EN (1 << 9)
#define A5_CSR_WAKE_UP_EN (1 << 4)
#define A5_CSR_FULL_DBG_EN (A5_CSR_DBGSWENABLE | A5_CSR_EDBGRQ)
#define A5_CSR_FULL_CPU_EN (A5_CSR_A5_CPU_EN | \
A5_CSR_WAKE_UP_EN | \
A5_CSR_EN_CLKGATE_WFI)
/* Status register: WFI indicates the core has parked itself */
#define ICP_SIERRA_A5_CSR_A5_STATUS 0x200
#define A5_CSR_A5_STANDBYWFI (1 << 7)
#endif /* _CAM_A5_REG_H_ */

View File

@@ -3054,32 +3054,63 @@ static int cam_icp_mgr_hw_close_k(void *hw_priv, void *hw_close_args)
}
static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
/*
 * cam_icp_mgr_proc_resume() - power-resume the ICP processor through its
 * hw_ops, passing the JTAG debug flag as the command payload.
 *
 * NOTE(review): this span is a diff rendering without +/- markers; the
 * a5_dev_intf/a5_dev declarations, the a5_dev_intf NULL check and the
 * hw_priv cast below appear to be remnants of the removed old
 * implementation interleaved with the new function — verify against the
 * actual post-commit source before editing.
 */
static int cam_icp_mgr_proc_resume(struct cam_icp_hw_mgr *hw_mgr)
{
int rc;
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_info *a5_dev = NULL;
struct cam_hw_intf *icp_dev_intf = hw_mgr->a5_dev_intf;
CAM_DBG(CAM_PERF, "ENTER");
a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
if (!icp_dev_intf)
return -EINVAL;
}
a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
return icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv,
CAM_ICP_A5_CMD_POWER_RESUME,
&hw_mgr->a5_jtag_debug,
sizeof(hw_mgr->a5_jtag_debug));
}
/*
 * cam_icp_mgr_proc_suspend() - ask the ICP device interface to
 * power-collapse its processor via the POWER_COLLAPSE hw_ops command.
 * Silently does nothing when no device interface is registered; the
 * command's return value is intentionally ignored (best effort).
 */
static void cam_icp_mgr_proc_suspend(struct cam_icp_hw_mgr *hw_mgr)
{
struct cam_hw_intf *intf = hw_mgr->a5_dev_intf;

if (intf)
	intf->hw_ops.process_cmd(intf->hw_priv,
		CAM_ICP_A5_CMD_POWER_COLLAPSE, NULL, 0);
}
/*
 * __power_collapse() - suspend the ICP processor, optionally sending the
 * PC-prep command first when power collapse is enabled and no recovery
 * is in flight; otherwise close the hw manager outright.
 *
 * NOTE(review): this span is a diff rendering without +/- markers; the
 * cam_hfi_disable_cpu(a5_dev->...) lines reference an undeclared
 * a5_dev and appear to be the removed old calls superseded by
 * cam_icp_mgr_proc_suspend() — verify against the post-commit source.
 *
 * Returns 0 or the error from hw_close / send_pc_prep.
 */
static int __power_collapse(struct cam_icp_hw_mgr *hw_mgr)
{
int rc = 0;
if (!hw_mgr->icp_pc_flag || atomic_read(&hw_mgr->recovery)) {
cam_hfi_disable_cpu(
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
cam_icp_mgr_proc_suspend(hw_mgr);
rc = cam_icp_mgr_hw_close_k(hw_mgr, NULL);
} else {
CAM_DBG(CAM_PERF, "Sending PC prep ICP PC enabled");
rc = cam_icp_mgr_send_pc_prep(hw_mgr);
cam_hfi_disable_cpu(
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
cam_icp_mgr_proc_suspend(hw_mgr);
}
return rc;
}
/*
 * cam_icp_mgr_icp_power_collapse() - power-collapse the ICP processor
 * and deinit the A5 device.
 *
 * Returns -EINVAL when no A5 device interface is registered, otherwise
 * the result of __power_collapse(). (Closing brace lies outside this
 * diff hunk.)
 */
static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
{
struct cam_hw_intf *a5_dev_intf = hw_mgr->a5_dev_intf;
int rc;
CAM_DBG(CAM_PERF, "ENTER");
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "A5 device interface is NULL");
return -EINVAL;
}
rc = __power_collapse(hw_mgr);
/* deinit runs even if __power_collapse failed; its rc is propagated */
a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
CAM_DBG(CAM_PERF, "EXIT");
return rc;
@@ -3173,8 +3204,7 @@ static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
hfi_mem.io_mem2.len);
return cam_hfi_resume(&hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
hw_mgr->a5_jtag_debug);
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
}
static int cam_icp_retry_wait_for_abort(
@@ -3550,7 +3580,6 @@ static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
{
int rc;
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_info *a5_dev = NULL;
struct cam_icp_a5_set_irq_cb irq_cb;
struct cam_icp_a5_set_fw_buf_info fw_buf_info;
@@ -3559,7 +3588,6 @@ static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
return -EINVAL;
}
a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
irq_cb.data = hw_mgr;
@@ -3568,7 +3596,7 @@ static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
CAM_ICP_A5_SET_IRQ_CB,
&irq_cb, sizeof(irq_cb));
if (rc)
goto set_irq_failed;
return rc;
fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
@@ -3579,22 +3607,16 @@ static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
CAM_ICP_A5_CMD_SET_FW_BUF,
&fw_buf_info, sizeof(fw_buf_info));
if (rc)
goto set_irq_failed;
cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
return rc;
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_FW_DOWNLOAD,
NULL, 0);
if (rc)
goto fw_download_failed;
return rc;
return rc;
fw_download_failed:
cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
set_irq_failed:
return rc;
return cam_icp_mgr_proc_resume(hw_mgr);
}
static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
@@ -3667,8 +3689,7 @@ static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
}
return cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
hw_mgr->a5_jtag_debug);
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
}
static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
@@ -3760,14 +3781,22 @@ static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
if (rc)
return -EINVAL;
rc = cam_icp_mgr_proc_resume(hw_mgr);
if (rc)
goto hw_deinit;
rc = cam_icp_mgr_hfi_resume(hw_mgr);
if (rc)
goto hfi_resume_failed;
goto power_collapse;
CAM_DBG(CAM_ICP, "Exit");
return rc;
hfi_resume_failed:
cam_icp_mgr_icp_power_collapse(hw_mgr);
power_collapse:
__power_collapse(hw_mgr);
hw_deinit:
a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
return rc;
}
@@ -3843,8 +3872,7 @@ fw_init_failed:
cam_hfi_deinit(
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
hfi_init_failed:
cam_hfi_disable_cpu(
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
cam_icp_mgr_proc_suspend(hw_mgr);
fw_download_failed:
cam_icp_mgr_device_deinit(hw_mgr);
dev_init_fail: