msm: eva: propagate tip of CVP driver to EVA
Merge cvp driver to EVA

Change-Id: I3fb962bfae750f49f821c16061bd78fa5425959e
Signed-off-by: George Shen <sqiao@codeaurora.org>
@@ -148,9 +148,13 @@ struct cvp_kmd_hfi_packet {
#define CVP_KMD_PROP_PWR_FW_OP 0x1B
#define CVP_KMD_PROP_PWR_DDR_OP 0x1C
#define CVP_KMD_PROP_PWR_SYSCACHE_OP 0x1D
#define CVP_KMD_PROP_PWR_FPS_FDU 0x1E
#define CVP_KMD_PROP_PWR_FPS_MPU 0x1F
#define CVP_KMD_PROP_PWR_FPS_OD 0x20
#define CVP_KMD_PROP_PWR_FPS_ICA 0x21

#define MAX_KMD_PROP_NUM_PER_PACKET 8
#define MAX_KMD_PROP_TYPE (CVP_KMD_PROP_PWR_SYSCACHE_OP + 1)
#define MAX_KMD_PROP_TYPE (CVP_KMD_PROP_PWR_FPS_ICA + 1)

struct cvp_kmd_sys_property {
__u32 prop_type;
@@ -159,7 +163,7 @@ struct cvp_kmd_sys_property {

struct cvp_kmd_sys_properties {
__u32 prop_num;
struct cvp_kmd_sys_property prop_data[8];
struct cvp_kmd_sys_property prop_data[MAX_KMD_PROP_NUM_PER_PACKET];
};

#define SESSION_CREATE 1
@@ -18,7 +18,8 @@ msm-eva-objs := eva/cvp.o \
eva/msm_cvp_clocks.o\
eva/msm_cvp_dsp.o \
eva/msm_cvp_buf.o \
eva/msm_cvp_synx.o
eva/msm_cvp_synx.o \
eva/cvp_fw_load.o

obj-$(CONFIG_MSM_EVA) := msm-eva.o
@@ -107,6 +107,17 @@ static int read_platform_resources(struct msm_cvp_core *core,
return rc;
}

static void init_cycle_info(struct cvp_cycle_info *info)
{
memset(info->sum_fps, 0, HFI_MAX_HW_THREADS*sizeof(u32));
memset(info->hi_ctrl_lim, 0, HFI_MAX_HW_THREADS*sizeof(u32));
memset(info->lo_ctrl_lim, 0, HFI_MAX_HW_THREADS*sizeof(u32));
memset(info->cycle, 0,
HFI_MAX_HW_THREADS*sizeof(struct cvp_cycle_stat));
info->conf_freq = 0;
mutex_init(&info->lock);
}

static int msm_cvp_initialize_core(struct platform_device *pdev,
struct msm_cvp_core *core)
{
@@ -132,6 +143,7 @@ static int msm_cvp_initialize_core(struct platform_device *pdev,

INIT_DELAYED_WORK(&core->fw_unload_work, msm_cvp_fw_unload_handler);
INIT_WORK(&core->ssr_work, msm_cvp_ssr_handler);
init_cycle_info(&core->dyn_clk);

return rc;
}
@@ -490,6 +502,7 @@ static int msm_cvp_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
dev_set_drvdata(&pdev->dev, NULL);
mutex_destroy(&core->lock);
mutex_destroy(&core->dyn_clk.lock);
kfree(core);
return rc;
}
@@ -268,4 +268,6 @@ int cvp_iris_hfi_initialize(struct cvp_hfi_device *hdev, u32 device_id,
struct msm_cvp_platform_resources *res,
hfi_cmd_response_callback callback);

int load_cvp_fw_impl(struct iris_hfi_device *device);
int unload_cvp_fw_impl(struct iris_hfi_device *device);
#endif
msm/eva/cvp_fw_load.c (new file, 166 lines)
@@ -0,0 +1,166 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
#include "msm_cvp_debug.h"
#include "cvp_comm_def.h"
#include "cvp_core_hfi.h"
#include "cvp_hfi.h"
#ifdef CVP_MDT_ENABLED
#include <linux/of_address.h>
#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>
#else
#include <soc/qcom/subsystem_restart.h>
#endif

#define MAX_FIRMWARE_NAME_SIZE 128

#ifdef CVP_MDT_ENABLED
static int __load_fw_to_memory(struct platform_device *pdev,
const char *fw_name)
{
int rc = 0;
const struct firmware *firmware = NULL;
char firmware_name[MAX_FIRMWARE_NAME_SIZE] = {0};
struct device_node *node = NULL;
struct resource res = {0};
phys_addr_t phys = 0;
size_t res_size = 0;
ssize_t fw_size = 0;
void *virt = NULL;
int pas_id = 0;

if (!fw_name || !(*fw_name) || !pdev) {
dprintk(CVP_ERR, "%s: Invalid inputs\n", __func__);
return -EINVAL;
}
if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4) {
dprintk(CVP_ERR, "%s: Invalid fw name\n", __func__);
return -EINVAL;
}
scnprintf(firmware_name, ARRAY_SIZE(firmware_name), "%s.mdt", fw_name);

rc = of_property_read_u32(pdev->dev.of_node, "pas-id", &pas_id);
if (rc) {
dprintk(CVP_ERR,
"%s: error %d while reading DT for \"pas-id\"\n",
__func__, rc);
goto exit;
}

node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
if (!node) {
dprintk(CVP_ERR,
"%s: DT error getting \"memory-region\" property\n",
__func__);
return -EINVAL;
}

rc = of_address_to_resource(node, 0, &res);
if (rc) {
dprintk(CVP_ERR,
"%s: error %d getting \"memory-region\" resource\n",
__func__, rc);
goto exit;
}
phys = res.start;
res_size = (size_t)resource_size(&res);

rc = request_firmware(&firmware, firmware_name, &pdev->dev);
if (rc) {
dprintk(CVP_ERR, "%s: error %d requesting \"%s\"\n",
__func__, rc, firmware_name);
goto exit;
}

fw_size = qcom_mdt_get_size(firmware);
if (fw_size < 0 || res_size < (size_t)fw_size) {
rc = -EINVAL;
dprintk(CVP_ERR,
"%s: Corrupted fw image. Alloc size: %lu, fw size: %ld",
__func__, res_size, fw_size);
goto exit;
}

virt = memremap(phys, res_size, MEMREMAP_WC);
if (!virt) {
rc = -ENOMEM;
dprintk(CVP_ERR, "%s: unable to remap firmware memory\n",
__func__);
goto exit;
}

rc = qcom_mdt_load(&pdev->dev, firmware, firmware_name,
pas_id, virt, phys, res_size, NULL);
if (rc) {
dprintk(CVP_ERR, "%s: error %d loading \"%s\"\n",
__func__, rc, firmware_name);
goto exit;
}
rc = qcom_scm_pas_auth_and_reset(pas_id);
if (rc) {
dprintk(CVP_ERR, "%s: error %d authenticating \"%s\"\n",
__func__, rc, firmware_name);
goto exit;
}

memunmap(virt);
release_firmware(firmware);
dprintk(CVP_CORE, "%s: firmware \"%s\" loaded successfully\n",
__func__, firmware_name);
return pas_id;

exit:
if (virt)
memunmap(virt);
if (firmware)
release_firmware(firmware);
return rc;
}
#endif

int load_cvp_fw_impl(struct iris_hfi_device *device)
{
int rc = 0;

if (!device->resources.fw.cookie) {
#ifdef CVP_MDT_ENABLED
device->resources.fw.cookie =
__load_fw_to_memory(device->res->pdev,
device->res->fw_name);
if (device->resources.fw.cookie <= 0) {
dprintk(CVP_ERR, "Failed to download firmware\n");
device->resources.fw.cookie = 0;
rc = -ENOMEM;
}
#else
device->resources.fw.cookie =
subsystem_get_with_fwname("evass",
device->res->fw_name);
if (IS_ERR_OR_NULL(device->resources.fw.cookie)) {
dprintk(CVP_ERR, "Failed to download firmware\n");
device->resources.fw.cookie = NULL;
rc = -ENOMEM;
}
#endif
}
return rc;
}

int unload_cvp_fw_impl(struct iris_hfi_device *device)
{
#ifdef CVP_MDT_ENABLED
qcom_scm_pas_shutdown(device->resources.fw.cookie);
device->resources.fw.cookie = 0;
#else
subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;
#endif
return 0;
}
@@ -21,7 +21,6 @@
#include <linux/soc/qcom/llcc-qcom.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/dma-mapping.h>
#include <linux/reset.h>
#include "hfi_packetization.h"
@@ -304,6 +303,7 @@ static void power_off_iris2(struct iris_hfi_device *device);

static int __set_ubwc_config(struct iris_hfi_device *device);
static void __noc_error_info_iris2(struct iris_hfi_device *device);
static int __enable_hw_power_collapse(struct iris_hfi_device *device);

static struct iris_hfi_vpu_ops iris2_ops = {
.interrupt_init = interrupt_init_iris2,
@@ -364,9 +364,34 @@ int get_hfi_version(void)
return hfi->version;
}

unsigned int get_msg_size(void)
unsigned int get_msg_size(struct cvp_hfi_msg_session_hdr *hdr)
{
return sizeof(struct cvp_hfi_msg_session_hdr);
struct msm_cvp_core *core;
struct iris_hfi_device *device;
u32 minor_ver;

core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
if (core)
device = core->device->hfi_device_data;
else
return 0;

if (!device) {
dprintk(CVP_ERR, "%s: NULL device\n", __func__);
return 0;
}

minor_ver = (device->version & HFI_VERSION_MINOR_MASK) >>
HFI_VERSION_MINOR_SHIFT;

if (minor_ver < 2)
return sizeof(struct cvp_hfi_msg_session_hdr);

if (hdr->packet_type == HFI_MSG_SESSION_CVP_FD)
return sizeof(struct cvp_hfi_msg_session_hdr_ext);
else
return sizeof(struct cvp_hfi_msg_session_hdr);

}

unsigned int get_msg_session_id(void *msg)
@@ -1102,8 +1127,31 @@ static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state)

static inline int __boot_firmware(struct iris_hfi_device *device)
{
int rc = 0;
int rc = 0, loop = 10;
u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 1000;
u32 reg_gdsc;

/*
 * Hand off control of regulators to h/w _after_ enabling clocks.
 * Note that the GDSC will turn off when switching from normal
 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
 * present. Since Iris isn't up yet, the GDSC will be off briefly.
 */
if (__enable_hw_power_collapse(device))
dprintk(CVP_ERR, "Failed to enabled inter-frame PC\n");

while (loop) {
reg_gdsc = __read_register(device, CVP_CC_MVS1_GDSCR);
if (reg_gdsc & 0x80000000) {
usleep_range(100, 200);
loop--;
} else {
break;
}
}

if (!loop)
dprintk(CVP_ERR, "fail to power off CORE during resume\n");

ctrl_init_val = BIT(0);
__write_register(device, CVP_CTRL_INIT, ctrl_init_val);
@@ -1492,8 +1540,6 @@ static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
{
int rc = 0;
u32 i;
struct cvp_hfi_queue_table_header *q_tbl_hdr;
struct cvp_hfi_queue_header *q_hdr;
struct cvp_iface_q_info *iface_q;
int offset = 0;
phys_addr_t fw_bias = 0;
@@ -1552,43 +1598,11 @@ static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
iface_q->q_array.align_virtual_addr = kvaddr + offset;
iface_q->q_array.mem_size = CVP_IFACEQ_QUEUE_SIZE;
offset += iface_q->q_array.mem_size;
iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
dev->dsp_iface_q_table.align_virtual_addr, i);
__set_queue_hdr_defaults(iface_q->q_hdr);
spin_lock_init(&iface_q->hfi_lock);
}

q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
dev->dsp_iface_q_table.align_virtual_addr;
q_tbl_hdr->qtbl_version = 0;
q_tbl_hdr->device_addr = (void *)dev;
strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
q_tbl_hdr->qtbl_qhdr0_offset =
sizeof(struct cvp_hfi_queue_table_header);
q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;
cvp_dsp_init_hfi_queue_hdr(dev);

iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;

iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;

iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_DBGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
/*
 * Set receive request to zero on debug queue as there is no
 * need of interrupt from cvp hardware for debug messages
 */
q_hdr->qhdr_rx_req = 0;
return rc;

fail_dma_map:
@@ -1964,6 +1978,7 @@ static int __sys_set_power_control(struct iris_hfi_device *device,
static int iris_hfi_core_init(void *device)
{
int rc = 0;
u32 ipcc_iova;
struct cvp_hfi_cmd_sys_init_packet pkt;
struct cvp_hfi_cmd_sys_get_property_packet version_pkt;
struct iris_hfi_device *dev;
@@ -2011,6 +2026,12 @@ static int iris_hfi_core_init(void *device)
goto err_core_init;
}

rc = msm_cvp_map_ipcc_regs(&ipcc_iova);
if (!rc) {
dprintk(CVP_CORE, "IPCC iova 0x%x\n", ipcc_iova);
__write_register(dev, CVP_MMAP_ADDR, ipcc_iova);
}

rc = __boot_firmware(dev);
if (rc) {
dprintk(CVP_ERR, "Failed to start core\n");
@@ -3350,7 +3371,7 @@ static int reset_ahb2axi_bridge(struct iris_hfi_device *device)
}

/* wait for deassert */
usleep_range(400, 450);
usleep_range(1000, 1050);

rc = __handle_reset_clk(device->res, i, DEASSERT, s);
if (rc) {
@@ -3625,11 +3646,6 @@ static void __deinit_resources(struct iris_hfi_device *device)
device->sys_init_capabilities = NULL;
}

static int __protect_cp_mem(struct iris_hfi_device *device)
{
return device ? 0 : -EINVAL;
}

static int __disable_regulator(struct regulator_info *rinfo,
struct iris_hfi_device *device)
{
@@ -4014,15 +4030,6 @@ static int __iris_power_on(struct iris_hfi_device *device)
device->intr_status = 0;
enable_irq(device->cvp_hal_data->irq);

/*
 * Hand off control of regulators to h/w _after_ enabling clocks.
 * Note that the GDSC will turn off when switching from normal
 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
 * present. Since Iris isn't up yet, the GDSC will be off briefly.
 */
if (__enable_hw_power_collapse(device))
dprintk(CVP_ERR, "Failed to enabled inter-frame PC\n");

return rc;

fail_enable_clks:
@@ -4175,10 +4182,10 @@ static void power_off_iris2(struct iris_hfi_device *device)
/* HPG 6.1.2 Step 6 */
__disable_unprepare_clks(device);

/* HPG 6.1.2 Step 7 & 8 */
if (call_iris_op(device, reset_ahb2axi_bridge, device))
dprintk(CVP_ERR, "Failed to reset ahb2axi\n");

/*
 * HPG 6.1.2 Step 7 & 8
 * per new HPG update, core clock reset will be unnecessary
 */
if (__unvote_buses(device))
dprintk(CVP_WARN, "Failed to unvote for buses\n");

@@ -4212,8 +4219,6 @@ static inline int __resume(struct iris_hfi_device *device)
goto err_iris_power_on;
}

reg_gdsc = __read_register(device, CVP_CC_MVS1C_GDSCR);
reg_cbcr = __read_register(device, CVP_CC_MVS1C_CBCR);
if (!(reg_gdsc & 0x80000000) || (reg_cbcr & 0x80000000))
@@ -4228,6 +4233,7 @@ static inline int __resume(struct iris_hfi_device *device)
}

__setup_ucregion_memory_map(device);

/* Wait for boot completion */
rc = __boot_firmware(device);
if (rc) {
@@ -4293,31 +4299,13 @@ static int __load_fw(struct iris_hfi_device *device)

if ((!device->res->use_non_secure_pil && !device->res->firmware_base)
|| device->res->use_non_secure_pil) {
if (!device->resources.fw.cookie)
device->resources.fw.cookie =
subsystem_get_with_fwname("evass",
device->res->fw_name);

if (IS_ERR_OR_NULL(device->resources.fw.cookie)) {
dprintk(CVP_ERR, "Failed to download firmware\n");
device->resources.fw.cookie = NULL;
rc = -ENOMEM;
rc = load_cvp_fw_impl(device);
if (rc)
goto fail_load_fw;
}
}

if (!device->res->firmware_base) {
rc = __protect_cp_mem(device);
if (rc) {
dprintk(CVP_ERR, "Failed to protect memory\n");
goto fail_protect_mem;
}
}
return rc;
fail_protect_mem:
if (device->resources.fw.cookie)
subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;

fail_load_fw:
call_iris_op(device, power_off, device);
fail_iris_power_on:
@@ -4336,10 +4324,9 @@ static void __unload_fw(struct iris_hfi_device *device)
if (device->state != IRIS_STATE_DEINIT)
flush_workqueue(device->iris_pm_workq);

subsystem_put(device->resources.fw.cookie);
unload_cvp_fw_impl(device);
__interface_queues_release(device);
call_iris_op(device, power_off, device);
device->resources.fw.cookie = NULL;
__deinit_resources(device);

dprintk(CVP_WARN, "Firmware unloaded\n");
@@ -4389,9 +4376,12 @@ static int iris_hfi_get_core_capabilities(void *dev)
return 0;
}

static u32 cvp_arp_test_regs[16];
static u32 cvp_dma_test_regs[512];

static void __noc_error_info_iris2(struct iris_hfi_device *device)
{
u32 val = 0;
u32 val = 0, regi, i;

val = __read_register(device, CVP_NOC_ERR_SWID_LOW_OFFS);
dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_SWID_LOW: %#x\n", val);
@@ -4446,6 +4436,32 @@ static void __noc_error_info_iris2(struct iris_hfi_device *device)
dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_LOW: %#x\n", val);
val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS);
dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH: %#x\n", val);
#define CVP_SS_CLK_HALT 0x8
#define CVP_SS_CLK_EN 0xC
#define CVP_SS_ARP_TEST_BUS_CONTROL 0x700
#define CVP_SS_ARP_TEST_BUS_REGISTER 0x704
#define CVP_DMA_TEST_BUS_CONTROL 0x66A0
#define CVP_DMA_TEST_BUS_REGISTER 0x66A4
#define CVP_VPU_WRAPPER_CORE_CONFIG 0xB0088
__write_register(device, CVP_SS_CLK_HALT, 0);
__write_register(device, CVP_SS_CLK_EN, 0x3f);
__write_register(device, CVP_VPU_WRAPPER_CORE_CONFIG, 0);

for (i = 0; i < 15; i++) {
regi = 0xC0000000 + i;
__write_register(device, CVP_SS_ARP_TEST_BUS_CONTROL, regi);
val = __read_register(device, CVP_SS_ARP_TEST_BUS_REGISTER);
cvp_arp_test_regs[i] = val;
dprintk(CVP_ERR, "ARP_CTL:%x - %x\n", regi, val);
}

for (i = 0; i < 512; i++) {
regi = 0x40000000 + i;
__write_register(device, CVP_DMA_TEST_BUS_CONTROL, regi);
val = __read_register(device, CVP_DMA_TEST_BUS_REGISTER);
cvp_dma_test_regs[i] = val;
dprintk(CVP_ERR, "DMA_CTL:%x - %x\n", regi, val);
}
}

static int iris_hfi_noc_error_info(void *dev)
@@ -9,6 +9,7 @@
#include <media/msm_media_info.h>
#include "cvp_hfi_helper.h"
#include "cvp_hfi_api.h"
#include "cvp_comm_def.h"

#define HFI_CMD_SESSION_CVP_START \
(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET + \
@@ -199,7 +200,11 @@ struct cvp_hal_session {
};

struct msm_cvp_fw {
#ifdef CVP_MDT_ENABLED
int cookie;
#else
void *cookie;
#endif
};

int cvp_hfi_process_msg_packet(u32 device_id,
@@ -369,7 +369,7 @@ void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
int get_signal_from_pkt_type(unsigned int type);
int get_hfi_version(void);
unsigned int get_msg_size(void);
unsigned int get_msg_size(struct cvp_hfi_msg_session_hdr *hdr);
unsigned int get_msg_session_id(void *msg);
unsigned int get_msg_errorcode(void *msg);
int get_msg_opconfigs(void *msg, unsigned int *session_id,
@@ -336,6 +336,29 @@ struct cvp_hfi_msg_session_hdr {
u32 stream_idx;
} __packed;

#define HFI_MAX_HW_ACTIVATIONS_PER_FRAME (6)
#define HFI_MAX_HW_THREADS (4)

enum hfi_hw_thread {
HFI_HW_FDU,
HFI_HW_MPU,
HFI_HW_OD,
HFI_HW_ICA
};

struct cvp_hfi_msg_session_hdr_ext {
u32 size;
u32 packet_type;
u32 session_id;
u32 error_type;
struct cvp_hfi_client client_data;
u32 stream_idx;
u32 busy_cycles;
u32 total_cycles;
u32 hw_cycles[HFI_MAX_HW_THREADS][HFI_MAX_HW_ACTIVATIONS_PER_FRAME];
u32 fw_cycles[HFI_MAX_HW_ACTIVATIONS_PER_FRAME];
} __packed;

struct cvp_hfi_buffer_mapping_type {
u32 index;
u32 device_addr;
@@ -220,4 +220,5 @@
#define CVP_CC_MVS0C_GDSCR (CVP_CC_BASE_OFFS + 0xBF8)
#define CVP_CC_MVS1C_GDSCR (CVP_CC_BASE_OFFS + 0xC98)
#define CVP_CC_MVS1C_CBCR (CVP_CC_BASE_OFFS + 0xCD4)
#define CVP_CC_MVS1_GDSCR (CVP_CC_BASE_OFFS + 0xD98)
#endif
@@ -313,7 +313,7 @@ static int hfi_process_session_set_buf_done(u32 device_id,
struct cvp_hfi_msg_session_hdr *pkt =
(struct cvp_hfi_msg_session_hdr *)hdr;
struct msm_cvp_cb_cmd_done cmd_done = {0};
unsigned int pkt_size = get_msg_size();
unsigned int pkt_size = get_msg_size(pkt);

if (!pkt || pkt->size < pkt_size) {
dprintk(CVP_ERR, "bad packet/packet size %d\n",
@@ -367,7 +367,7 @@ static int hfi_process_session_rel_buf_done(u32 device_id,
struct cvp_hfi_msg_session_hdr *pkt =
(struct cvp_hfi_msg_session_hdr *)hdr;
struct msm_cvp_cb_cmd_done cmd_done = {0};
unsigned int pkt_size = get_msg_size();
unsigned int pkt_size = get_msg_size(pkt);

if (!pkt || pkt->size < pkt_size) {
dprintk(CVP_ERR, "bad packet/packet size %d\n",
@@ -393,6 +393,8 @@ static int hfi_process_session_cvp_operation_config(u32 device_id,
{
struct cvp_hfi_msg_session_op_cfg_packet *pkt =
(struct cvp_hfi_msg_session_op_cfg_packet *)hdr;
struct cvp_hfi_msg_session_hdr *lhdr =
(struct cvp_hfi_msg_session_hdr *)hdr;
struct msm_cvp_cb_cmd_done cmd_done = {0};
int signal;
unsigned int conf_id, session_id, error_type;
@@ -400,7 +402,7 @@ static int hfi_process_session_cvp_operation_config(u32 device_id,
if (!pkt) {
dprintk(CVP_ERR, "%s: invalid param\n", __func__);
return -EINVAL;
} else if (pkt->size < get_msg_size()) {
} else if (pkt->size < get_msg_size(lhdr)) {
dprintk(CVP_ERR,
"%s: bad_pkt_size\n", __func__);
return -E2BIG;
@@ -514,7 +516,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
return -ENOMEM;
}

memcpy(&sess_msg->pkt, pkt, get_msg_size());
memcpy(&sess_msg->pkt, pkt, get_msg_size(pkt));

dprintk(CVP_HFI,
"%s: Received msg %x cmd_done.status=%d sessionid=%x\n",
@@ -111,12 +111,15 @@ static int cvp_wait_process_message(struct msm_cvp_inst *inst,
goto exit;
}

if (out)
memcpy(out, &msg->pkt, sizeof(struct cvp_hfi_msg_session_hdr));
if (!out) {
kmem_cache_free(cvp_driver->msg_cache, msg);
goto exit;
}

kmem_cache_free(cvp_driver->msg_cache, msg);
hdr = (struct cvp_hfi_msg_session_hdr *)out;
hdr = (struct cvp_hfi_msg_session_hdr *)&msg->pkt;
memcpy(out, &msg->pkt, get_msg_size(hdr));
msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
kmem_cache_free(cvp_driver->msg_cache, msg);

exit:
return rc;
@@ -271,7 +274,7 @@ static bool cvp_fence_wait(struct cvp_fence_queue *q,

f = list_first_entry(&q->wait_list, struct cvp_fence_command, list);
list_del_init(&f->list);
list_add_tail(&q->sched_list, &f->list);
list_add_tail(&f->list, &q->sched_list);

mutex_unlock(&q->lock);
*fence = f;
@@ -279,6 +282,166 @@ static bool cvp_fence_wait(struct cvp_fence_queue *q,
return true;
}

static int cvp_readjust_clock(struct msm_cvp_core *core,
u32 avg_cycles, enum hfi_hw_thread i)
{
int rc = 0;
struct allowed_clock_rates_table *tbl = NULL;
unsigned int tbl_size = 0;
unsigned int cvp_min_rate = 0, cvp_max_rate = 0;
unsigned long tmp = core->curr_freq;
unsigned long lo_freq = 0;
u32 j;

dprintk(CVP_PWR,
"%s:%d - %d - avg_cycles %u > hi_tresh %u\n",
__func__, __LINE__, i, avg_cycles,
core->dyn_clk.hi_ctrl_lim[i]);

core->curr_freq = ((avg_cycles * core->dyn_clk.sum_fps[i]) << 1)/3;
dprintk(CVP_PWR,
"%s - cycles tot %u, avg %u. sum_fps %u, cur_freq %u\n",
__func__,
core->dyn_clk.cycle[i].total,
avg_cycles,
core->dyn_clk.sum_fps[i],
core->curr_freq);

tbl = core->resources.allowed_clks_tbl;
tbl_size = core->resources.allowed_clks_tbl_size;
cvp_min_rate = tbl[0].clock_rate;
cvp_max_rate = tbl[tbl_size - 1].clock_rate;

if (core->curr_freq > cvp_max_rate) {
core->curr_freq = cvp_max_rate;
lo_freq = (tbl_size > 1) ?
tbl[tbl_size - 2].clock_rate :
cvp_min_rate;
} else if (core->curr_freq <= cvp_min_rate) {
core->curr_freq = cvp_min_rate;
lo_freq = cvp_min_rate;
} else {
for (j = 1; j < tbl_size; j++)
if (core->curr_freq <= tbl[j].clock_rate)
break;
core->curr_freq = tbl[j].clock_rate;
lo_freq = tbl[j-1].clock_rate;
}

dprintk(CVP_PWR,
"%s:%d - %d - Readjust to %u\n",
__func__, __LINE__, i, core->curr_freq);
rc = msm_cvp_set_clocks(core);
if (rc) {
dprintk(CVP_ERR,
"Failed to set clock rate %u: %d %s\n",
core->curr_freq, rc, __func__);
core->curr_freq = tmp;
} else {
lo_freq = (lo_freq < core->dyn_clk.conf_freq) ?
core->dyn_clk.conf_freq : lo_freq;
core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
((core->curr_freq*3)>>1)/core->dyn_clk.sum_fps[i] : 0;
core->dyn_clk.lo_ctrl_lim[i] =
core->dyn_clk.sum_fps[i] ?
((lo_freq*3)>>1)/core->dyn_clk.sum_fps[i] : 0;

dprintk(CVP_PWR,
"%s - Readjust clk to %u. New lim [%d] hi %u lo %u\n",
__func__, core->curr_freq, i,
core->dyn_clk.hi_ctrl_lim[i],
core->dyn_clk.lo_ctrl_lim[i]);
}

return rc;
}

static int cvp_check_clock(struct msm_cvp_inst *inst,
struct cvp_hfi_msg_session_hdr_ext *hdr)
{
int rc = 0;
u32 i, j;
u32 hw_cycles[HFI_MAX_HW_THREADS] = {0};
u32 fw_cycles = 0;
struct msm_cvp_core *core = inst->core;

for (i = 0; i < HFI_MAX_HW_ACTIVATIONS_PER_FRAME; ++i)
fw_cycles += hdr->fw_cycles[i];

for (i = 0; i < HFI_MAX_HW_THREADS; ++i)
for (j = 0; j < HFI_MAX_HW_ACTIVATIONS_PER_FRAME; ++j)
hw_cycles[i] += hdr->hw_cycles[i][j];

dprintk(CVP_PWR, "%s - cycles fw %u. FDU %d MPU %d ODU %d ICA %d\n",
__func__, fw_cycles, hw_cycles[0],
hw_cycles[1], hw_cycles[2], hw_cycles[3]);

mutex_lock(&core->dyn_clk.lock);
for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
dprintk(CVP_PWR, "%s - %d: hw_cycles %u, tens_thresh %u\n",
__func__, i, hw_cycles[i],
core->dyn_clk.hi_ctrl_lim[i]);
if (core->dyn_clk.hi_ctrl_lim[i]) {
if (core->dyn_clk.cycle[i].size < CVP_CYCLE_STAT_SIZE)
core->dyn_clk.cycle[i].size++;
else
core->dyn_clk.cycle[i].total -=
core->dyn_clk.cycle[i].busy[
core->dyn_clk.cycle[i].idx];
if (hw_cycles[i]) {
core->dyn_clk.cycle[i].busy[
core->dyn_clk.cycle[i].idx]
= hw_cycles[i] + fw_cycles;
core->dyn_clk.cycle[i].total
+= hw_cycles[i] + fw_cycles;
dprintk(CVP_PWR,
"%s: busy (hw + fw) cycles = %u\n",
__func__,
core->dyn_clk.cycle[i].busy[
core->dyn_clk.cycle[i].idx]);
dprintk(CVP_PWR, "total cycles %u\n",
core->dyn_clk.cycle[i].total);
} else {
core->dyn_clk.cycle[i].busy[
core->dyn_clk.cycle[i].idx] =
hdr->busy_cycles;
core->dyn_clk.cycle[i].total +=
hdr->busy_cycles;
dprintk(CVP_PWR,
"%s - busy cycles = %u total %u\n",
__func__,
core->dyn_clk.cycle[i].busy[
core->dyn_clk.cycle[i].idx],
core->dyn_clk.cycle[i].total);
}

core->dyn_clk.cycle[i].idx =
(core->dyn_clk.cycle[i].idx ==
CVP_CYCLE_STAT_SIZE-1) ?
0 : core->dyn_clk.cycle[i].idx+1;

dprintk(CVP_PWR, "%s - %d: size %u, tens_thresh %u\n",
__func__, i, core->dyn_clk.cycle[i].size,
core->dyn_clk.hi_ctrl_lim[i]);
if (core->dyn_clk.cycle[i].size == CVP_CYCLE_STAT_SIZE
&& core->dyn_clk.hi_ctrl_lim[i] != 0) {
u32 avg_cycles =
core->dyn_clk.cycle[i].total>>3;
if ((avg_cycles > core->dyn_clk.hi_ctrl_lim[i])
|| (avg_cycles <=
core->dyn_clk.lo_ctrl_lim[i])) {
rc = cvp_readjust_clock(core,
avg_cycles,
i);
}
}
}
}
mutex_unlock(&core->dyn_clk.lock);

return rc;
}

static int cvp_fence_proc(struct msm_cvp_inst *inst,
struct cvp_fence_command *fc,
struct cvp_hfi_cmd_session_hdr *pkt)
@@ -290,7 +453,8 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
struct cvp_hfi_device *hdev;
struct cvp_session_queue *sq;
u32 hfi_err = HFI_ERR_NONE;
struct cvp_hfi_msg_session_hdr *hdr;
struct cvp_hfi_msg_session_hdr_ext hdr;
bool clock_check = false;

dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);

@@ -298,7 +462,8 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
sq = &inst->session_queue_fence;
ktid = pkt->client_data.kdata;

if (cvp_synx_ops(inst, CVP_INPUT_SYNX, fc, &synx_state)) {
rc = cvp_synx_ops(inst, CVP_INPUT_SYNX, fc, &synx_state);
if (rc) {
msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
goto exit;
}
@@ -314,9 +479,16 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,

timeout = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
(struct cvp_kmd_hfi_packet *)pkt);
hdr = (struct cvp_hfi_msg_session_hdr *)pkt;
hfi_err = hdr->error_type;
(struct cvp_kmd_hfi_packet *)&hdr);
if (get_msg_size((struct cvp_hfi_msg_session_hdr *) &hdr)
== sizeof(struct cvp_hfi_msg_session_hdr_ext)) {
struct cvp_hfi_msg_session_hdr_ext *fhdr =
(struct cvp_hfi_msg_session_hdr_ext *)&hdr;
dprintk(CVP_HFI, "busy cycle 0x%x, total 0x%x\n",
fhdr->busy_cycles, fhdr->total_cycles);
clock_check = true;
}
hfi_err = hdr.error_type;
if (rc) {
dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
current->comm, __func__, rc);
@@ -339,19 +511,23 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,

exit:
rc = cvp_synx_ops(inst, CVP_OUTPUT_SYNX, fc, &synx_state);

if (clock_check)
cvp_check_clock(inst,
(struct cvp_hfi_msg_session_hdr_ext *)&hdr);
return rc;
}

static int cvp_alloc_fence_data(struct cvp_fence_command **f, u32 size)
{
struct cvp_fence_command *fcmd;
int alloc_size = sizeof(struct cvp_hfi_msg_session_hdr_ext);

fcmd = kzalloc(sizeof(struct cvp_fence_command), GFP_KERNEL);
if (!fcmd)
return -ENOMEM;

fcmd->pkt = kzalloc(size, GFP_KERNEL);
alloc_size = (alloc_size >= size) ? alloc_size : size;
fcmd->pkt = kzalloc(alloc_size, GFP_KERNEL);
if (!fcmd->pkt) {
kfree(fcmd);
return -ENOMEM;
@@ -414,6 +590,7 @@ wait:
mutex_lock(&q->lock);
cvp_release_synx(inst, f);
list_del_init(&f->list);
state = q->state;
mutex_unlock(&q->lock);

dprintk(CVP_SYNX, "%s done with %d ktid %llu frameID %llu rc %d\n",
@@ -421,6 +598,9 @@ wait:

cvp_free_fence_data(f);

if (rc && state != QUEUE_ACTIVE)
goto exit;

goto wait;

exit:
@@ -587,7 +767,7 @@ static void aggregate_power_update(struct msm_cvp_core *core,
} else {
i = 1;
}
dprintk(CVP_PROF, "pwrUpdate %pK fdu %u od %u mpu %u ica %u\n",
dprintk(CVP_PROF, "pwrUpdate fdu %u od %u mpu %u ica %u\n",
inst->prop.fdu_cycles,
inst->prop.od_cycles,
inst->prop.mpu_cycles,
@@ -629,6 +809,21 @@ static void aggregate_power_update(struct msm_cvp_core *core,
op_bw_max[i] =
(op_bw_max[i] >= inst->prop.ddr_op_bw) ?
op_bw_max[i] : inst->prop.ddr_op_bw;

dprintk(CVP_PWR, "%s:%d - fps fdu %d mpu %d od %d ica %d\n",
__func__, __LINE__,
inst->prop.fps[HFI_HW_FDU], inst->prop.fps[HFI_HW_MPU],
inst->prop.fps[HFI_HW_OD], inst->prop.fps[HFI_HW_ICA]);
core->dyn_clk.sum_fps[HFI_HW_FDU] += inst->prop.fps[HFI_HW_FDU];
core->dyn_clk.sum_fps[HFI_HW_MPU] += inst->prop.fps[HFI_HW_MPU];
core->dyn_clk.sum_fps[HFI_HW_OD] += inst->prop.fps[HFI_HW_OD];
core->dyn_clk.sum_fps[HFI_HW_ICA] += inst->prop.fps[HFI_HW_ICA];
dprintk(CVP_PWR, "%s:%d - sum_fps fdu %d mpu %d od %d ica %d\n",
__func__, __LINE__,
core->dyn_clk.sum_fps[HFI_HW_FDU],
core->dyn_clk.sum_fps[HFI_HW_MPU],
core->dyn_clk.sum_fps[HFI_HW_OD],
core->dyn_clk.sum_fps[HFI_HW_ICA]);
}

for (i = 0; i < 2; i++) {
@@ -680,6 +875,7 @@ static int adjust_bw_freqs(void)
struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
unsigned long tmp, core_sum, op_core_sum, bw_sum;
int i, rc = 0;
unsigned long ctrl_freq;

core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);

@@ -714,7 +910,7 @@ static int adjust_bw_freqs(void)

if (core_sum > cvp_max_rate) {
core_sum = cvp_max_rate;
} else if (core_sum < cvp_min_rate) {
} else if (core_sum <= cvp_min_rate) {
core_sum = cvp_min_rate;
} else {
for (i = 1; i < tbl_size; i++)
@@ -745,6 +941,18 @@ static int adjust_bw_freqs(void)
core->curr_freq = tmp;
return rc;
}

ctrl_freq = (core->curr_freq*3)>>1;
mutex_lock(&core->dyn_clk.lock);
core->dyn_clk.conf_freq = core->curr_freq;
for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
ctrl_freq/core->dyn_clk.sum_fps[i] : 0;
core->dyn_clk.lo_ctrl_lim[i] =
core->dyn_clk.hi_ctrl_lim[i];
}
mutex_unlock(&core->dyn_clk.lock);

hdev->clk_freq = core->curr_freq;
rc = icc_set_bw(bus->client, bw_sum, 0);
if (rc)
@@ -1122,7 +1330,7 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
return -EINVAL;
}

if (props->prop_num >= MAX_KMD_PROP_NUM_PER_PACKET) {
if (props->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
dprintk(CVP_ERR, "Too many properties %d to set\n",
props->prop_num);
return -E2BIG;
@@ -1194,6 +1402,18 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
case CVP_KMD_PROP_PWR_SYSCACHE_OP:
session_prop->ddr_op_cache = prop_array[i].data;
break;
case CVP_KMD_PROP_PWR_FPS_FDU:
session_prop->fps[HFI_HW_FDU] = prop_array[i].data;
break;
case CVP_KMD_PROP_PWR_FPS_MPU:
session_prop->fps[HFI_HW_MPU] = prop_array[i].data;
break;
case CVP_KMD_PROP_PWR_FPS_OD:
session_prop->fps[HFI_HW_OD] = prop_array[i].data;
break;
case CVP_KMD_PROP_PWR_FPS_ICA:
session_prop->fps[HFI_HW_ICA] = prop_array[i].data;
break;
default:
dprintk(CVP_ERR,
"unrecognized sys property to set %d\n",
@@ -1275,10 +1495,8 @@ static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
mutex_lock(&q->lock);
list_for_each_entry(f, &q->sched_list, list) {
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
dprintk(CVP_SYNX, "%s: frame %llu is in sched_list\n",
__func__, ktid);
dprintk(CVP_SYNX, "%s: frameID %llu is in sched_list\n",
__func__, f->frame_id);
dprintk(CVP_SYNX, "%s: frame %llu %llu is in sched_list\n",
__func__, ktid, f->frame_id);
++count;
}
mutex_unlock(&q->lock);
@@ -1310,14 +1528,71 @@ retry:
return rc;
}

static void cvp_clean_fence_queue(struct msm_cvp_inst *inst, int synx_state)
{
struct cvp_fence_queue *q;
struct cvp_fence_command *f, *d;
u64 ktid;

q = &inst->fence_cmd_queue;

mutex_lock(&q->lock);
q->mode = OP_DRAINING;

list_for_each_entry_safe(f, d, &q->wait_list, list) {
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

dprintk(CVP_SYNX, "%s: (%#x) flush frame %llu %llu wait_list\n",
__func__, hash32_ptr(inst->session), ktid, f->frame_id);

list_del_init(&f->list);
msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f, synx_state);
cvp_release_synx(inst, f);
cvp_free_fence_data(f);
}

list_for_each_entry(f, &q->sched_list, list) {
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

dprintk(CVP_SYNX, "%s: (%#x)flush frame %llu %llu sched_list\n",
__func__, hash32_ptr(inst->session), ktid, f->frame_id);
cvp_cancel_synx(inst, CVP_INPUT_SYNX, f, synx_state);
}

mutex_unlock(&q->lock);
}

int cvp_stop_clean_fence_queue(struct msm_cvp_inst *inst)
{
struct cvp_fence_queue *q;
u32 count = 0, max_retries = 100;

cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_ERROR);
cvp_fence_thread_stop(inst);

/* Waiting for all output synx sent */
q = &inst->fence_cmd_queue;
retry:
mutex_lock(&q->lock);
if (list_empty(&q->sched_list)) {
mutex_unlock(&q->lock);
return 0;
}
mutex_unlock(&q->lock);
usleep_range(500, 1000);
if (++count > max_retries)
return -EBUSY;

goto retry;
}

static int cvp_flush_all(struct msm_cvp_inst *inst)
{
int rc = 0;
struct msm_cvp_inst *s;
struct cvp_fence_queue *q;
struct cvp_fence_command *f, *d;
struct cvp_hfi_device *hdev;
u64 ktid;

if (!inst || !inst->core) {
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -1328,40 +1603,15 @@ static int cvp_flush_all(struct msm_cvp_inst *inst)
if (!s)
return -ECONNRESET;

dprintk(CVP_SESS, "session %llx (%#x)flush all starts\n",
inst, hash32_ptr(inst->session));
q = &inst->fence_cmd_queue;
hdev = inst->core->device;

mutex_lock(&q->lock);
q->mode = OP_DRAINING;
cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);

list_for_each_entry_safe(f, d, &q->wait_list, list) {
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

dprintk(CVP_SESS, "%s: flush frame %llu from wait_list\n",
__func__, ktid);
dprintk(CVP_SESS, "%s: flush frameID %llu from wait_list\n",
__func__, f->frame_id);

list_del_init(&f->list);
msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f);
cvp_release_synx(inst, f);
cvp_free_fence_data(f);
}

list_for_each_entry(f, &q->sched_list, list) {
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

dprintk(CVP_SESS, "%s: flush frame %llu from sched_list\n",
__func__, ktid);
dprintk(CVP_SESS, "%s: flush frameID %llu from sched_list\n",
__func__, f->frame_id);
cvp_cancel_synx(inst, CVP_INPUT_SYNX, f);
}

mutex_unlock(&q->lock);

dprintk(CVP_SESS, "%s: send flush to fw\n", __func__);
dprintk(CVP_SESS, "%s: (%#x) send flush to fw\n",
__func__, hash32_ptr(inst->session));

/* Send flush to FW */
rc = call_hfi_op(hdev, session_flush, (void *)inst->session);
@@ -1377,7 +1627,8 @@ static int cvp_flush_all(struct msm_cvp_inst *inst)
dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
__func__, rc);

dprintk(CVP_SESS, "%s: received flush from fw\n", __func__);
dprintk(CVP_SESS, "%s: (%#x) received flush from fw\n",
__func__, hash32_ptr(inst->session));

exit:
rc = cvp_drain_fence_sched_list(inst);
@@ -1440,6 +1691,8 @@ static int cvp_flush_frame(struct msm_cvp_inst *inst, u64 frame_id)
if (!s)
return -ECONNRESET;

dprintk(CVP_SESS, "Session %llx, flush frame with id %llu\n",
inst, frame_id);
q = &inst->fence_cmd_queue;

mutex_lock(&q->lock);
@@ -1453,14 +1706,13 @@ static int cvp_flush_frame(struct msm_cvp_inst *inst, u64 frame_id)

ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

dprintk(CVP_SESS, "%s: flush frame %llu from wait_list\n",
__func__, ktid);
dprintk(CVP_SESS, "%s: flush frameID %llu from wait_list\n",
__func__, f->frame_id);
dprintk(CVP_SYNX, "%s: flush frame %llu %llu from wait_list\n",
__func__, ktid, f->frame_id);

list_del_init(&f->list);
msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f);
cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f,
SYNX_STATE_SIGNALED_CANCEL);
cvp_release_synx(inst, f);
cvp_free_fence_data(f);
}
@@ -1471,11 +1723,10 @@ static int cvp_flush_frame(struct msm_cvp_inst *inst, u64 frame_id)

ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

dprintk(CVP_SESS, "%s: flush frame %llu from sched_list\n",
__func__, ktid);
dprintk(CVP_SESS, "%s: flush frameID %llu from sched_list\n",
__func__, f->frame_id);
cvp_cancel_synx(inst, CVP_INPUT_SYNX, f);
dprintk(CVP_SYNX, "%s: flush frame %llu %llu from sched_list\n",
__func__, ktid, f->frame_id);
cvp_cancel_synx(inst, CVP_INPUT_SYNX, f,
SYNX_STATE_SIGNALED_CANCEL);
}

mutex_unlock(&q->lock);
@@ -1640,7 +1891,7 @@ int msm_cvp_session_init(struct msm_cvp_inst *inst)
inst->prop.priority = 0;
inst->prop.is_secure = 0;
inst->prop.dsp_mask = 0;
inst->prop.fthread_nr = 2;
inst->prop.fthread_nr = 3;

return rc;
}
@@ -33,4 +33,5 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg);
int msm_cvp_session_init(struct msm_cvp_inst *inst);
int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst);
int cvp_stop_clean_fence_queue(struct msm_cvp_inst *inst);
#endif
@@ -498,6 +498,10 @@ static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
} else if (atomic_dec_and_test(&smem->refcount)) {
clear_bit(smem->bitmap_index,
&inst->dma_cache.usage_bitmap);
dprintk(CVP_MEM, "smem %x %d iova %#x to be reused\n",
hash32_ptr(inst->session),
smem->size,
smem->device_addr);
}
}

@@ -515,7 +519,8 @@ void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
}

ktid &= (FENCE_BIT - 1);
dprintk(CVP_MEM, "%s: unmap frame %llu\n", __func__, ktid);
dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
__func__, hash32_ptr(inst->session), ktid);

found = false;
mutex_lock(&inst->frames.lock);
@@ -166,6 +166,7 @@ int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
enum smem_cache_ops cache_op,
unsigned long offset,
unsigned long size);
int msm_cvp_map_ipcc_regs(u32 *iova);

/* CVP driver internal buffer management functions*/
struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
@@ -8,7 +8,9 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#ifndef CVP_MDT_ENABLED
#include <soc/qcom/subsystem_restart.h>
#endif
#include <asm/div64.h>
#include "msm_cvp_common.h"
#include "cvp_hfi_api.h"
@@ -402,6 +404,8 @@ int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dump_hfi_queue(hdev->hfi_device_data);
rc = -EIO;
} else if (inst->state == MSM_CVP_CORE_INVALID) {
rc = -ECONNRESET;
} else {
rc = 0;
}
@@ -623,11 +627,13 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
struct msm_cvp_core *core = NULL;
struct cvp_hfi_device *hdev = NULL;
struct msm_cvp_inst *inst = NULL;
int rc = 0;
int i, rc = 0;
unsigned long flags = 0;
enum cvp_core_state cur_state;

#ifndef CVP_MDT_ENABLED
subsystem_crashed("evass");
#endif
if (!response) {
dprintk(CVP_ERR,
"Failed to get valid response for sys error\n");
@@ -663,6 +669,10 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
inst->cur_cmd_type, inst->state);
if (inst->state != MSM_CVP_CORE_INVALID) {
change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
if (cvp_stop_clean_fence_queue(inst))
dprintk(CVP_ERR, "Failed to clean fences\n");
for (i = 0; i < ARRAY_SIZE(inst->completions); i++)
complete(&inst->completions[i]);
spin_lock_irqsave(&inst->event_handler.lock, flags);
inst->event_handler.event = CVP_SSR_EVENT;
spin_unlock_irqrestore(
@@ -17,6 +17,8 @@ static int dspVMperm[DSP_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC,
PERM_READ | PERM_WRITE | PERM_EXEC };
static int hlosVMperm[HLOS_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };

static int cvp_reinit_dsp(void);

static int cvp_dsp_send_cmd(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
{
int rc = 0;
@@ -40,7 +42,8 @@ exit:
return rc;
}

static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd,
uint32_t len, struct cvp_dsp_rsp_msg *rsp)
{
int rc = 0;
struct cvp_dsp_apps *me = &gfa_cv;
@@ -63,12 +66,15 @@ static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
}

exit:
rsp->ret = me->pending_dsp2cpu_rsp.ret;
rsp->dsp_state = me->pending_dsp2cpu_rsp.dsp_state;
me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
return rc;
}

static int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
uint32_t size_in_bytes)
uint32_t size_in_bytes,
struct cvp_dsp_rsp_msg *rsp)
{
int rc = 0;
struct cvp_dsp_cmd_msg cmd;
@@ -88,7 +94,7 @@ static int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
"%s: address of buffer, PA=0x%pK size_buff=%d ddr_type=%d\n",
__func__, phys_addr, size_in_bytes, cmd.ddr_type);

rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), rsp);
if (rc) {
dprintk(CVP_ERR,
"%s: cvp_dsp_send_cmd failed rc = %d\n",
@@ -224,6 +230,8 @@ int cvp_dsp_suspend(uint32_t session_flag)
int rc = 0;
struct cvp_dsp_cmd_msg cmd;
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_rsp_msg rsp;
bool retried = false;

cmd.type = CPU2DSP_SUSPEND;

@@ -231,8 +239,11 @@ int cvp_dsp_suspend(uint32_t session_flag)
if (me->state != DSP_READY)
goto exit;

retry:
/* Use cvp_dsp_send_cmd_sync after dsp driver is ready */
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
rc = cvp_dsp_send_cmd_sync(&cmd,
sizeof(struct cvp_dsp_cmd_msg),
&rsp);
if (rc) {
dprintk(CVP_ERR,
"%s: cvp_dsp_send_cmd failed rc = %d\n",
@@ -240,8 +251,31 @@ int cvp_dsp_suspend(uint32_t session_flag)
goto exit;
}

me->state = DSP_SUSPEND;
if (rsp.ret == CPU2DSP_EUNAVAILABLE)
goto fatal_exit;

if (rsp.ret == CPU2DSP_EFATAL) {
if (!retried) {
mutex_unlock(&me->lock);
retried = true;
rc = cvp_reinit_dsp();
mutex_lock(&me->lock);
if (rc)
goto fatal_exit;
else
goto retry;
} else {
goto fatal_exit;
}
}

me->state = DSP_SUSPEND;
goto exit;

fatal_exit:
me->state = DSP_INVALID;
cvp_hyp_assign_from_dsp();
rc = -ENOTSUPP;
exit:
mutex_unlock(&me->lock);
return rc;
@@ -252,6 +286,7 @@ int cvp_dsp_resume(uint32_t session_flag)
int rc = 0;
struct cvp_dsp_cmd_msg cmd;
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_rsp_msg rsp;

cmd.type = CPU2DSP_RESUME;

@@ -260,7 +295,9 @@ int cvp_dsp_resume(uint32_t session_flag)
goto exit;

/* Use cvp_dsp_send_cmd_sync after dsp driver is ready */
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
rc = cvp_dsp_send_cmd_sync(&cmd,
sizeof(struct cvp_dsp_cmd_msg),
&rsp);
if (rc) {
dprintk(CVP_ERR,
"%s: cvp_dsp_send_cmd failed rc = %d\n",
@@ -280,6 +317,7 @@ int cvp_dsp_shutdown(uint32_t session_flag)
struct cvp_dsp_apps *me = &gfa_cv;
int rc = 0;
struct cvp_dsp_cmd_msg cmd;
struct cvp_dsp_rsp_msg rsp;

cmd.type = CPU2DSP_SHUTDOWN;

@@ -288,7 +326,7 @@ int cvp_dsp_shutdown(uint32_t session_flag)
goto exit;

me->state = DSP_INACTIVE;
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), &rsp);
if (rc) {
dprintk(CVP_ERR,
"%s: cvp_dsp_send_cmd failed with rc = %d\n",
@@ -312,6 +350,8 @@ int cvp_dsp_register_buffer(uint32_t session_id, uint32_t buff_fd,
struct cvp_dsp_cmd_msg cmd;
int rc;
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_rsp_msg rsp;
bool retried = false;

cmd.type = CPU2DSP_REGISTER_BUFFER;
cmd.session_id = session_id;
@@ -330,12 +370,43 @@ int cvp_dsp_register_buffer(uint32_t session_id, uint32_t buff_fd,
__func__, cmd.buff_size, cmd.session_id);

mutex_lock(&me->lock);
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
retry:
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), &rsp);
if (rc) {
dprintk(CVP_ERR, "%s send failed rc = %d\n", __func__, rc);
goto exit;
}

if (rsp.ret == CPU2DSP_EFAIL || rsp.ret == CPU2DSP_EUNSUPPORTED) {
dprintk(CVP_WARN, "%s, DSP return err %d\n", __func__, rsp.ret);
rc = -EINVAL;
goto exit;
}

if (rsp.ret == CPU2DSP_EUNAVAILABLE)
goto fatal_exit;

if (rsp.ret == CPU2DSP_EFATAL) {
if (!retried) {
mutex_unlock(&me->lock);
retried = true;
rc = cvp_reinit_dsp();
mutex_lock(&me->lock);
if (rc)
goto fatal_exit;
else
goto retry;
} else {
goto fatal_exit;
}
}

goto exit;

fatal_exit:
me->state = DSP_INVALID;
cvp_hyp_assign_from_dsp();
rc = -ENOTSUPP;
exit:
mutex_unlock(&me->lock);
return rc;
@@ -349,6 +420,8 @@ int cvp_dsp_deregister_buffer(uint32_t session_id, uint32_t buff_fd,
struct cvp_dsp_cmd_msg cmd;
int rc;
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_rsp_msg rsp;
bool retried = false;

cmd.type = CPU2DSP_DEREGISTER_BUFFER;
cmd.session_id = session_id;
@@ -367,12 +440,43 @@ int cvp_dsp_deregister_buffer(uint32_t session_id, uint32_t buff_fd,
__func__, cmd.buff_size, cmd.session_id);

mutex_lock(&me->lock);
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg));
retry:
rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), &rsp);
if (rc) {
dprintk(CVP_ERR, "%s send failed rc = %d\n", __func__, rc);
goto exit;
}

if (rsp.ret == CPU2DSP_EFAIL || rsp.ret == CPU2DSP_EUNSUPPORTED) {
dprintk(CVP_WARN, "%s, DSP return err %d\n", __func__, rsp.ret);
rc = -EINVAL;
goto exit;
}

if (rsp.ret == CPU2DSP_EUNAVAILABLE)
goto fatal_exit;

if (rsp.ret == CPU2DSP_EFATAL) {
if (!retried) {
mutex_unlock(&me->lock);
retried = true;
rc = cvp_reinit_dsp();
mutex_lock(&me->lock);
if (rc)
goto fatal_exit;
else
goto retry;
} else {
goto fatal_exit;
}
}

goto exit;

fatal_exit:
me->state = DSP_INVALID;
cvp_hyp_assign_from_dsp();
rc = -ENOTSUPP;
exit:
mutex_unlock(&me->lock);
return rc;
@@ -393,11 +497,156 @@ static struct rpmsg_driver cvp_dsp_rpmsg_client = {
},
};

static void cvp_dsp_set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
{
q_hdr->qhdr_status = 0x1;
q_hdr->qhdr_type = CVP_IFACEQ_DFLT_QHDR;
q_hdr->qhdr_q_size = CVP_IFACEQ_QUEUE_SIZE / 4;
q_hdr->qhdr_pkt_size = 0;
q_hdr->qhdr_rx_wm = 0x1;
q_hdr->qhdr_tx_wm = 0x1;
q_hdr->qhdr_rx_req = 0x1;
q_hdr->qhdr_tx_req = 0x0;
q_hdr->qhdr_rx_irq_status = 0x0;
q_hdr->qhdr_tx_irq_status = 0x0;
q_hdr->qhdr_read_idx = 0x0;
q_hdr->qhdr_write_idx = 0x0;
}

void cvp_dsp_init_hfi_queue_hdr(struct iris_hfi_device *device)
{
u32 i;
struct cvp_hfi_queue_table_header *q_tbl_hdr;
struct cvp_hfi_queue_header *q_hdr;
struct cvp_iface_q_info *iface_q;

for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
iface_q = &device->dsp_iface_queues[i];
iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
device->dsp_iface_q_table.align_virtual_addr, i);
cvp_dsp_set_queue_hdr_defaults(iface_q->q_hdr);
}
q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
device->dsp_iface_q_table.align_virtual_addr;
q_tbl_hdr->qtbl_version = 0;
q_tbl_hdr->device_addr = (void *)device;
strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
q_tbl_hdr->qtbl_qhdr0_offset =
sizeof(struct cvp_hfi_queue_table_header);
q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;

iface_q = &device->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;

iface_q = &device->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;

iface_q = &device->dsp_iface_queues[CVP_IFACEQ_DBGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
/*
 * Set receive request to zero on debug queue as there is no
 * need of interrupt from cvp hardware for debug messages
 */
q_hdr->qhdr_rx_req = 0;
}

static int __reinit_dsp(void)
{
int rc;
uint32_t flag = 0;
uint64_t addr;
uint32_t size;
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_rsp_msg rsp;
struct msm_cvp_core *core;
struct iris_hfi_device *device;

core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
if (core && core->device)
device = core->device->hfi_device_data;
else
return -EINVAL;

if (!device) {
dprintk(CVP_ERR, "%s: NULL device\n", __func__);
return -EINVAL;
}

/* Force shutdown DSP */
rc = cvp_dsp_shutdown(flag);
if (rc)
return rc;

/* Resend HFI queue */
mutex_lock(&me->lock);
if (!device->dsp_iface_q_table.align_virtual_addr) {
dprintk(CVP_ERR, "%s: DSP HFI queue released\n", __func__);
rc = -EINVAL;
goto exit;
}

addr = (uint64_t)device->dsp_iface_q_table.mem_data.dma_handle;
size = device->dsp_iface_q_table.mem_data.size;

if (!addr || !size) {
dprintk(CVP_DSP, "%s: HFI queue is not ready\n", __func__);
goto exit;
}

rc = cvp_hyp_assign_to_dsp(addr, size);
if (rc) {
dprintk(CVP_ERR, "%s: cvp_hyp_assign_to_dsp. rc=%d\n",
__func__, rc);
goto exit;
}

rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size, &rsp);
if (rc) {
dprintk(CVP_WARN, "%s: Send HFI Queue failed rc = %d\n",
__func__, rc);

goto exit;
}
if (rsp.ret) {
dprintk(CVP_ERR, "%s: DSP error %d %d\n", __func__,
rsp.ret, rsp.dsp_state);
rc = -ENODEV;
}
exit:
mutex_unlock(&me->lock);
return rc;
}

static int cvp_reinit_dsp(void)
{
int rc;
struct cvp_dsp_apps *me = &gfa_cv;

rc = __reinit_dsp();
if (rc) {
mutex_lock(&me->lock);
me->state = DSP_INVALID;
cvp_hyp_assign_from_dsp();
mutex_unlock(&me->lock);
}
return rc;
}

void cvp_dsp_send_hfi_queue(void)
{
struct msm_cvp_core *core;
struct iris_hfi_device *device;
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_rsp_msg rsp;
uint64_t addr;
uint32_t size;
int rc;
@@ -418,6 +667,13 @@ void cvp_dsp_send_hfi_queue(void)
mutex_lock(&device->lock);
mutex_lock(&me->lock);

if (!device->dsp_iface_q_table.align_virtual_addr) {
dprintk(CVP_ERR, "%s: DSP HFI queue released\n", __func__);
mutex_unlock(&me->lock);
mutex_unlock(&device->lock);
return;
}

addr = (uint64_t)device->dsp_iface_q_table.mem_data.dma_handle;
size = device->dsp_iface_q_table.mem_data.size;

@@ -436,7 +692,13 @@ void cvp_dsp_send_hfi_queue(void)
goto exit;
}

rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size);
if (me->state == DSP_PROBED) {
cvp_dsp_init_hfi_queue_hdr(device);
dprintk(CVP_WARN,
"%s: Done init of HFI queue headers\n", __func__);
}

rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size, &rsp);
if (rc) {
dprintk(CVP_WARN, "%s: Send HFI Queue failed rc = %d\n",
__func__, rc);
@@ -444,6 +706,30 @@ void cvp_dsp_send_hfi_queue(void)
goto exit;
}

if (rsp.ret == CPU2DSP_EUNSUPPORTED) {
dprintk(CVP_WARN, "%s unsupported cmd %d\n",
__func__, rsp.type);
goto exit;
}

if (rsp.ret == CPU2DSP_EFATAL || rsp.ret == CPU2DSP_EUNAVAILABLE) {
dprintk(CVP_ERR, "%s fatal error returned %d\n",
__func__, rsp.dsp_state);
me->state = DSP_INVALID;
cvp_hyp_assign_from_dsp();
goto exit;
} else if (rsp.ret == CPU2DSP_EINVALSTATE) {
dprintk(CVP_ERR, "%s dsp invalid state %d\n",
__func__, rsp.dsp_state);
mutex_unlock(&me->lock);
if (cvp_reinit_dsp()) {
dprintk(CVP_ERR, "%s reinit dsp fail\n", __func__);
mutex_unlock(&device->lock);
return;
}
mutex_lock(&me->lock);
}

dprintk(CVP_DSP, "%s: dsp initialized\n", __func__);
me->state = DSP_READY;

@@ -498,8 +784,10 @@ wait_dsp:
switch (me->pending_dsp2cpu_cmd.type) {
case DSP2CPU_POWERON:
{
if (me->state == DSP_READY)
if (me->state == DSP_READY) {
cmd.ret = 0;
break;
}

mutex_unlock(&me->lock);
old_state = me->state;

@@ -24,6 +24,16 @@
int cvp_dsp_device_init(void);
void cvp_dsp_device_exit(void);
void cvp_dsp_send_hfi_queue(void);
void cvp_dsp_init_hfi_queue_hdr(struct iris_hfi_device *device);

enum CPU2DSP_STATUS {
CPU2DSP_SUCCESS = 0,
CPU2DSP_EFAIL = 1,
CPU2DSP_EFATAL = 2,
CPU2DSP_EUNAVAILABLE = 3,
CPU2DSP_EINVALSTATE = 4,
CPU2DSP_EUNSUPPORTED = 5,
};

enum CVP_DSP_COMMAND {
CPU2DSP_SEND_HFI_QUEUE = 0,
@@ -58,7 +68,8 @@ struct cvp_dsp_cmd_msg {
struct cvp_dsp_rsp_msg {
uint32_t type;
int32_t ret;
uint32_t reserved[CVP_DSP_MAX_RESERVED];
uint32_t dsp_state;
uint32_t reserved[CVP_DSP_MAX_RESERVED - 1];
};

struct cvp_dsp2cpu_cmd_msg {

@@ -23,6 +23,7 @@
#include <media/msm_media_info.h>
#include <media/msm_eva_private.h>
#include "cvp_hfi_api.h"
#include "cvp_hfi_helper.h"
#include <synx_api.h>

#define MAX_SUPPORTED_INSTANCES 16
@@ -192,7 +193,7 @@ enum msm_cvp_modes {

struct cvp_session_msg {
struct list_head node;
struct cvp_hfi_msg_session_hdr pkt;
struct cvp_hfi_msg_session_hdr_ext pkt;
};

struct cvp_session_queue {
@@ -203,6 +204,23 @@ struct cvp_session_queue {
wait_queue_head_t wq;
};

#define CVP_CYCLE_STAT_SIZE 8
struct cvp_cycle_stat {
u32 busy[CVP_CYCLE_STAT_SIZE];
u32 total;
u32 idx;
u32 size;
};

struct cvp_cycle_info {
u32 sum_fps[HFI_MAX_HW_THREADS];
u32 hi_ctrl_lim[HFI_MAX_HW_THREADS];
u32 lo_ctrl_lim[HFI_MAX_HW_THREADS];
struct cvp_cycle_stat cycle[HFI_MAX_HW_THREADS];
unsigned long conf_freq;
struct mutex lock;
};

struct cvp_session_prop {
u32 type;
u32 kernel_mask;
@@ -224,6 +242,7 @@ struct cvp_session_prop {
u32 ddr_op_bw;
u32 ddr_cache;
u32 ddr_op_cache;
u32 fps[HFI_MAX_HW_THREADS];
};

enum cvp_event_t {
@@ -265,6 +284,7 @@ struct msm_cvp_core {
u32 last_fault_addr;
bool trigger_ssr;
unsigned long curr_freq;
struct cvp_cycle_info dyn_clk;
atomic64_t kernel_trans_id;
};

@@ -10,6 +10,9 @@ static int _get_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
struct cvp_hal_session_cmd_pkt *pkt_hdr)
{
struct cvp_kmd_hfi_packet *u;
struct cvp_hfi_msg_session_hdr *hdr;

hdr = (struct cvp_hfi_msg_session_hdr *)pkt_hdr;

u = &up->data.hfi_pkt;

@@ -33,7 +36,7 @@ static int _get_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
return 0;

set_default_pkt_hdr:
pkt_hdr->size = get_msg_size();
pkt_hdr->size = get_msg_size(hdr);
return 0;
}

@@ -451,7 +454,7 @@ static int _put_user_session_info(
static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
{
int rc = 0;
int i, size = get_msg_size() >> 2;
int i, size;
struct cvp_kmd_arg __user *up = (struct cvp_kmd_arg *)arg;
struct cvp_hal_session_cmd_pkt pkt_hdr;

@@ -467,9 +470,12 @@ static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
case CVP_KMD_RECEIVE_MSG_PKT:
{
struct cvp_kmd_hfi_packet *k, *u;
struct cvp_hfi_msg_session_hdr *hdr;

k = &kp->data.hfi_pkt;
u = &up->data.hfi_pkt;
hdr = (struct cvp_hfi_msg_session_hdr *)k;
size = get_msg_size(hdr) >> 2;
for (i = 0; i < size; i++)
if (put_user(k->pkt_data[i], &u->pkt_data[i]))
return -EFAULT;

@@ -115,6 +115,25 @@ void msm_cvp_free_platform_resources(
msm_cvp_free_bus_vectors(res);
}

static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
{
int ret = 0;
unsigned int reg_config[2];
struct platform_device *pdev = res->pdev;

ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,ipcc-reg",
reg_config, 2);
if (ret) {
dprintk(CVP_ERR, "Failed to read ipcc reg: %d\n", ret);
return ret;
}

res->ipcc_reg_base = reg_config[0];
res->ipcc_reg_size = reg_config[1];

return ret;
}

static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
{
struct reg_set *reg_set;
@@ -783,6 +802,10 @@ int cvp_read_platform_resources_from_dt(
goto err_load_reg_table;
}

rc = msm_cvp_load_ipcc_regs(res);
if (rc)
dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);

rc = msm_cvp_load_regulator_table(res);
if (rc) {
dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);

@@ -136,7 +136,9 @@ struct msm_cvp_mem_cdsp {
struct msm_cvp_platform_resources {
phys_addr_t firmware_base;
phys_addr_t register_base;
phys_addr_t ipcc_reg_base;
uint32_t register_size;
uint32_t ipcc_reg_size;
uint32_t irq;
uint32_t sku_version;
struct allowed_clock_rates_table *allowed_clks_tbl;

@@ -73,8 +73,8 @@ int cvp_import_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc,
rc = synx_import(ssid, &params);
if (rc) {
dprintk(CVP_ERR,
"%s: synx_import failed\n",
__func__);
"%s: %d synx_import failed\n",
__func__, h_synx);
return rc;
}
}
@@ -102,8 +102,8 @@ int cvp_release_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc)
rc = synx_release(ssid, h_synx);
if (rc)
dprintk(CVP_ERR,
"%s: synx_release %d failed\n",
__func__, i);
"%s: synx_release %d, %d failed\n",
__func__, h_synx, i);
}
}
return rc;
@@ -111,14 +111,14 @@ int cvp_release_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc)

static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
enum cvp_synx_type type,
struct cvp_fence_command *fc)
struct cvp_fence_command *fc,
int synx_state)
{
int rc = 0;
int i;
int h_synx;
struct synx_session ssid;
int start = 0, end = 0;
int synx_state = SYNX_STATE_SIGNALED_CANCEL;

ssid = inst->synx_session_id;

@@ -137,11 +137,12 @@ static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
h_synx = fc->synx[i];
if (h_synx) {
rc = synx_signal(ssid, h_synx, synx_state);
if (rc) {
dprintk(CVP_ERR, "%s: synx_signal %d failed\n",
__func__, i);
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
dprintk(CVP_SYNX, "Cancel synx %d session %llx\n",
h_synx, inst);
if (rc)
dprintk(CVP_ERR,
"%s: synx_signal %d %d %d failed\n",
__func__, h_synx, i, synx_state);
}
}

@@ -151,14 +152,14 @@ static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
}

int cvp_cancel_synx(struct msm_cvp_inst *inst, enum cvp_synx_type type,
struct cvp_fence_command *fc)
struct cvp_fence_command *fc, int synx_state)
{
if (fc->signature != 0xFEEDFACE) {
dprintk(CVP_ERR, "%s deprecated synx path\n", __func__);
return -EINVAL;
}

return cvp_cancel_synx_impl(inst, type, fc);
return cvp_cancel_synx_impl(inst, type, fc, synx_state);
}

static int cvp_wait_synx(struct synx_session ssid, u32 *synx, u32 num_synx,
@@ -186,6 +187,8 @@ static int cvp_wait_synx(struct synx_session ssid, u32 *synx, u32 num_synx,
}
return rc;
}
dprintk(CVP_SYNX, "Wait synx %d returned succes\n",
|
||||
h_synx);
|
||||
}
++i;
}
@@ -203,10 +206,12 @@ static int cvp_signal_synx(struct synx_session ssid, u32 *synx, u32 num_synx,
if (h_synx) {
rc = synx_signal(ssid, h_synx, synx_state);
if (rc) {
dprintk(CVP_ERR, "%s: synx_signal %d failed\n",
current->comm, i);
dprintk(CVP_ERR,
"%s: synx_signal %d %d failed\n",
current->comm, h_synx, i);
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
dprintk(CVP_SYNX, "Signaled synx %d\n", h_synx);
}
++i;
}

@@ -48,7 +48,7 @@ int cvp_import_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc,
u32 *fence);
int cvp_release_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc);
int cvp_cancel_synx(struct msm_cvp_inst *inst, enum cvp_synx_type type,
struct cvp_fence_command *fc);
struct cvp_fence_command *fc, int synx_state);
int cvp_synx_ops(struct msm_cvp_inst *inst, enum cvp_synx_type type,
struct cvp_fence_command *fc, u32 *synx_state);
void cvp_dump_fence_queue(struct msm_cvp_inst *inst);

@@ -14,6 +14,7 @@
#include "msm_cvp_core.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_resources.h"
#include "cvp_core_hfi.h"


static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
@@ -471,3 +472,41 @@ struct context_bank_info *msm_cvp_smem_get_context_bank(bool is_secure,

return match;
}

int msm_cvp_map_ipcc_regs(u32 *iova)
{
struct context_bank_info *cb;
struct msm_cvp_core *core;
struct cvp_hfi_device *hfi_ops;
struct iris_hfi_device *dev = NULL;
phys_addr_t paddr;
u32 size;

core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
if (core) {
hfi_ops = core->device;
if (hfi_ops)
dev = hfi_ops->hfi_device_data;
}

if (!dev)
return -EINVAL;

paddr = dev->res->ipcc_reg_base;
size = dev->res->ipcc_reg_size;

if (!paddr || !size)
return -EINVAL;

cb = msm_cvp_smem_get_context_bank(false, dev->res, 0);
if (!cb) {
dprintk(CVP_ERR, "%s: fail to get context bank\n", __func__);
return -EINVAL;
}
*iova = dma_map_resource(cb->dev, paddr, size, DMA_BIDIRECTIONAL, 0);
if (*iova == DMA_MAPPING_ERROR) {
dprintk(CVP_WARN, "%s: fail to map IPCC regs\n", __func__);
return -EFAULT;
}
return 0;
}