msm: eva: merged tip from eva-kernel.lnx.1.0

Merged the tip source code from eva-kernel.lnx.1.0 and verified the promotion flow.

Change-Id: I031508fd8a23995a166506f3d190e5e228eb13c2
Signed-off-by: Yu SI <ysi@codeaurora.org>
msm/Kbuild
@@ -14,6 +14,20 @@ KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
# ported from Android.mk
$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
ifeq ($(CONFIG_ARCH_WAIPIO), y)
$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
# include $(EVA_ROOT)/config/waipio.mk
KBUILD_CPPFLAGS += -DCONFIG_EVA_WAIPIO=1
ccflags-y += -DCONFIG_EVA_WAIPIO=1
endif
ifeq ($(CONFIG_ARCH_KALAMA), y)
$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
# include $(EVA_ROOT)/config/waipio.mk
KBUILD_CPPFLAGS += -DCONFIG_EVA_KALAMA=1
ccflags-y += -DCONFIG_EVA_KALAMA=1
endif
msm-eva-objs := eva/cvp.o \
eva/msm_cvp_ioctl.o \
eva/msm_cvp_platform.o \

@@ -23,7 +23,11 @@ enum queue_state {
QUEUE_INVALID,
};
#ifdef CONFIG_EVA_WAIPIO
#define CVP_SYNX_ENABLED 1
#define CVP_MMRM_ENABLED 1
#define CVP_FASTRPC_ENABLED 1
#define CVP_MINIDUMP_ENABLED 1
#endif
#endif

@@ -23,7 +23,6 @@
#include <linux/soc/qcom/smem.h>
#include <linux/dma-mapping.h>
#include <linux/reset.h>
#include <soc/qcom/minidump.h>
#include "hfi_packetization.h"
#include "msm_cvp_debug.h"
#include "cvp_core_hfi.h"

@@ -33,17 +32,40 @@
#include "msm_cvp_clocks.h"
#include "cvp_dump.h"
#ifdef CVP_MINIDUMP_ENABLED
/*Declare and init the head node of the linked list
for queue va_md dump*/
LIST_HEAD(head_node_hfi_queue);
static LIST_HEAD(head_node_hfi_queue);
/*Declare and init the head node of the linked list
for debug struct va_md dump*/
LIST_HEAD(head_node_dbg_struct);
static LIST_HEAD(head_node_dbg_struct);
static int eva_struct_list_notif_handler(struct notifier_block *this,
unsigned long event, void *ptr);
static int eva_hfiq_list_notif_handler(struct notifier_block *this,
unsigned long event, void *ptr);
static struct notifier_block eva_struct_list_notif_blk = {
.notifier_call = eva_struct_list_notif_handler,
.priority = INT_MAX-1,
};
static struct notifier_block eva_hfiq_list_notif_blk = {
.notifier_call = eva_hfiq_list_notif_handler,
.priority = INT_MAX,
};
struct list_head *dump_array[CVP_MAX_DUMP] = {
[CVP_QUEUE_DUMP] = &head_node_hfi_queue,
[CVP_DBG_DUMP] = &head_node_dbg_struct,
};
int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size)
{
struct md_region md_entry;
if (msm_minidump_enabled()) {
dprintk(CVP_INFO, "Minidump is enabled!\n");
@@ -73,6 +95,7 @@ void cvp_va_md_register(char* name, void* notf_blk_ptr)
{
int rc = 0;
struct notifier_block* notf_blk = (struct notifier_block*)notf_blk_ptr;
rc = qcom_va_md_register(name, notf_blk);
if (rc) {
dprintk(CVP_ERR,

@@ -109,12 +132,18 @@ void cvp_free_va_md_list(void)
}
}
void add_va_node_to_list(void *list_head_node, void *buff_va, u32 buff_size,
void add_va_node_to_list(enum cvp_dump_type type, void *buff_va, u32 buff_size,
const char *region_name, bool copy)
{
struct list_head *head_node = (struct list_head *)list_head_node;
struct list_head *head_node;
struct eva_va_md_queue *temp_node = NULL;
if (type >= CVP_MAX_DUMP)
return;
head_node = dump_array[type];
/*Creating Node*/
temp_node = kzalloc(sizeof(struct eva_va_md_queue), GFP_KERNEL);
if (!temp_node) {
dprintk(CVP_ERR, "Memory allocation failed for list node\n");

@@ -144,23 +173,23 @@ void add_hfi_queue_to_va_md_list(void *device)
dev = (struct iris_hfi_device*)device;
iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
add_va_node_to_list(&head_node_hfi_queue,
add_va_node_to_list(CVP_QUEUE_DUMP,
iface_q->q_array.align_virtual_addr,
iface_q->q_array.mem_size,
"eva_cmdq_cpu", false);
iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
add_va_node_to_list(&head_node_hfi_queue,
add_va_node_to_list(CVP_QUEUE_DUMP,
iface_q->q_array.align_virtual_addr,
iface_q->q_array.mem_size,
"eva_msgq_cpu", false);
iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
add_va_node_to_list(&head_node_hfi_queue,
add_va_node_to_list(CVP_QUEUE_DUMP,
iface_q->q_array.align_virtual_addr,
iface_q->q_array.mem_size,
"eva_cmdq_dsp", false);
iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
add_va_node_to_list(&head_node_hfi_queue,
add_va_node_to_list(CVP_QUEUE_DUMP,
iface_q->q_array.align_virtual_addr,
iface_q->q_array.mem_size,
"eva_msgq_dsp", false);

@@ -176,30 +205,30 @@ void add_queue_header_to_va_md_list(void *device)
iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
add_va_node_to_list(&head_node_dbg_struct,
add_va_node_to_list(CVP_DBG_DUMP,
queue, sizeof(struct cvp_hfi_queue_header),
"cvp_hfi_queue_header-cpucmdQ", false);
iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
add_va_node_to_list(&head_node_dbg_struct,
add_va_node_to_list(CVP_DBG_DUMP,
queue, sizeof(struct cvp_hfi_queue_header),
"cvp_hfi_queue_header-cpumsgQ", false);
iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
add_va_node_to_list(&head_node_dbg_struct,
add_va_node_to_list(CVP_DBG_DUMP,
queue, sizeof(struct cvp_hfi_queue_header),
"cvp_hfi_queue_header-dspcmdQ", false);
iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
add_va_node_to_list(&head_node_dbg_struct,
add_va_node_to_list(CVP_DBG_DUMP,
queue, sizeof(struct cvp_hfi_queue_header),
"cvp_hfi_queue_header-dspmsgQ", false);
}
int eva_hfiq_list_notif_handler(struct notifier_block *this,
static int eva_hfiq_list_notif_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct va_md_entry entry;

@@ -240,7 +269,7 @@ int eva_hfiq_list_notif_handler(struct notifier_block *this,
return NOTIFY_OK;
}
int eva_struct_list_notif_handler(struct notifier_block *this,
static int eva_struct_list_notif_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct va_md_entry entry;

@@ -282,12 +311,4 @@ int eva_struct_list_notif_handler(struct notifier_block *this,
return NOTIFY_OK;
}
struct notifier_block eva_struct_list_notif_blk = {
.notifier_call = eva_struct_list_notif_handler,
.priority = INT_MAX-1,
};
struct notifier_block eva_hfiq_list_notif_blk = {
.notifier_call = eva_hfiq_list_notif_handler,
.priority = INT_MAX,
};
#endif
@@ -8,23 +8,19 @@
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <soc/qcom/minidump.h>
#include "cvp_comm_def.h"
enum cvp_dump_type {
CVP_QUEUE_DUMP,
CVP_DBG_DUMP,
CVP_MAX_DUMP,
};
#define MAX_REGION_NAME_LEN 32
#define EVAFW_IMAGE_SIZE 7*1024*1024
extern struct list_head head_node_hfi_queue;
extern struct list_head head_node_dbg_struct;
extern struct notifier_block eva_hfiq_list_notif_blk;
extern struct notifier_block eva_struct_list_notif_blk;
/* notifier handler function for list of eva hfi queues */
int eva_hfiq_list_notif_handler(struct notifier_block *this,
unsigned long event, void *ptr);
/* notifier handler function for list of eva global structures */
int eva_struct_list_notif_handler(struct notifier_block *this,
unsigned long event, void *ptr);
#ifdef CVP_MINIDUMP_ENABLED
#include <soc/qcom/minidump.h>
/*
* wrapper for static minidump

@@ -39,14 +35,14 @@ int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size);
/*
* Fucntion to add dump region to queue
* @list_head_node: Head node of the list which needs to be updated
* @type: Type of the list node which needs to be updated
* @buff_va: Virtual address of the buffer which needs to be dumped
* @buff_size: Size of the buffer which needs to be dumped
* @region_name: Dump will be collected with this name
* @copy: Flag to indicate if the buffer data needs to be copied
* to the intermidiate buffer allocated by kzmalloc.
*/
void add_va_node_to_list(void *list_head_node, void *buff_va,
void add_va_node_to_list(enum cvp_dump_type type, void *buff_va,
u32 buff_size, const char *region_name, bool copy);
/*

@@ -93,5 +89,36 @@ struct eva_va_md_queue
char region_name[MAX_REGION_NAME_LEN];
bool copy;
};
#else
static inline int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size)
{
return 0;
}
#endif
static inline void add_va_node_to_list(enum cvp_dump_type type, void *buff_va,
u32 buff_size, const char *region_name, bool copy)
{
}
static inline void cvp_va_md_register(char *name, void* notf_blk_ptr)
{
}
static inline void cvp_register_va_md_region(void)
{
}
static inline void cvp_free_va_md_list(void)
{
}
static inline void add_hfi_queue_to_va_md_list(void *device)
{
}
static inline void add_queue_header_to_va_md_list(void *device)
{
}
#endif /* End of CVP_MINIDUMP_ENABLED */
#endif
@@ -812,7 +812,7 @@ static int __unvote_buses(struct iris_hfi_device *device)
device->bus_vote.data_count = 0;
iris_hfi_for_each_bus(device, bus) {
rc = icc_set_bw(bus->client, 0, 0);
rc = msm_cvp_set_bw(bus, 0);
if (rc) {
dprintk(CVP_ERR,
"%s: Failed unvoting bus\n", __func__);

@@ -853,7 +853,7 @@ no_data_count:
iris_hfi_for_each_bus(device, bus) {
if (bus) {
rc = icc_set_bw(bus->client, bus->range[1], 0);
rc = msm_cvp_set_bw(bus, bus->range[1]);
if (rc)
dprintk(CVP_ERR,
"Failed voting bus %s to ab %u\n",

@@ -1760,6 +1760,53 @@ static int __sys_set_power_control(struct iris_hfi_device *device,
return 0;
}
static void cvp_pm_qos_update(struct iris_hfi_device *device, bool vote_on)
{
u32 latency, off_vote_cnt;
int i, err = 0;
spin_lock(&device->res->pm_qos.lock);
off_vote_cnt = device->res->pm_qos.off_vote_cnt;
spin_unlock(&device->res->pm_qos.lock);
if (vote_on && off_vote_cnt)
return;
latency = vote_on ? device->res->pm_qos.latency_us :
PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
for (i = 0; i < device->res->pm_qos.silver_count; i++) {
err = dev_pm_qos_update_request(
&device->res->pm_qos.pm_qos_hdls[i],
latency);
if (err < 0) {
if (vote_on) {
dprintk(CVP_WARN,
"pm qos on failed %d\n", err);
} else {
dprintk(CVP_WARN,
"pm qos off failed %d\n", err);
}
}
}
}
static int iris_pm_qos_update(void *device)
{
struct iris_hfi_device *dev;
if (!device) {
dprintk(CVP_ERR, "%s Invalid device\n", __func__);
return -ENODEV;
}
dev = device;
cvp_pm_qos_update(dev, true);
return 0;
}
static int iris_hfi_core_init(void *device)
{
int rc = 0;

@@ -1822,8 +1869,9 @@ static int iris_hfi_core_init(void *device)
cvp_register_va_md_region();
// Add node for dev struct
add_va_node_to_list(&head_node_dbg_struct, dev,
sizeof(struct iris_hfi_device), "iris_hfi_device-dev", false);
add_va_node_to_list(CVP_QUEUE_DUMP, dev,
sizeof(struct iris_hfi_device),
"iris_hfi_device-dev", false);
add_queue_header_to_va_md_list((void*)dev);
add_hfi_queue_to_va_md_list((void*)dev);

@@ -1865,10 +1913,35 @@ static int iris_hfi_core_init(void *device)
__set_ubwc_config(device);
__sys_set_idle_indicator(device, true);
if (dev->res->pm_qos_latency_us)
cpu_latency_qos_add_request(&dev->qos,
dev->res->pm_qos_latency_us);
if (dev->res->pm_qos.latency_us) {
int err = 0;
u32 i, cpu;
dev->res->pm_qos.pm_qos_hdls = kcalloc(
dev->res->pm_qos.silver_count,
sizeof(struct dev_pm_qos_request),
GFP_KERNEL);
if (!dev->res->pm_qos.pm_qos_hdls) {
dprintk(CVP_WARN, "Failed allocate pm_qos_hdls\n");
goto pm_qos_bail;
}
for (i = 0; i < dev->res->pm_qos.silver_count; i++) {
cpu = dev->res->pm_qos.silver_cores[i];
err = dev_pm_qos_add_request(
get_cpu_device(cpu),
&dev->res->pm_qos.pm_qos_hdls[i],
DEV_PM_QOS_RESUME_LATENCY,
dev->res->pm_qos.latency_us);
if (err < 0)
dprintk(CVP_WARN,
"%s pm_qos_add_req %d failed\n",
__func__, i);
}
}
pm_qos_bail:
mutex_unlock(&dev->lock);
cvp_dsp_send_hfi_queue();

@@ -1895,9 +1968,10 @@ err_no_mem:
static int iris_hfi_core_release(void *dev)
{
int rc = 0;
int rc = 0, i;
struct iris_hfi_device *device = dev;
struct cvp_hal_session *session, *next;
struct dev_pm_qos_request *qos_hdl;
if (!device) {
dprintk(CVP_ERR, "invalid device\n");

@@ -1906,9 +1980,16 @@ static int iris_hfi_core_release(void *dev)
mutex_lock(&device->lock);
dprintk(CVP_WARN, "Core releasing\n");
if (device->res->pm_qos_latency_us &&
cpu_latency_qos_request_active(&device->qos))
cpu_latency_qos_remove_request(&device->qos);
if (device->res->pm_qos.latency_us &&
device->res->pm_qos.pm_qos_hdls) {
for (i = 0; i < device->res->pm_qos.silver_count; i++) {
qos_hdl = &device->res->pm_qos.pm_qos_hdls[i];
if ((qos_hdl != NULL) && dev_pm_qos_request_active(qos_hdl))
dev_pm_qos_remove_request(qos_hdl);
}
kfree(device->res->pm_qos.pm_qos_hdls);
device->res->pm_qos.pm_qos_hdls = NULL;
}
__resume(device);
__set_state(device, IRIS_STATE_DEINIT);

@@ -3844,10 +3925,6 @@ static inline int __suspend(struct iris_hfi_device *device)
dprintk(CVP_PWR, "Entering suspend\n");
if (device->res->pm_qos_latency_us &&
cpu_latency_qos_request_active(&device->qos))
cpu_latency_qos_remove_request(&device->qos);
rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
if (rc) {
dprintk(CVP_WARN, "Failed to suspend cvp core %d\n", rc);

@@ -3857,6 +3934,10 @@ static inline int __suspend(struct iris_hfi_device *device)
__disable_subcaches(device);
call_iris_op(device, power_off, device);
if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
cvp_pm_qos_update(device, false);
return rc;
err_tzbsp_suspend:

@@ -4194,9 +4275,8 @@ static inline int __resume(struct iris_hfi_device *device)
*/
__set_threshold_registers(device);
if (device->res->pm_qos_latency_us)
cpu_latency_qos_add_request(&device->qos,
device->res->pm_qos_latency_us);
if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
cvp_pm_qos_update(device, true);
__sys_set_debug(device, msm_cvp_fw_debug);

@@ -4684,6 +4764,7 @@ static void iris_init_hfi_callbacks(struct cvp_hfi_device *hdev)
hdev->flush_debug_queue = iris_hfi_flush_debug_queue;
hdev->noc_error_info = iris_hfi_noc_error_info;
hdev->validate_session = iris_hfi_validate_session;
hdev->pm_qos_update = iris_pm_qos_update;
}
int cvp_iris_hfi_initialize(struct cvp_hfi_device *hdev, u32 device_id,

@@ -265,6 +265,7 @@ struct cvp_hfi_device {
int (*flush_debug_queue)(void *dev);
int (*noc_error_info)(void *dev);
int (*validate_session)(void *sess, const char *func);
int (*pm_qos_update)(void *device);
};
typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd,
@@ -15,40 +15,63 @@
#define HFI_CMD_START_OFFSET (0x00010000)
#define HFI_MSG_START_OFFSET (0x00020000)
#define HFI_ERR_NONE HFI_COMMON_BASE
#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1)
#define HFI_ERR_SYS_INVALID_PARAMETER (HFI_COMMON_BASE + 0x2)
#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x3)
#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x4)
#define HFI_ERR_SYS_MAX_SESSIONS_REACHED (HFI_COMMON_BASE + 0x5)
#define HFI_ERR_SYS_UNSUPPORTED_CODEC (HFI_COMMON_BASE + 0x6)
#define HFI_ERR_SYS_SESSION_IN_USE (HFI_COMMON_BASE + 0x7)
#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE (HFI_COMMON_BASE + 0x8)
#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x9)
#define HFI_ERR_SYS_NOC_ERROR (HFI_COMMON_BASE + 0x11)
#define HFI_ERR_SESSION_FATAL (HFI_COMMON_BASE + 0x1001)
#define HFI_ERR_SESSION_INVALID_PARAMETER (HFI_COMMON_BASE + 0x1002)
#define HFI_ERR_SESSION_BAD_POINTER (HFI_COMMON_BASE + 0x1003)
#define HFI_ERR_SESSION_INVALID_SESSION_ID (HFI_COMMON_BASE + 0x1004)
#define HFI_ERR_SESSION_INVALID_STREAM_ID (HFI_COMMON_BASE + 0x1005)
#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION \
(HFI_COMMON_BASE + 0x1006)
#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY (HFI_COMMON_BASE + 0x1007)
#define HFI_ERR_NONE (HFI_COMMON_BASE) /**< Status: No error */
#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1) /**< Fatal system error */
#define HFI_ERR_SYS_INVALID_PARAMETER (HFI_COMMON_BASE + 0x2) /**< Invalid system parameter encountered */
#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x3) /**< Interface version mismatch */
#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x4) /**< Insufficient system resources */
#define HFI_ERR_SYS_MAX_SESSIONS_REACHED (HFI_COMMON_BASE + 0x5) /**< Maximum number of sessions reached */
#define HFI_ERR_SYS_SESSION_IN_USE (HFI_COMMON_BASE + 0x7) /**< Session ID specified is in use */
#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE (HFI_COMMON_BASE + 0x8) /**< ID is out of range */
#define HFI_ERR_SYS_UNSUPPORTED_TRIGCMD (HFI_COMMON_BASE + 0xA) /**< Unsupported TRIGCMD command*/
#define HFI_ERR_SYS_UNSUPPORTED_RESOURCES (HFI_COMMON_BASE + 0xB) /**< Unsupported resource*/
#define HFI_ERR_SYS_UNSUPPORT_CMD (HFI_COMMON_BASE + 0xC) /**< Command is not supported*/
#define HFI_ERR_SYS_CMDSIZE (HFI_COMMON_BASE + 0xD) /**< command size err*/
#define HFI_ERR_SYS_UNSUPPORT_PROPERTY (HFI_COMMON_BASE + 0xE) /**< Unsupported property*/
#define HFI_ERR_SYS_INIT_EXPECTED (HFI_COMMON_BASE + 0xF) /**< Upon FW start, first command must be SYS_INIT*/
#define HFI_ERR_SYS_INIT_IGNORED (HFI_COMMON_BASE + 0x10) /**< After FW started, SYS_INIT will be ignored*/
#define HFI_ERR_SYS_MAX_DME_SESSIONS_REACHED (HFI_COMMON_BASE + 0x11) /**< Maximum DME sessions Reached */
#define HFI_ERR_SYS_MAX_FD_SESSIONS_REACHED (HFI_COMMON_BASE + 0x12) /**< Maximum FD sessions Reached */
#define HFI_ERR_SYS_MAX_ODT_SESSIONS_REACHED (HFI_COMMON_BASE + 0x13) /**< Maximum ODT sessions Reached*/
#define HFI_ERR_SYS_MAX_CV_SESSIONS_REACHED (HFI_COMMON_BASE + 0x14) /**< Maximum CV sessions Reached*/
#define HFI_ERR_SYS_INVALID_SESSION_TYPE (HFI_COMMON_BASE + 0x15) /**< Invalid session TYPE. */
#define HFI_ERR_SYS_NOC_ERROR (HFI_COMMON_BASE + 0x16) /**< NOC Error encountered */
#define HFI_ERR_SESSION_UNSUPPORTED_SETTING (HFI_COMMON_BASE + 0x1008)
#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x1009)
#define HFI_ERR_SESSION_STREAM_CORRUPT (HFI_COMMON_BASE + 0x100B)
#define HFI_ERR_SESSION_ENC_OVERFLOW (HFI_COMMON_BASE + 0x100C)
#define HFI_ERR_SESSION_UNSUPPORTED_STREAM (HFI_COMMON_BASE + 0x100D)
#define HFI_ERR_SESSION_CMDSIZE (HFI_COMMON_BASE + 0x100E)
#define HFI_ERR_SESSION_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x100F)
#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE (HFI_COMMON_BASE + 0x1010)
#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL (HFI_COMMON_BASE + 0x1011)
#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR (HFI_COMMON_BASE + 0x1012)
#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED (HFI_COMMON_BASE + 0x1013)
#define HFI_ERR_SESSION_FLUSHED (HFI_COMMON_BASE + 0x101C)
/**
Level 2 Comment: "Session Level Error types"
Common HFI_ERROR_SESSION_X values to be used as session level error/warning
for event and messages
*/
#define HFI_ERR_SESSION_FATAL (HFI_COMMON_BASE + 0x1001) /**< Fatal session error */
#define HFI_ERR_SESSION_INVALID_PARAMETER (HFI_COMMON_BASE + 0x1002) /**< Invalid session parameter */
#define HFI_ERR_SESSION_BAD_POINTER (HFI_COMMON_BASE + 0x1003) /**< Bad pointer encountered */
#define HFI_ERR_SESSION_INVALID_SESSION_ID (HFI_COMMON_BASE + 0x1004) /**< Invalid session ID. eventData2 specifies the session ID. */
#define HFI_ERR_SESSION_INVALID_STREAM_ID (HFI_COMMON_BASE + 0x1005) /**< Invalid stream ID. eventData2 specifies the stream ID. */
#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION (HFI_COMMON_BASE + 0x1006) /**< Incorrect state for specified operation */
#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY (HFI_COMMON_BASE + 0x1007) /**< Unsupported property. eventData2 specifies the property index. */
#define HFI_ERR_SESSION_UNSUPPORTED_SETTING (HFI_COMMON_BASE + 0x1008) /**< Unsupported property setting. eventData2 specifies the property index. */
#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x1009) /**< Insufficient resources for session */
#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED (HFI_COMMON_BASE + 0x100A) /**< Stream is found to be corrupt; processing is stalled */
#define HFI_ERR_SESSION_STREAM_CORRUPT (HFI_COMMON_BASE + 0x100B) /**< Stream is found to be corrupt; processing is recoverable */
#define HFI_ERR_SESSION_RESERVED (HFI_COMMON_BASE + 0x100C) /**< Reserved */
#define HFI_ERR_SESSION_UNSUPPORTED_STREAM (HFI_COMMON_BASE + 0x100D) /**< Unsupported stream */
#define HFI_ERR_SESSION_CMDSIZE (HFI_COMMON_BASE + 0x100E) /**< Command packet size err*/
#define HFI_ERR_SESSION_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x100F) /**< Command is not supported*/
#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE (HFI_COMMON_BASE + 0x1010) /**< BufferType is not supported*/
#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL (HFI_COMMON_BASE + 0x1011) /**< Buffer Count is less than default*/
#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR (HFI_COMMON_BASE + 0x1012) /**< Downscaling not possible */
#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED (HFI_COMMON_BASE + 0x1013) /**< Upscaling not possible */
#define HFI_ERR_SESSION_CANNOT_KEEP_ASPECT_RATIO (HFI_COMMON_BASE + 0x1014) /**< Cannot maintain aspect ratio */
#define HFI_ERR_SESSION_ADDRESS_NOT_ALIGNED (HFI_COMMON_BASE + 0x1016) /**Address is not aligned */
#define HFI_ERR_SESSION_BUFFERSIZE_TOOSMALL (HFI_COMMON_BASE + 0x1017) /**< Buffer Count is less than default*/
#define HFI_ERR_SESSION_ABORTED (HFI_COMMON_BASE + 0x1018) /**< error caused by session abort*/
#define HFI_ERR_SESSION_BUFFER_ALREADY_SET (HFI_COMMON_BASE + 0x1019) /**< Cannot set buffer multiple times without releasing in between. */
#define HFI_ERR_SESSION_BUFFER_ALREADY_RELEASED (HFI_COMMON_BASE + 0x101A) /**< Cannot release buffer multiple times without setting in between. */
#define HFI_ERR_SESSION_END_BUFFER_NOT_RELEASED (HFI_COMMON_BASE + 0x101B) /**< Session was ended without properly releasing all buffers */
#define HFI_ERR_SESSION_FLUSHED (HFI_COMMON_BASE + 0x101C) /**< Cannot set buffer multiple times without releasing in between. */
#define HFI_ERR_SESSION_KERNEL_MAX_STREAMS_REACHED (HFI_COMMON_BASE + 0x101D) /*Maximum Streams per Kernel reached in a session*/
#define HFI_ERR_SESSION_MAX_STREAMS_REACHED (HFI_COMMON_BASE + 0x101E) /*Maximum Streams Reached in a session*/
#define HFI_ERR_SESSION_HW_HANG_DETECTED (HFI_COMMON_BASE + 0x101F) /*HW hang was detected in one of the HW blocks for a frame*/
#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1)
#define HFI_EVENT_SESSION_ERROR (HFI_COMMON_BASE + 0x2)
@@ -40,8 +40,6 @@ static enum cvp_status hfi_map_err_status(u32 hfi_err)
cvp_err = CVP_ERR_BAD_PARAM;
break;
case HFI_ERR_SYS_INSUFFICIENT_RESOURCES:
case HFI_ERR_SYS_UNSUPPORTED_DOMAIN:
case HFI_ERR_SYS_UNSUPPORTED_CODEC:
case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:

@@ -28,11 +28,9 @@ int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session)
if (!s)
return -ECONNRESET;
s->cur_cmd_type = EVA_KMD_GET_SESSION_INFO;
*session = hash32_ptr(inst->session);
dprintk(CVP_SESS, "%s: id 0x%x\n", __func__, *session);
s->cur_cmd_type = 0;
cvp_put_inst(s);
return rc;
}

@@ -143,13 +141,11 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
if (!s)
return -ECONNRESET;
s->cur_cmd_type = EVA_KMD_RECEIVE_MSG_PKT;
wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
sq = &inst->session_queue;
rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);
s->cur_cmd_type = 0;
cvp_put_inst(inst);
return rc;
}

@@ -178,7 +174,6 @@ static int msm_cvp_session_process_hfi(
if (!s)
return -ECONNRESET;
inst->cur_cmd_type = EVA_KMD_SEND_CMD_PKT;
hdev = inst->core->device;
pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);

@@ -251,7 +246,6 @@ static int msm_cvp_session_process_hfi(
__func__, signal);
exit:
inst->cur_cmd_type = 0;
cvp_put_inst(inst);
return rc;
}

@@ -985,10 +979,7 @@ static int adjust_bw_freqs(void)
}
hdev->clk_freq = core->curr_freq;
rc = icc_set_bw(bus->client, bw_sum, 0);
if (rc)
dprintk(CVP_ERR, "Failed voting bus %s to ab %u\n",
bus->name, bw_sum);
rc = msm_cvp_set_bw(bus, bw_sum);
return rc;
}

@@ -1008,13 +999,11 @@ int msm_cvp_update_power(struct msm_cvp_inst *inst)
if (!s)
return -ECONNRESET;
inst->cur_cmd_type = EVA_KMD_UPDATE_POWER;
core = inst->core;
mutex_lock(&core->clk_lock);
rc = adjust_bw_freqs();
mutex_unlock(&core->clk_lock);
inst->cur_cmd_type = 0;
cvp_put_inst(s);
return rc;

@@ -1157,6 +1146,7 @@ static int msm_cvp_session_start(struct msm_cvp_inst *inst,
struct eva_kmd_arg *arg)
{
struct cvp_session_queue *sq;
struct cvp_hfi_device *hdev;
sq = &inst->session_queue;
spin_lock(&sq->lock);

@@ -1169,6 +1159,14 @@ static int msm_cvp_session_start(struct msm_cvp_inst *inst,
sq->state = QUEUE_START;
spin_unlock(&sq->lock);
if (inst->prop.type == HFI_SESSION_FD
|| inst->prop.type == HFI_SESSION_DMM) {
spin_lock(&inst->core->resources.pm_qos.lock);
inst->core->resources.pm_qos.off_vote_cnt++;
spin_unlock(&inst->core->resources.pm_qos.lock);
hdev = inst->core->device;
call_hfi_op(hdev, pm_qos_update, hdev->hfi_device_data);
}
return cvp_fence_thread_start(inst);
}

@@ -1500,63 +1498,6 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
return rc;
}
static int cvp_drain_fence_cmd_queue_partial(struct msm_cvp_inst *inst)
{
unsigned long wait_time;
struct cvp_fence_queue *q;
struct cvp_fence_command *f;
int rc = 0;
int count = 0, max_count = 0;
q = &inst->fence_cmd_queue;
mutex_lock(&q->lock);
list_for_each_entry(f, &q->sched_list, list) {
if (f->mode == OP_FLUSH)
continue;
++count;
}
list_for_each_entry(f, &q->wait_list, list) {
if (f->mode == OP_FLUSH)
continue;
++count;
}
mutex_unlock(&q->lock);
wait_time = count * CVP_MAX_WAIT_TIME * 1000;
dprintk(CVP_SYNX, "%s: wait %d us for %d fence command\n",
__func__, wait_time, count);
count = 0;
max_count = wait_time / 100;
retry:
mutex_lock(&q->lock);
f = list_first_entry(&q->sched_list, struct cvp_fence_command, list);
/* Wait for all normal frames to finish before return */
if ((f && f->mode == OP_FLUSH) ||
(list_empty(&q->sched_list) && list_empty(&q->wait_list))) {
mutex_unlock(&q->lock);
return rc;
}
mutex_unlock(&q->lock);
usleep_range(100, 200);
++count;
if (count < max_count) {
goto retry;
} else {
rc = -ETIMEDOUT;
dprintk(CVP_ERR, "%s: timed out!\n", __func__);
}
return rc;
}
static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
{
unsigned long wait_time;

@@ -1645,11 +1586,20 @@ int cvp_clean_session_queues(struct msm_cvp_inst *inst)
struct cvp_session_queue *sq;
u32 count = 0, max_retries = 100;
q = &inst->fence_cmd_queue;
mutex_lock(&q->lock);
if (q->state == QUEUE_START) {
mutex_unlock(&q->lock);
cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_ERROR);
} else {
dprintk(CVP_WARN, "Incorrect fence cmd queue state %d\n",
q->state);
mutex_unlock(&q->lock);
}
cvp_fence_thread_stop(inst);
/* Waiting for all output synx sent */
q = &inst->fence_cmd_queue;
retry:
mutex_lock(&q->lock);
if (list_empty(&q->sched_list)) {

@@ -1667,11 +1617,6 @@ retry:
spin_lock(&sq->lock);
sq->state = QUEUE_INVALID;
spin_unlock(&sq->lock);
sq = &inst->session_queue_fence;
spin_lock(&sq->lock);
sq->state = QUEUE_INVALID;
spin_unlock(&sq->lock);
}
static int cvp_flush_all(struct msm_cvp_inst *inst)

@@ -1728,107 +1673,6 @@ exit:
return rc;
}
static void cvp_mark_fence_command(struct msm_cvp_inst *inst, u64 frame_id)
{
int found = false;
struct cvp_fence_queue *q;
struct cvp_fence_command *f;
q = &inst->fence_cmd_queue;
list_for_each_entry(f, &q->sched_list, list) {
if (found) {
f->mode = OP_FLUSH;
continue;
}
if (f->frame_id >= frame_id) {
found = true;
f->mode = OP_FLUSH;
}
}
list_for_each_entry(f, &q->wait_list, list) {
if (found) {
f->mode = OP_FLUSH;
continue;
}
if (f->frame_id >= frame_id) {
found = true;
f->mode = OP_FLUSH;
}
}
}
static int cvp_flush_frame(struct msm_cvp_inst *inst, u64 frame_id)
{
int rc = 0;
struct msm_cvp_inst *s;
struct cvp_fence_queue *q;
struct cvp_fence_command *f, *d;
u64 ktid;
if (!inst || !inst->core) {
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
return -EINVAL;
}
s = cvp_get_inst_validate(inst->core, inst);
if (!s)
return -ECONNRESET;
dprintk(CVP_SESS, "Session %llx, flush frame with id %llu\n",
inst, frame_id);
q = &inst->fence_cmd_queue;
mutex_lock(&q->lock);
q->mode = OP_DRAINING;
cvp_mark_fence_command(inst, frame_id);
list_for_each_entry_safe(f, d, &q->wait_list, list) {
if (f->mode != OP_FLUSH)
continue;
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
dprintk(CVP_SYNX, "%s: flush frame %llu %llu from wait_list\n",
__func__, ktid, f->frame_id);
list_del_init(&f->list);
msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f,
SYNX_STATE_SIGNALED_CANCEL);
cvp_release_synx(inst, f);
cvp_free_fence_data(f);
}
list_for_each_entry(f, &q->sched_list, list) {
if (f->mode != OP_FLUSH)
continue;
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
dprintk(CVP_SYNX, "%s: flush frame %llu %llu from sched_list\n",
__func__, ktid, f->frame_id);
cvp_cancel_synx(inst, CVP_INPUT_SYNX, f,
SYNX_STATE_SIGNALED_CANCEL);
}
mutex_unlock(&q->lock);
rc = cvp_drain_fence_cmd_queue_partial(inst);
if (rc)
dprintk(CVP_WARN, "%s: continue flush. rc %d\n",
__func__, rc);
rc = cvp_flush_all(inst);
cvp_put_inst(s);
return rc;
}
int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg)
{
int rc = 0;

@@ -1916,7 +1760,8 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg)
rc = cvp_flush_all(inst);
break;
case EVA_KMD_FLUSH_FRAME:
rc = cvp_flush_frame(inst, arg->data.frame_id);
dprintk(CVP_WARN, "EVA_KMD_FLUSH_FRAME IOCTL deprecated\n");
rc = 0;
break;
default:
dprintk(CVP_HFI, "%s: unknown arg type %#x\n",

@@ -945,7 +945,6 @@ void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
return;
}
dprintk(CVP_ERR, "active session cmd %d\n", inst->cur_cmd_type);
dprintk(CVP_ERR,
"---Buffer details for inst: %pK of type: %d---\n",
inst, inst->session_type);

@@ -1221,7 +1220,6 @@ int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
if (!s)
return -ECONNRESET;
inst->cur_cmd_type = EVA_KMD_REGISTER_BUFFER;
session = (struct cvp_hal_session *)inst->session;
if (!session) {
dprintk(CVP_ERR, "%s: invalid session\n", __func__);

@@ -1235,7 +1233,6 @@ int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
buf->fd, buf->reserved[0]);
exit:
inst->cur_cmd_type = 0;
cvp_put_inst(s);
return rc;
}

@@ -1258,11 +1255,9 @@ int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
if (!s)
return -ECONNRESET;
inst->cur_cmd_type = EVA_KMD_UNREGISTER_BUFFER;
print_client_buffer(CVP_HFI, "unregister", inst, buf);
rc = msm_cvp_unmap_buf_dsp(inst, buf);
inst->cur_cmd_type = 0;
cvp_put_inst(s);
return rc;
}

@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/msm_ion.h>
#include <linux/refcount.h>
#include <media/msm_eva_private.h>

@@ -417,3 +417,19 @@ void msm_cvp_deinit_clocks(struct iris_hfi_device *device)
}
}
}
int msm_cvp_set_bw(struct bus_info *bus, unsigned long bw)
{
int rc = 0;
if (!bus->client)
return -EINVAL;
rc = icc_set_bw(bus->client, bw, 0);
if (rc)
dprintk(CVP_ERR, "Failed voting bus %s to ab %u\n",
bus->name, bw);
return rc;
}

@@ -22,4 +22,5 @@ int msm_cvp_disable_unprepare_clk(struct iris_hfi_device *device,
const char *name);
int msm_cvp_init_clocks(struct iris_hfi_device *device);
void msm_cvp_deinit_clocks(struct iris_hfi_device *device);
int msm_cvp_set_bw(struct bus_info *bus, unsigned long bw);
#endif
@@ -22,6 +22,11 @@
static void handle_session_error(enum hal_command_response cmd, void *data);
static void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst)
{
dprintk(CVP_WARN, "%s function is deprecated\n");
}
static void dump_hfi_queue(struct iris_hfi_device *device)
{
struct cvp_hfi_queue_header *queue;

@@ -36,10 +41,12 @@ static void dump_hfi_queue(struct iris_hfi_device *device)
* main memory.
*/
mb();
mutex_lock(&device->lock);
for (i = 0; i <= CVP_IFACEQ_DBGQ_IDX; i++) {
qinfo = &device->iface_queues[i];
queue = (struct cvp_hfi_queue_header *)qinfo->q_hdr;
if (!queue) {
mutex_unlock(&device->lock);
dprintk(CVP_ERR, "HFI queue not init, fail to dump\n");
return;
}

@@ -55,6 +62,7 @@ static void dump_hfi_queue(struct iris_hfi_device *device)
}
}
mutex_unlock(&device->lock);
}
struct msm_cvp_core *get_cvp_core(int core_id)

@@ -280,6 +288,11 @@ static void handle_session_release_buf_done(enum hal_command_response cmd,
}
mutex_unlock(&inst->persistbufs.lock);
if (response->status)
dprintk(CVP_ERR, "HFI release persist buf err 0x%x\n",
response->status);
inst->error_code = response->status;
if (IS_HAL_SESSION_CMD(cmd))
complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
else

@@ -364,11 +377,13 @@ int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
SESSION_MSG_INDEX(cmd));
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dump_hfi_queue(hdev->hfi_device_data);
rc = -EIO;
rc = -ETIMEDOUT;
} else if (inst->state == MSM_CVP_CORE_INVALID) {
rc = -ECONNRESET;
} else {
rc = 0;
rc = inst->error_code;
inst->prev_error_code = inst->error_code;
inst->error_code = CVP_ERR_NONE;
}
return rc;
}

@@ -393,25 +408,6 @@ err_same_state:
return rc;
}
void msm_cvp_notify_event(struct msm_cvp_inst *inst, int event_type)
{
}
static void msm_cvp_comm_generate_max_clients_error(struct msm_cvp_inst *inst)
{
enum hal_command_response cmd = HAL_SESSION_ERROR;
struct msm_cvp_cb_cmd_done response = {0};
if (!inst) {
dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
return;
}
dprintk(CVP_ERR, "%s: Too many clients\n", __func__);
response.session_id = inst;
response.status = CVP_ERR_MAX_CLIENTS;
handle_session_error(cmd, (void *)&response);
}
static void handle_session_init_done(enum hal_command_response cmd, void *data)
{
struct msm_cvp_cb_cmd_done *response = data;

@@ -432,23 +428,16 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data)
return;
}
if (response->status) {
if (response->status)
dprintk(CVP_ERR,
"Session init response from FW : %#x\n",
response->status);
if (response->status == CVP_ERR_MAX_CLIENTS)
msm_cvp_comm_generate_max_clients_error(inst);
else
msm_cvp_comm_generate_session_error(inst);
"Session %#x init err response from FW : 0x%x\n",
hash32_ptr(inst->session), response->status);
signal_session_msg_receipt(cmd, inst);
cvp_put_inst(inst);
return;
}
dprintk(CVP_SESS, "%s: cvp session %#x\n", __func__,
hash32_ptr(inst->session));
else
dprintk(CVP_SESS, "%s: cvp session %#x\n", __func__,
hash32_ptr(inst->session));
inst->error_code = response->status;
signal_session_msg_receipt(cmd, inst);
cvp_put_inst(inst);
return;

@@ -530,6 +519,11 @@ static void handle_session_flush(enum hal_command_response cmd, void *data)
return;
}
if (response->status)
dprintk(CVP_ERR, "HFI sess flush err 0x%x\n",
response->status);
inst->error_code = response->status;
signal_session_msg_receipt(cmd, inst);
cvp_put_inst(inst);
}

@@ -539,7 +533,6 @@ static void handle_session_error(enum hal_command_response cmd, void *data)
struct msm_cvp_cb_cmd_done *response = data;
struct cvp_hfi_device *hdev = NULL;
struct msm_cvp_inst *inst = NULL;
int event = CVP_SYS_ERROR_EVENT;
if (!response) {
dprintk(CVP_ERR,

@@ -556,33 +549,9 @@ static void handle_session_error(enum hal_command_response cmd, void *data)
}
hdev = inst->core->device;
dprintk(CVP_ERR, "Session error received for inst %pK session %x\n",
inst, hash32_ptr(inst->session));
dprintk(CVP_ERR, "Sess error 0x%x received for inst %pK sess %x\n",
response->status, inst, hash32_ptr(inst->session));
if (response->status == CVP_ERR_MAX_CLIENTS) {
dprintk(CVP_WARN, "Too many clients, rejecting %pK", inst);
event = CVP_MAX_CLIENTS_EVENT;
/*
* Clean the HFI session now. Since inst->state is moved to
* INVALID, forward thread doesn't know FW has valid session
* or not. This is the last place driver knows that there is
* no session in FW. Hence clean HFI session now.
*/
msm_cvp_comm_session_clean(inst);
} else if (response->status == CVP_ERR_NOT_SUPPORTED) {
dprintk(CVP_WARN, "Unsupported bitstream in %pK", inst);
event = CVP_HW_UNSUPPORTED_EVENT;
} else {
dprintk(CVP_WARN, "Unknown session error (%d) for %pK\n",
response->status, inst);
event = CVP_SYS_ERROR_EVENT;
}
/* change state before sending error to client */
change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
msm_cvp_notify_event(inst, event);
cvp_put_inst(inst);
}

@@ -604,8 +573,6 @@ static void msm_comm_clean_notify_client(struct msm_cvp_core *core)
mutex_unlock(&inst->lock);
dprintk(CVP_WARN,
"%s Send sys error for inst %pK\n", __func__, inst);
msm_cvp_notify_event(inst,
CVP_SYS_ERROR_EVENT);
}
mutex_unlock(&core->lock);
}

@@ -645,6 +612,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
cur_state = core->state;
core->state = CVP_CORE_UNINIT;
mutex_lock(&core->clk_lock);
dprintk(CVP_WARN, "SYS_ERROR received for core %pK\n", core);
if (response->status == CVP_ERR_NOC_ERROR) {
dprintk(CVP_WARN, "Got NOC error");

@@ -654,9 +622,9 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
list_for_each_entry(inst, &core->instances, list) {
dprintk(CVP_WARN,
"%s: sys error inst %#x kref %x, cmd %x state %x\n",
"%s: sys error inst %#x kref %x, state %x\n",
__func__, inst, kref_read(&inst->kref),
inst->cur_cmd_type, inst->state);
inst->state);
if (inst->state != MSM_CVP_CORE_INVALID) {
change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
if (cvp_clean_session_queues(inst))

@@ -682,9 +650,11 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
if (rc) {
dprintk(CVP_ERR, "core_release failed\n");
core->state = cur_state;
mutex_unlock(&core->clk_lock);
mutex_unlock(&core->lock);
return;
}
mutex_unlock(&core->clk_lock);
mutex_unlock(&core->lock);
dprintk(CVP_WARN, "SYS_ERROR handled.\n");

@@ -738,6 +708,11 @@ static void handle_session_close(enum hal_command_response cmd, void *data)
return;
}
if (response->status)
dprintk(CVP_ERR, "HFI sess close fail 0x%x\n",
response->status);
inst->error_code = response->status;
signal_session_msg_receipt(cmd, inst);
show_stats(inst);
cvp_put_inst(inst);

@@ -920,8 +895,6 @@ static void handle_thermal_event(struct msm_cvp_core *core)
dprintk(CVP_WARN,
"%s Send sys error for inst %pK\n",
__func__, inst);
msm_cvp_notify_event(inst,
CVP_SYS_ERROR_EVENT);
} else {
msm_cvp_comm_generate_session_error(inst);
}

@@ -1071,23 +1044,6 @@ int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
goto core_already_uninited;
}
if (!core->resources.never_unload_fw) {
cancel_delayed_work(&core->fw_unload_work);
/*
* Delay unloading of firmware. This is useful
* in avoiding firmware download delays in cases where we
* will have a burst of back to back cvp sessions
*/
schedule_delayed_work(&core->fw_unload_work,
msecs_to_jiffies(core->state == CVP_CORE_INIT_DONE ?
core->resources.msm_cvp_firmware_unload_delay : 0));
dprintk(CVP_CORE, "firmware unload delayed by %u ms\n",
core->state == CVP_CORE_INIT_DONE ?
core->resources.msm_cvp_firmware_unload_delay : 0);
}
core_already_uninited:
change_cvp_inst_state(inst, MSM_CVP_CORE_UNINIT);
mutex_unlock(&core->lock);

@@ -1104,7 +1060,6 @@ static int msm_comm_session_init_done(int flipped_state,
HAL_SESSION_INIT_DONE);
if (rc) {
dprintk(CVP_ERR, "Session init failed for inst %pK\n", inst);
msm_cvp_comm_generate_sys_error(inst);
return rc;
}

@@ -1289,9 +1244,9 @@ int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
mutex_unlock(&inst->sync_lock);
if (rc) {
if (rc == -ETIMEDOUT) {
dprintk(CVP_ERR,
"Failed to move from state: %d to %d\n",
"Timedout move from state: %d to %d\n",
inst->state, state);
msm_cvp_comm_kill_session(inst);
}

@@ -1358,9 +1313,8 @@ void msm_cvp_ssr_handler(struct work_struct *work)
dprintk(CVP_ERR, "Session abort triggered\n");
list_for_each_entry(inst, &core->instances, list) {
dprintk(CVP_WARN,
"Session to abort: inst %#x cmd %x ref %x\n",
inst, inst->cur_cmd_type,
kref_read(&inst->kref));
"Session to abort: inst %#x ref %x\n",
inst, kref_read(&inst->kref));
break;
}

@@ -1414,21 +1368,6 @@ send_again:
mutex_unlock(&core->lock);
}
void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst)
{
enum hal_command_response cmd = HAL_SESSION_ERROR;
struct msm_cvp_cb_cmd_done response = {0};
if (!inst || !inst->core) {
dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
return;
}
dprintk(CVP_WARN, "%s: inst %pK\n", __func__, inst);
response.session_id = inst;
response.status = CVP_ERR_FAIL;
handle_session_error(cmd, (void *)&response);
}
void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst)
{
struct msm_cvp_core *core;
@@ -22,7 +22,6 @@ int msm_cvp_deinit_core(struct msm_cvp_inst *inst);
int msm_cvp_comm_suspend(int core_id);
void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst);
void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
int msm_cvp_comm_smem_cache_operations(struct msm_cvp_inst *inst,
struct msm_cvp_smem *mem, enum smem_cache_ops cache_ops);

@@ -154,9 +154,8 @@ void *msm_cvp_open(int core_id, int session_type)
dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list)
dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
inst, inst->cur_cmd_type,
hash32_ptr(inst->session));
dprintk(CVP_ERR, "inst %pK, id %d\n",
inst, hash32_ptr(inst->session));
mutex_unlock(&core->lock);
return NULL;

@@ -225,6 +224,7 @@ void *msm_cvp_open(int core_id, int session_type)
return inst;
fail_init:
__deinit_session_queue(inst);
__deinit_fence_queue(inst);
mutex_lock(&core->lock);
list_del(&inst->list);
mutex_unlock(&core->lock);

@@ -270,6 +270,7 @@ static void msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
int max_retries;
struct msm_cvp_frame *frame;
struct cvp_session_queue *sq, *sqf;
struct cvp_hfi_device *hdev;
if (!inst) {
dprintk(CVP_ERR, "%s: invalid params\n", __func__);

@@ -325,6 +326,20 @@ wait:
if (cvp_release_arp_buffers(inst))
dprintk(CVP_ERR,
"Failed to release persist buffers\n");
if (inst->prop.type == HFI_SESSION_FD
|| inst->prop.type == HFI_SESSION_DMM) {
spin_lock(&inst->core->resources.pm_qos.lock);
if (inst->core->resources.pm_qos.off_vote_cnt > 0)
inst->core->resources.pm_qos.off_vote_cnt--;
else
dprintk(CVP_WARN, "%s Unexpected pm_qos off vote %d\n",
__func__,
inst->core->resources.pm_qos.off_vote_cnt);
spin_unlock(&inst->core->resources.pm_qos.lock);
hdev = inst->core->device;
call_hfi_op(hdev, pm_qos_update, hdev->hfi_device_data);
}
}
int msm_cvp_destroy(struct msm_cvp_inst *inst)

@@ -362,9 +377,6 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
pr_info(CVP_DBG_TAG "Closed cvp instance: %pK session_id = %d\n",
"sess", inst, hash32_ptr(inst->session));
if (inst->cur_cmd_type)
dprintk(CVP_ERR, "deleted instance has pending cmd %d\n",
inst->cur_cmd_type);
inst->session = (void *)0xdeadbeef;
kfree(inst);
return 0;

@@ -9,7 +9,6 @@
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <linux/refcount.h>
#include <media/msm_eva_private.h>
#include "msm_cvp_buf.h"

@@ -234,7 +234,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(cvp_pwr_fops, cvp_power_get, cvp_power_set, "%llu\n");
struct dentry *msm_cvp_debugfs_init_drv(void)
{
struct dentry *dir = NULL, *f;
struct dentry *dir = NULL;
dir = debugfs_create_dir("msm_cvp", NULL);
if (IS_ERR_OR_NULL(dir)) {

@@ -248,26 +248,17 @@ struct dentry *msm_cvp_debugfs_init_drv(void)
debugfs_create_u32("fw_low_power_mode", 0644, dir,
&msm_cvp_fw_low_power_mode);
debugfs_create_u32("debug_output", 0644, dir, &msm_cvp_debug_out);
debugfs_create_u32("minidump_enable", 0644, dir, &msm_cvp_minidump_enable);
f = debugfs_create_bool("fw_coverage", 0644, dir, &msm_cvp_fw_coverage);
if (IS_ERR_OR_NULL(f))
goto failed_create_dir;
f = debugfs_create_bool("disable_thermal_mitigation", 0644, dir,
debugfs_create_u32("minidump_enable", 0644, dir,
&msm_cvp_minidump_enable);
debugfs_create_bool("fw_coverage", 0644, dir, &msm_cvp_fw_coverage);
debugfs_create_bool("disable_thermal_mitigation", 0644, dir,
&msm_cvp_thermal_mitigation_disabled);
if (IS_ERR_OR_NULL(f))
goto failed_create_dir;
f = debugfs_create_bool("enable_cacheop", 0644, dir,
debugfs_create_bool("enable_cacheop", 0644, dir,
&msm_cvp_cacheop_enabled);
if (IS_ERR_OR_NULL(f))
goto failed_create_dir;
f = debugfs_create_bool("disable_cvp_syscache", 0644, dir,
debugfs_create_bool("disable_cvp_syscache", 0644, dir,
&msm_cvp_syscache_disable);
if (IS_ERR_OR_NULL(f))
goto failed_create_dir;
f = debugfs_create_bool("disable_dcvs", 0644, dir,
debugfs_create_bool("disable_dcvs", 0644, dir,
&msm_cvp_dcvs_disable);
if (IS_ERR_OR_NULL(f))
goto failed_create_dir;
debugfs_create_file("cvp_power", 0644, dir, NULL, &cvp_pwr_fops);
@@ -215,6 +215,7 @@ static int delete_dsp_session(struct msm_cvp_inst *inst,
struct list_head *ptr_dsp_buf = NULL, *next_dsp_buf = NULL;
struct cvp_internal_buf *buf = NULL;
struct task_struct *task = NULL;
struct cvp_hfi_device *hdev;
int rc;
if (!inst)

@@ -265,6 +266,18 @@ static int delete_dsp_session(struct msm_cvp_inst *inst,
task = inst->task;
spin_lock(&inst->core->resources.pm_qos.lock);
if (inst->core->resources.pm_qos.off_vote_cnt > 0)
inst->core->resources.pm_qos.off_vote_cnt--;
else
dprintk(CVP_WARN, "%s Unexpected pm_qos off vote %d\n",
__func__,
inst->core->resources.pm_qos.off_vote_cnt);
spin_unlock(&inst->core->resources.pm_qos.lock);
hdev = inst->core->device;
call_hfi_op(hdev, pm_qos_update, hdev->hfi_device_data);
rc = msm_cvp_close(inst);
if (rc)
dprintk(CVP_ERR, "Warning: Failed to close cvp instance\n");

@@ -1287,6 +1300,7 @@ static void __dsp_cvp_sess_create(struct cvp_dsp_cmd_msg *cmd)
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct pid *pid_s = NULL;
struct task_struct *task = NULL;
struct cvp_hfi_device *hdev;
cmd->ret = 0;

@@ -1361,6 +1375,12 @@ static void __dsp_cvp_sess_create(struct cvp_dsp_cmd_msg *cmd)
__func__, cmd->session_id, cmd->session_cpu_low,
cmd->session_cpu_high);
spin_lock(&inst->core->resources.pm_qos.lock);
inst->core->resources.pm_qos.off_vote_cnt++;
spin_unlock(&inst->core->resources.pm_qos.lock);
hdev = inst->core->device;
call_hfi_op(hdev, pm_qos_update, hdev->hfi_device_data);
return;
fail_get_pid:

@@ -1382,6 +1402,7 @@ static void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct task_struct *task = NULL;
struct cvp_hfi_device *hdev;
cmd->ret = 0;

@@ -1418,6 +1439,18 @@ static void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
task = inst->task;
spin_lock(&inst->core->resources.pm_qos.lock);
if (inst->core->resources.pm_qos.off_vote_cnt > 0)
inst->core->resources.pm_qos.off_vote_cnt--;
else
dprintk(CVP_WARN, "%s Unexpected pm_qos off vote %d\n",
__func__,
inst->core->resources.pm_qos.off_vote_cnt);
spin_unlock(&inst->core->resources.pm_qos.lock);
hdev = inst->core->device;
call_hfi_op(hdev, pm_qos_update, hdev->hfi_device_data);
rc = msm_cvp_close(inst);
if (rc) {
dprintk(CVP_ERR, "Warning: Failed to close cvp instance\n");

@@ -1916,8 +1949,7 @@ int cvp_dsp_device_init(void)
int i;
char name[CVP_FASTRPC_DRIVER_NAME_SIZE] = "qcom,fastcv0\0";
add_va_node_to_list(&head_node_dbg_struct,
&gfa_cv, sizeof(struct cvp_dsp_apps),
add_va_node_to_list(CVP_DBG_DUMP, &gfa_cv, sizeof(struct cvp_dsp_apps),
"cvp_dsp_apps-gfa_cv", false);
mutex_init(&me->tx_lock);

@@ -398,7 +398,10 @@ struct msm_cvp_inst {
struct msm_cvp_capability capability;
struct kref kref;
struct cvp_session_prop prop;
u32 cur_cmd_type;
/* error_code will be cleared after being returned to user mode */
u32 error_code;
/* prev_error_code saves value of error_code before it's cleared */
u32 prev_error_code;
struct synx_session synx_session_id;
struct cvp_fence_queue fence_cmd_queue;
};
@@ -15,7 +15,7 @@
#include <linux/types.h>
#include <linux/version.h>
#include <linux/io.h>
#include <linux/of_fdt.h>
#include <soc/qcom/of_common.h>
#include "msm_cvp_internal.h"
#include "msm_cvp_debug.h"
#include "cvp_hfi_api.h"

@@ -38,7 +38,7 @@
static struct msm_cvp_common_data default_common_data[] = {
{
.key = "qcom,never-unload-fw",
.key = "qcom,auto-pil",
.value = 1,
},
};

@@ -49,8 +49,8 @@ static struct msm_cvp_common_data sm8450_common_data[] = {
.value = 1,
},
{
.key = "qcom,never-unload-fw",
.value = 1,
.key = "qcom,pm-qos-latency-us",
.value = 50,
},
{
.key = "qcom,sw-power-collapse",

@@ -102,11 +102,11 @@ static struct msm_cvp_common_data sm8450_common_data[] = {
static struct msm_cvp_common_data sm8550_common_data[] = {
{
.key = "qcom,auto-pil",
.value = 0,
.value = 1,
},
{
.key = "qcom,never-unload-fw",
.value = 1,
.key = "qcom,pm-qos-latency-us",
.value = 50,
},
{
.key = "qcom,sw-power-collapse",

@@ -194,8 +194,8 @@ static struct msm_cvp_platform_data sm8550_data = {
.common_data_length = ARRAY_SIZE(sm8550_common_data),
.sku_version = 0,
.vpu_ver = VPU_VERSION_5,
.ubwc_config = kona_ubwc_data,
.noc_qos = 0x0,
.ubwc_config = kona_ubwc_data, /*Reuse Kona setting*/
.noc_qos = &waipio_noc_qos, /*Reuse Waipio setting*/
};
static const struct of_device_id msm_cvp_dt_match[] = {

@@ -204,7 +204,7 @@ static const struct of_device_id msm_cvp_dt_match[] = {
.data = &sm8450_data,
},
{
.compatible = "qcom,kailua-cvp",
.compatible = "qcom,kalama-cvp",
.data = &sm8550_data,
},

@@ -750,7 +750,7 @@ int cvp_read_platform_resources_from_drv_data(
{
struct msm_cvp_platform_data *platform_data;
struct msm_cvp_platform_resources *res;
int rc = 0;
int rc = 0, i;
if (!core || !core->platform_data) {
dprintk(CVP_ERR, "%s Invalid data\n", __func__);

@@ -777,14 +777,16 @@ int cvp_read_platform_resources_from_drv_data(
res->sw_power_collapsible = find_key_value(platform_data,
"qcom,sw-power-collapse");
res->never_unload_fw = find_key_value(platform_data,
"qcom,never-unload-fw");
res->debug_timeout = find_key_value(platform_data,
"qcom,debug-timeout");
res->pm_qos_latency_us = find_key_value(platform_data,
res->pm_qos.latency_us = find_key_value(platform_data,
"qcom,pm-qos-latency-us");
res->pm_qos.silver_count = 4;
for (i = 0; i < res->pm_qos.silver_count; i++)
res->pm_qos.silver_cores[i] = i;
res->pm_qos.off_vote_cnt = 0;
spin_lock_init(&res->pm_qos.lock);
res->max_secure_inst_count = find_key_value(platform_data,
"qcom,max-secure-instances");

@@ -8,6 +8,7 @@
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include "msm_cvp_core.h"
#include <linux/soc/qcom/llcc-qcom.h>

@@ -134,6 +135,19 @@ struct msm_cvp_mem_cdsp {
struct device *dev;
};
#define MAX_SILVER_CORE_NUM 8
#define HFI_SESSION_FD 4
#define HFI_SESSION_DMM 2
struct cvp_pm_qos {
u32 silver_count;
u32 latency_us;
u32 off_vote_cnt;
spinlock_t lock;
int silver_cores[MAX_SILVER_CORE_NUM];
struct dev_pm_qos_request *pm_qos_hdls;
};
struct msm_cvp_platform_resources {
phys_addr_t firmware_base;
phys_addr_t register_base;

@@ -166,9 +180,8 @@ struct msm_cvp_platform_resources {
bool thermal_mitigable;
const char *fw_name;
const char *hfi_version;
bool never_unload_fw;
bool debug_timeout;
uint32_t pm_qos_latency_us;
struct cvp_pm_qos pm_qos;
uint32_t max_inst_count;
uint32_t max_secure_inst_count;
int msm_cvp_hw_rsp_timeout;
@@ -71,7 +71,7 @@ void cvp_dump_fence_queue(struct msm_cvp_inst *inst)
int cvp_import_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc,
u32 *fence)
{
int rc = 0;
int rc = 0, rr = 0;
int i;
struct cvp_fence_type *fs;
struct synx_import_params params;

@@ -99,12 +99,12 @@ int cvp_import_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc,
dprintk(CVP_ERR,
"%s: %d synx_import failed\n",
__func__, h_synx);
return rc;
rr = rc;
}
}
}
return 0;
return rr;
}
int cvp_release_synx(struct msm_cvp_inst *inst, struct cvp_fence_command *fc)

@@ -8,7 +8,6 @@
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/mem-buf.h>
#include <linux/slab.h>