msm: eva: Propagate tip of 2.0 to 3.0

Add back all changes made after Aug. 3rd from
Kailua to Lanai.

Change-Id: I725b77892ab24354014b3d9bbc13d14c710aff5a
Signed-off-by: George Shen <quic_sqiao@quicinc.com>
George Shen
2022-09-27 12:50:57 -07:00
parent 18119d7649
commit fcbb3d87ab
13 changed files with 466 additions and 223 deletions

View File

@@ -622,10 +622,10 @@ static int __init msm_cvp_init(void)
return rc;
}
cvp_driver->msg_cache = KMEM_CACHE(cvp_session_msg, 0);
cvp_driver->frame_cache = KMEM_CACHE(msm_cvp_frame, 0);
cvp_driver->buf_cache = KMEM_CACHE(cvp_internal_buf, 0);
cvp_driver->smem_cache = KMEM_CACHE(msm_cvp_smem, 0);
cvp_driver->msg_cache.cache = KMEM_CACHE(cvp_session_msg, 0);
cvp_driver->frame_cache.cache = KMEM_CACHE(msm_cvp_frame, 0);
cvp_driver->buf_cache.cache = KMEM_CACHE(cvp_internal_buf, 0);
cvp_driver->smem_cache.cache = KMEM_CACHE(msm_cvp_smem, 0);
return rc;
}
@@ -633,10 +633,10 @@ static int __init msm_cvp_init(void)
static void __exit msm_cvp_exit(void)
{
cvp_dsp_device_exit();
kmem_cache_destroy(cvp_driver->msg_cache);
kmem_cache_destroy(cvp_driver->frame_cache);
kmem_cache_destroy(cvp_driver->buf_cache);
kmem_cache_destroy(cvp_driver->smem_cache);
kmem_cache_destroy(cvp_driver->msg_cache.cache);
kmem_cache_destroy(cvp_driver->frame_cache.cache);
kmem_cache_destroy(cvp_driver->buf_cache.cache);
kmem_cache_destroy(cvp_driver->smem_cache.cache);
platform_driver_unregister(&msm_cvp_driver);
debugfs_remove_recursive(cvp_driver->debugfs_root);
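The call sites above use a small wrapper that this diff introduces further down (struct cvp_kmem_cache in msm_cvp_internal.h, helpers in msm_cvp.c): each kmem_cache is paired with an atomic count of outstanding objects so leaks can be reported at session teardown. Condensed from the later hunks:
struct cvp_kmem_cache {
	struct kmem_cache *cache;
	atomic_t nr_objs;	/* outstanding objects, reported in the sys-stat log */
};
void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags)
{
	atomic_inc(&k->nr_objs);
	return kmem_cache_zalloc(k->cache, flags);
}
void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj)
{
	atomic_dec(&k->nr_objs);
	kmem_cache_free(k->cache, obj);
}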

View File

@@ -956,6 +956,37 @@ static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state)
return 0;
}
/*
* Based on the fal10_veto, X2RPMh, core_pwr_on and PWaitMode values, infer
* the value of xtss_sw_reset. xtss_sw_reset is a TZ register bit; the
* driver cannot access it directly.
*
* __boot_firmware(), the caller of this function, has already checked
* that "core_pwr_on" == false, i.e. the core is powered off, so this
* function doesn't check core_pwr_on and assumes core_pwr_on = false.
*
* fal10_veto = VPU_CPU_CS_X2RPMh[2] |
* ( ~VPU_CPU_CS_X2RPMh[1] & core_pwr_on ) |
* ( ~VPU_CPU_CS_X2RPMh[0] & ~( xtss_sw_reset | PWaitMode ) ) ;
*/
static inline void check_tensilica_in_reset(struct iris_hfi_device *device)
{
u32 X2RPMh, fal10_veto, wait_mode;
X2RPMh = __read_register(device, CVP_CPU_CS_X2RPMh);
X2RPMh = X2RPMh & 0x7;
/* wait_mode = 1: Tensilica is in WFI mode (PWaitMode = true) */
wait_mode = __read_register(device, CVP_WRAPPER_CPU_STATUS);
wait_mode = wait_mode & 0x1;
fal10_veto = __read_register(device, CVP_CPU_CS_X2RPMh_STATUS);
fal10_veto = fal10_veto & 0x1;
dprintk(CVP_WARN, "tensilica reset check %#x %#x %#x\n",
X2RPMh, wait_mode, fal10_veto);
}
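For illustration only (this helper is not part of the commit): with core_pwr_on assumed false, the equation above reduces to fal10_veto = X2RPMh[2] | (~X2RPMh[0] & ~(xtss_sw_reset | PWaitMode)), so xtss_sw_reset is only unambiguously recoverable when X2RPMh bits 2 and 0 are clear and Tensilica is not in WFI:
/* Hypothetical helper: infer xtss_sw_reset from the three values logged
 * above, assuming core_pwr_on == false. */
static bool xtss_likely_in_reset(u32 x2rpmh, u32 fal10_veto, u32 wait_mode)
{
	if ((x2rpmh & 0x5) || (wait_mode & 0x1))
		return false;	/* equation is under-determined here */
	/* remaining case: fal10_veto == ~xtss_sw_reset */
	return !(fal10_veto & 0x1);
}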
static inline int __boot_firmware(struct iris_hfi_device *device)
{
int rc = 0, loop = 10;
@@ -1000,8 +1031,11 @@ static inline int __boot_firmware(struct iris_hfi_device *device)
}
if (!(ctrl_status & CVP_CTRL_INIT_STATUS__M)) {
dprintk(CVP_ERR, "Failed to boot FW status: %x\n",
ctrl_status);
ctrl_init_val = __read_register(device, CVP_CTRL_INIT);
dprintk(CVP_ERR,
"Failed to boot FW status: %x %x\n",
ctrl_status, ctrl_init_val);
check_tensilica_in_reset(device);
rc = -ENODEV;
}
@@ -1098,7 +1132,6 @@ static int iris_hfi_flush_debug_queue(void *dev)
return -EINVAL;
}
cvp_dump_csr(device);
mutex_lock(&device->lock);
if (!device->power_enabled) {
@@ -1106,6 +1139,7 @@ static int iris_hfi_flush_debug_queue(void *dev)
rc = -EINVAL;
goto exit;
}
cvp_dump_csr(device);
__flush_debug_queue(device, NULL);
exit:
mutex_unlock(&device->lock);
@@ -4169,7 +4203,7 @@ static void power_off_iris2(struct iris_hfi_device *device)
/* Do not access registers after this point! */
device->power_enabled = false;
pr_info_ratelimited(CVP_DBG_TAG "cvp (eva) power collapsed\n", "pwr");
pr_info(CVP_DBG_TAG "cvp (eva) power collapsed\n", "pwr");
}
static inline int __resume(struct iris_hfi_device *device)

View File

@@ -356,4 +356,14 @@ static inline enum buf_map_type cvp_find_map_type(int pkt_type)
else
return MAP_FRAME;
}
static inline bool is_params_pkt(int pkt_type)
{
if (pkt_type == HFI_CMD_SESSION_CVP_DMM_PARAMS ||
pkt_type == HFI_CMD_SESSION_CVP_WARP_DS_PARAMS)
return true;
return false;
}
#endif

View File

@@ -76,6 +76,8 @@
#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK 0x4
#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_SHFT 0x3
#define CVP_CPU_CS_X2RPMh_STATUS (CVP_CPU_BASE_OFFS + 0x170)
/*
* --------------------------------------------------------------------------
* MODULE: cvp_wrapper

View File

@@ -90,7 +90,6 @@ static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
* Get the scatterlist for the given attachment
* Mapping of sg is taken care of by map attachment
*/
attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
/*
* We do not need dma_map function to perform cache operations
* on the whole buffer size and hence pass skip sync flag.
@@ -105,6 +104,9 @@ static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(table)) {
dprintk(CVP_ERR, "Failed to map table %d\n", PTR_ERR(table));
dprintk(CVP_ERR,
"Mapping detail dma_buf 0x%llx, %s, size %#x\n",
dbuf, dbuf->name, dbuf->size);
rc = PTR_ERR(table) ?: -ENOMEM;
goto mem_map_table_failed;
}
@@ -258,27 +260,8 @@ int msm_cvp_map_smem(struct msm_cvp_inst *inst,
/* User persist buffer has no feature config info */
is_config_pkt = cvp_hfi_defs[i].is_config_pkt;
/* if (!(smem->flags & SMEM_SECURE) &&
is_config_pkt &&
(msm_cvp_debug & CVP_MEM)) {
dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
smem->kvaddr = __cvp_dma_buf_vmap(dma_buf);
if (!smem->kvaddr) {
dprintk(CVP_WARN,
"Failed to map config buf in kernel\n");
dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
goto checksum_done;
}
for (i = 0; i < (dma_buf->size); i++) {
smem->checksum += *(u8 *)(smem->kvaddr + i);
}
__cvp_dma_buf_vunmap(dma_buf, smem->kvaddr);
smem->kvaddr = 0;
dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
}
checksum_done:*/
print_smem(CVP_MEM, str, inst, smem);
atomic_inc(&inst->smem_count);
goto success;
exit:
smem->device_addr = 0x0;
@@ -308,6 +291,7 @@ int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
}
smem->device_addr = 0x0;
atomic_dec(&inst->smem_count);
exit:
return rc;
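Together with the atomic_inc(&inst->smem_count) on the successful map path above and the check this diff adds to msm_cvp_destroy(), the new accounting distills to:
/* map success:   */ atomic_inc(&inst->smem_count);
/* unmap:         */ atomic_dec(&inst->smem_count);
/* session close: */ core->smem_leak_count += atomic_read(&inst->smem_count);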

View File

@@ -498,7 +498,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
else
sq = &inst->session_queue;
sess_msg = kmem_cache_alloc(cvp_driver->msg_cache, GFP_KERNEL);
sess_msg = cvp_kmem_cache_zalloc(&cvp_driver->msg_cache, GFP_KERNEL);
if (sess_msg == NULL) {
dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__);
return -ENOMEM;
@@ -528,7 +528,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
error_handle_msg:
spin_unlock(&sq->lock);
kmem_cache_free(cvp_driver->msg_cache, sess_msg);
cvp_kmem_cache_free(&cvp_driver->msg_cache, sess_msg);
return -ENOMEM;
}

View File

@@ -110,7 +110,7 @@ static int cvp_wait_process_message(struct msm_cvp_inst *inst,
}
if (!out) {
kmem_cache_free(cvp_driver->msg_cache, msg);
cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
goto exit;
}
@@ -118,7 +118,7 @@ static int cvp_wait_process_message(struct msm_cvp_inst *inst,
memcpy(out, &msg->pkt, get_msg_size(hdr));
if (hdr->client_data.kdata >= ARRAY_SIZE(cvp_hfi_defs))
msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
kmem_cache_free(cvp_driver->msg_cache, msg);
cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
exit:
return rc;

View File

@@ -36,6 +36,8 @@ static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst);
static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
unsigned int num_layers, struct eva_kmd_wncc_metadata** wncc_metadata);
void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log);
int print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
struct msm_cvp_smem *smem)
{
@@ -53,19 +55,22 @@ int print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
}
if (smem->dma_buf) {
if (!atomic_read(&smem->refcount))
return 0;
i = get_pkt_index_from_type(smem->pkt_type);
if (i > 0)
strlcpy(name, cvp_hfi_defs[i].name, PKT_NAME_LEN);
if (!atomic_read(&smem->refcount))
dprintk(tag,
"%s: %x : %pK size %d flags %#x iova %#x idx %d ref %d pkt_type %s buf_idx %#x chksum %#x",
str, hash32_ptr(inst->session), smem->dma_buf,
" UNUSED mapping %s: 0x%llx %s size %d iova %#x idx %d pkt_type %s buf_idx %#x fd %d",
str, smem->dma_buf, smem->dma_buf->name,
smem->size, smem->device_addr, smem->bitmap_index, name, smem->buf_idx, smem->fd);
else
dprintk(tag,
"%s: %x : 0x%llx %s size %d flags %#x iova %#x idx %d ref %d pkt_type %s buf_idx %#x fd %d",
str, hash32_ptr(inst->session), smem->dma_buf, smem->dma_buf->name,
smem->size, smem->flags, smem->device_addr,
smem->bitmap_index, atomic_read(&smem->refcount),
name, smem->buf_idx, smem->checksum);
name, smem->buf_idx, smem->fd);
}
return 0;
}
@@ -78,10 +83,10 @@ static void print_internal_buffer(u32 tag, const char *str,
if (cbuf->smem->dma_buf) {
dprintk(tag,
"%s: %x : fd %d off %d %pK size %d iova %#x",
"%s: %x : fd %d off %d 0x%llx %s size %d iova %#x",
str, hash32_ptr(inst->session), cbuf->fd,
cbuf->offset, cbuf->smem->dma_buf, cbuf->size,
cbuf->smem->device_addr);
cbuf->offset, cbuf->smem->dma_buf, cbuf->smem->dma_buf->name,
cbuf->size, cbuf->smem->device_addr);
} else {
dprintk(tag,
"%s: %x : idx %2d fd %d off %d size %d iova %#x",
@@ -285,13 +290,13 @@ int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
dprintk(CVP_MEM, "dma_buf from internal %llu\n", dma_buf);
cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
if (!cbuf) {
rc = -ENOMEM;
goto exit;
}
smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
if (!smem) {
rc = -ENOMEM;
goto exit;
@@ -301,6 +306,7 @@ int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
smem->bitmap_index = MAX_DMABUF_NUMS;
smem->pkt_type = 0;
smem->buf_idx = 0;
smem->fd = buf->fd;
dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
rc = msm_cvp_map_smem(inst, smem, "map dsp");
if (rc) {
@@ -329,10 +335,10 @@ exit:
if (smem->device_addr)
msm_cvp_unmap_smem(inst, smem, "unmap dsp");
msm_cvp_smem_put_dma_buf(smem->dma_buf);
kmem_cache_free(cvp_driver->smem_cache, smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
}
if (cbuf)
kmem_cache_free(cvp_driver->buf_cache, cbuf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
return rc;
}
@@ -377,8 +383,8 @@ int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
list_del(&cbuf->list);
mutex_unlock(&inst->cvpdspbufs.lock);
kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
kmem_cache_free(cvp_driver->buf_cache, cbuf);
cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
return rc;
}
@@ -443,15 +449,15 @@ int msm_cvp_map_buf_wncc(struct msm_cvp_inst *inst,
return -EINVAL;
}
cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
if (!cbuf) {
msm_cvp_smem_put_dma_buf(dma_buf);
return -ENOMEM;
}
smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
if (!smem) {
kmem_cache_free(cvp_driver->buf_cache, cbuf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
msm_cvp_smem_put_dma_buf(dma_buf);
return -ENOMEM;
}
@@ -460,6 +466,7 @@ int msm_cvp_map_buf_wncc(struct msm_cvp_inst *inst,
smem->bitmap_index = MAX_DMABUF_NUMS;
smem->pkt_type = 0;
smem->buf_idx = 0;
smem->fd = buf->fd;
dprintk(CVP_MEM, "%s: dma_buf = %llx", __func__, dma_buf);
rc = msm_cvp_map_smem(inst, smem, "map wncc");
if (rc) {
@@ -536,9 +543,9 @@ exit:
if (smem->device_addr)
msm_cvp_unmap_smem(inst, smem, "unmap wncc");
msm_cvp_smem_put_dma_buf(smem->dma_buf);
kmem_cache_free(cvp_driver->buf_cache, cbuf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
cbuf = NULL;
kmem_cache_free(cvp_driver->smem_cache, smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
smem = NULL;
return rc;
}
@@ -625,8 +632,8 @@ int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst *inst,
}
mutex_unlock(&inst->cvpwnccbufs.lock);
kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
kmem_cache_free(cvp_driver->buf_cache, cbuf);
cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
return rc;
}
@@ -1072,6 +1079,8 @@ static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
u32 pkt_type)
{
struct msm_cvp_smem *smem;
struct msm_cvp_frame *frame;
struct cvp_internal_buf *buf;
int i;
if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
@@ -1092,12 +1101,41 @@ static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
*/
msm_cvp_smem_put_dma_buf(smem->dma_buf);
mutex_unlock(&inst->dma_cache.lock);
print_smem(CVP_MEM, "found", inst, smem);
print_smem(CVP_MEM, "found in cache", inst, smem);
return smem;
}
mutex_unlock(&inst->dma_cache.lock);
/* Search persist list */
mutex_lock(&inst->persistbufs.lock);
list_for_each_entry(buf, &inst->persistbufs.list, list) {
smem = buf->smem;
if (smem && smem->dma_buf == dma_buf) {
atomic_inc(&smem->refcount);
mutex_unlock(&inst->persistbufs.lock);
print_smem(CVP_MEM, "found in persist", inst, smem);
return smem;
}
}
mutex_unlock(&inst->persistbufs.lock);
/* Search frame list */
mutex_lock(&inst->frames.lock);
list_for_each_entry(frame, &inst->frames.list, list) {
for (i = 0; i < frame->nr; i++) {
smem = frame->bufs[i].smem;
if (smem && smem->dma_buf == dma_buf) {
atomic_inc(&smem->refcount);
mutex_unlock(&inst->frames.lock);
print_smem(CVP_MEM, "found in frame",
inst, smem);
return smem;
}
}
}
mutex_unlock(&inst->frames.lock);
return NULL;
}
@@ -1121,15 +1159,16 @@ static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
smem2 = inst->dma_cache.entries[i];
msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
msm_cvp_smem_put_dma_buf(smem2->dma_buf);
kmem_cache_free(cvp_driver->smem_cache, smem2);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem2);
inst->dma_cache.entries[i] = smem;
smem->bitmap_index = i;
SET_USE_BITMAP(i, inst);
} else {
dprintk(CVP_WARN,
"%s: reached limit, fallback to frame mapping list\n"
"%s: reached limit, fallback to buf mapping list\n"
, __func__);
atomic_inc(&smem->refcount);
mutex_unlock(&inst->dma_cache.lock);
return -ENOMEM;
}
@@ -1163,7 +1202,7 @@ static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
}
if (is_persist) {
smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
if (!smem)
return NULL;
@@ -1171,6 +1210,7 @@ static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
smem->bitmap_index = MAX_DMABUF_NUMS;
smem->pkt_type = pkt_type;
smem->flags |= SMEM_PERSIST;
smem->fd = buf->fd;
atomic_inc(&smem->refcount);
rc = msm_cvp_map_smem(inst, smem, "map cpu");
if (rc)
@@ -1187,20 +1227,22 @@ static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
smem = msm_cvp_session_find_smem(inst, dma_buf, pkt_type);
if (!smem) {
found = 0;
smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
if (!smem)
return NULL;
smem->dma_buf = dma_buf;
smem->bitmap_index = MAX_DMABUF_NUMS;
smem->pkt_type = pkt_type;
smem->fd = buf->fd;
rc = msm_cvp_map_smem(inst, smem, "map cpu");
if (rc)
goto exit;
if (!IS_CVP_BUF_VALID(buf, smem)) {
dprintk(CVP_ERR,
"%s: invalid offset %d or size %d new entry\n",
__func__, buf->offset, buf->size);
"%s: invalid buf %d %d fd %d dma 0x%llx %s %d type %#x\n",
__func__, buf->offset, buf->size, buf->fd,
dma_buf, dma_buf->name, dma_buf->size, pkt_type);
goto exit2;
}
rc = msm_cvp_session_add_smem(inst, smem);
@@ -1210,7 +1252,7 @@ static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
}
if (!IS_CVP_BUF_VALID(buf, smem)) {
dprintk(CVP_ERR, "%s: invalid offset %d or size %d\n",
dprintk(CVP_ERR, "%s: invalid offset %d or size %d found\n",
__func__, buf->offset, buf->size);
if (found) {
mutex_lock(&inst->dma_cache.lock);
@@ -1221,13 +1263,16 @@ static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
goto exit2;
}
if (smem->fd != buf->fd)
dprintk(CVP_ERR, "%s Failed fd check\n", __func__);
return smem;
exit2:
msm_cvp_unmap_smem(inst, smem, "unmap cpu");
exit:
msm_cvp_smem_put_dma_buf(dma_buf);
kmem_cache_free(cvp_driver->smem_cache, smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
smem = NULL;
return smem;
}
@@ -1272,19 +1317,24 @@ static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
dma_buf_put(dma_buf);
pbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
pbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
if (!pbuf) {
dprintk(CVP_ERR, "%s failed to allocate kmem obj\n",
__func__);
return 0;
}
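/*
 * DMM/WARP params buffers are mapped below as regular, non-persist
 * buffers, so their smem is refcounted via the dma cache and reclaimed
 * on the normal unmap path; see the matching persistbufs cleanup this
 * diff adds to msm_cvp_session_deinit_buffers().
 */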
if (is_params_pkt(pkt_type))
smem = msm_cvp_session_get_smem(inst, buf, false, pkt_type);
else
smem = msm_cvp_session_get_smem(inst, buf, true, pkt_type);
if (!smem)
goto exit;
smem->pkt_type = pkt_type;
smem->buf_idx = buf_idx;
smem->fd = buf->fd;
pbuf->smem = smem;
pbuf->fd = buf->fd;
pbuf->size = buf->size;
@@ -1302,7 +1352,7 @@ static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
return iova;
exit:
kmem_cache_free(cvp_driver->buf_cache, pbuf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
return 0;
}
@@ -1367,24 +1417,26 @@ static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
/* smem not in dmamap cache */
if (atomic_dec_and_test(&smem->refcount)) {
msm_cvp_unmap_smem(inst, smem, "unmap cpu");
dma_heap_buffer_free(smem->dma_buf);
smem->pkt_type = smem->buf_idx = 0;
kmem_cache_free(cvp_driver->smem_cache, smem);
smem->buf_idx |= 0xdead0000;
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
buf->smem = NULL;
}
} else {
mutex_lock(&inst->dma_cache.lock);
if (atomic_dec_and_test(&smem->refcount)) {
CLEAR_USE_BITMAP(smem->bitmap_index, inst);
print_smem(CVP_MEM, "Map dereference",
inst, smem);
smem->pkt_type = smem->buf_idx = 0;
smem->buf_idx |= 0x10000000;
}
mutex_unlock(&inst->dma_cache.lock);
}
}
kmem_cache_free(cvp_driver->frame_cache, frame);
cvp_kmem_cache_free(&cvp_driver->frame_cache, frame);
}
void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
@@ -1470,6 +1522,12 @@ int msm_cvp_map_frame(struct msm_cvp_inst *inst,
u64 ktid;
struct msm_cvp_frame *frame;
struct cvp_hfi_cmd_session_hdr *cmd_hdr;
struct msm_cvp_inst *instance;
struct msm_cvp_core *core = NULL;
core = get_cvp_core(MSM_CORE_CVP);
if (!core)
return -EINVAL;
if (!offset || !buf_num)
return 0;
@@ -1479,7 +1537,7 @@ int msm_cvp_map_frame(struct msm_cvp_inst *inst,
ktid &= (FENCE_BIT - 1);
cmd_hdr->client_data.kdata = ktid;
frame = kmem_cache_zalloc(cvp_driver->frame_cache, GFP_KERNEL);
frame = cvp_kmem_cache_zalloc(&cvp_driver->frame_cache, GFP_KERNEL);
if (!frame)
return -ENOMEM;
@@ -1499,7 +1557,12 @@ int msm_cvp_map_frame(struct msm_cvp_inst *inst,
dprintk(CVP_ERR,
"%s: buf %d register failed.\n",
__func__, i);
dprintk(CVP_ERR, "smem_leak_count %d\n", core->smem_leak_count);
mutex_lock(&core->lock);
list_for_each_entry(instance, &core->instances, list) {
msm_cvp_print_inst_bufs(instance, false);
}
mutex_unlock(&core->lock);
msm_cvp_unmap_frame_buf(inst, frame);
return -EINVAL;
}
@@ -1522,6 +1585,7 @@ int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
struct msm_cvp_smem *smem;
struct cvp_hal_session *session;
struct eva_kmd_buffer buf;
struct list_head *ptr, *next;
session = (struct cvp_hal_session *)inst->session;
@@ -1532,6 +1596,48 @@ int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
}
mutex_unlock(&inst->frames.lock);
mutex_lock(&inst->persistbufs.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
cbuf = list_entry(ptr, struct cvp_internal_buf, list);
smem = cbuf->smem;
if (!smem) {
dprintk(CVP_ERR, "%s invalid persist smem\n", __func__);
mutex_unlock(&inst->persistbufs.lock);
return -EINVAL;
}
if (cbuf->ownership != DRIVER) {
dprintk(CVP_MEM,
"%s: %x : fd %d %pK size %d",
"free user persistent", hash32_ptr(inst->session), cbuf->fd,
smem->dma_buf, cbuf->size);
list_del(&cbuf->list);
if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
/*
* Don't care about the refcount; the mapping has to be
* removed since this is a user persistent buffer.
*/
if (smem->device_addr) {
msm_cvp_unmap_smem(inst, smem,
"unmap persist");
msm_cvp_smem_put_dma_buf(
cbuf->smem->dma_buf);
smem->device_addr = 0;
}
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
cbuf->smem = NULL;
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
} else {
/*
* DMM_PARAMS and WARP_NCC_PARAMS cases
* Leave dma_cache cleanup to unmap
*/
cbuf->smem = NULL;
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
}
}
}
mutex_unlock(&inst->persistbufs.lock);
mutex_lock(&inst->dma_cache.lock);
for (i = 0; i < inst->dma_cache.nr; i++) {
smem = inst->dma_cache.entries[i];
@@ -1542,7 +1648,7 @@ int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
}
msm_cvp_unmap_smem(inst, smem, "unmap cpu");
msm_cvp_smem_put_dma_buf(smem->dma_buf);
kmem_cache_free(cvp_driver->smem_cache, smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
inst->dma_cache.entries[i] = NULL;
}
mutex_unlock(&inst->dma_cache.lock);
@@ -1575,7 +1681,7 @@ int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
__func__, rc);
}
list_del(&cbuf->list);
kmem_cache_free(cvp_driver->buf_cache, cbuf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
}
mutex_unlock(&inst->cvpdspbufs.lock);
@@ -1600,9 +1706,10 @@ int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
{
struct cvp_internal_buf *buf;
struct msm_cvp_frame *frame;
struct msm_cvp_core *core;
struct inst_snapshot *snap = NULL;
int i;
int i = 0, c = 0;
core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
if (log && core->log.snapshot_index < 16) {
@@ -1618,15 +1725,26 @@ void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
}
dprintk(CVP_ERR,
"---Buffer details for inst: %pK of type: %d---\n",
inst, inst->session_type);
"---Buffer details for inst: %pK %s of type: %d---\n",
inst, inst->proc_name, inst->session_type);
dprintk(CVP_ERR, "dma_cache entries %d\n", inst->dma_cache.nr);
mutex_lock(&inst->dma_cache.lock);
dprintk(CVP_ERR, "dma cache: %d\n", inst->dma_cache.nr);
if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
for (i = 0; i < inst->dma_cache.nr; i++)
_log_smem(snap, inst, inst->dma_cache.entries[i], log);
mutex_unlock(&inst->dma_cache.lock);
i = 0;
dprintk(CVP_ERR, "frame buffer list\n");
mutex_lock(&inst->frames.lock);
list_for_each_entry(frame, &inst->frames.list, list) {
dprintk(CVP_ERR, "frame no %d tid %llx bufs\n", i++, frame->ktid);
for (c = 0; c < frame->nr; c++)
_log_smem(snap, inst, frame->bufs[c].smem, log);
}
mutex_unlock(&inst->frames.lock);
mutex_lock(&inst->cvpdspbufs.lock);
dprintk(CVP_ERR, "dsp buffer list:\n");
list_for_each_entry(buf, &inst->cvpdspbufs.list, list)
@@ -1669,13 +1787,13 @@ struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
*/
smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
buf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
buf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
if (!buf) {
dprintk(CVP_ERR, "%s Out of memory\n", __func__);
goto fail_kzalloc;
}
buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
if (!buf->smem) {
dprintk(CVP_ERR, "%s Out of memory\n", __func__);
goto fail_kzalloc;
@@ -1702,7 +1820,7 @@ struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
return buf;
err_no_mem:
kmem_cache_free(cvp_driver->buf_cache, buf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
fail_kzalloc:
return NULL;
}
@@ -1762,25 +1880,18 @@ int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
return -EINVAL;
}
list_del(&buf->list);
if (buf->ownership == DRIVER)
if (buf->ownership == DRIVER) {
dprintk(CVP_MEM,
"%s: %x : fd %d %pK size %d",
"free arp", hash32_ptr(inst->session), buf->fd,
smem->dma_buf, buf->size);
else
dprintk(CVP_MEM,
"%s: %x : fd %d %pK size %d",
"free user persistent", hash32_ptr(inst->session), buf->fd,
smem->dma_buf, buf->size);
list_del(&buf->list);
atomic_dec(&smem->refcount);
msm_cvp_smem_free(smem);
kmem_cache_free(cvp_driver->smem_cache, smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
buf->smem = NULL;
kmem_cache_free(cvp_driver->buf_cache, buf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
}
}
mutex_unlock(&inst->persistbufs.lock);
return rc;
@@ -1821,7 +1932,7 @@ int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
}
dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);
buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
if (!buf->smem) {
dprintk(CVP_ERR, "%s Out of memory\n", __func__);
goto fail_kzalloc_smem_cache;
@@ -1846,7 +1957,7 @@ int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
return rc;
err_no_mem:
kmem_cache_free(cvp_driver->smem_cache, buf->smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, buf->smem);
fail_kzalloc_smem_cache:
return rc;
}
@@ -1880,7 +1991,7 @@ int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
smem->dma_buf->name, buf->size);
atomic_dec(&smem->refcount);
msm_cvp_smem_free(smem);
kmem_cache_free(cvp_driver->smem_cache, smem);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
} else {
dprintk(CVP_ERR,
"%s: wrong owner %d %x : fd %x %s size %d",

View File

@@ -77,7 +77,7 @@ struct msm_cvp_smem {
u32 flags;
u32 pkt_type;
u32 buf_idx;
u32 checksum;
u32 fd;
struct cvp_dma_mapping_info mapping_info;
};

View File

@@ -25,6 +25,20 @@
#define NUM_DMM_MAX_FEATURE_POINTS 500
#define CYCLES_MARGIN_IN_POWEROF2 3
static atomic_t nr_insts;
void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags)
{
atomic_inc(&k->nr_objs);
return kmem_cache_zalloc(k->cache, flags);
}
void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj)
{
atomic_dec(&k->nr_objs);
kmem_cache_free(k->cache, obj);
}
int msm_cvp_poll(void *instance, struct file *filp,
struct poll_table_struct *wait)
{
@@ -115,7 +129,7 @@ static void __deinit_session_queue(struct msm_cvp_inst *inst)
spin_lock(&inst->session_queue.lock);
list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
list_del_init(&msg->node);
kmem_cache_free(cvp_driver->msg_cache, msg);
cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
}
inst->session_queue.msg_count = 0;
inst->session_queue.state = QUEUE_INVALID;
@@ -208,6 +222,7 @@ void *msm_cvp_open(int core_id, int session_type, struct task_struct *task)
mutex_lock(&core->lock);
mutex_lock(&core->clk_lock);
list_add_tail(&inst->list, &core->instances);
atomic_inc(&nr_insts);
mutex_unlock(&core->clk_lock);
mutex_unlock(&core->lock);
@@ -255,8 +270,9 @@ static void msm_cvp_clean_sess_queue(struct msm_cvp_inst *inst,
struct cvp_session_queue *sq)
{
struct cvp_session_msg *mptr, *dummy;
u64 ktid;
u64 ktid = 0LL;
check_again:
spin_lock(&sq->lock);
if (sq->msg_count && sq->state != QUEUE_ACTIVE) {
list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
@@ -264,12 +280,19 @@ static void msm_cvp_clean_sess_queue(struct msm_cvp_inst *inst,
if (ktid) {
list_del_init(&mptr->node);
sq->msg_count--;
msm_cvp_unmap_frame(inst, ktid);
kmem_cache_free(cvp_driver->msg_cache, mptr);
break;
}
}
}
spin_unlock(&sq->lock);
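/*
 * Note: the unmap and cache free now happen outside sq->lock, since
 * msm_cvp_unmap_frame() takes mutexes and may sleep, which is not
 * allowed under a spinlock; hence the check_again loop.
 */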
if (ktid) {
msm_cvp_unmap_frame(inst, ktid);
cvp_kmem_cache_free(&cvp_driver->msg_cache, mptr);
mptr = NULL;
ktid = 0LL;
goto check_again;
}
}
static void msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
@@ -366,6 +389,7 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
mutex_lock(&core->clk_lock);
/* inst->list lives in core->instances */
list_del(&inst->list);
atomic_dec(&nr_insts);
mutex_unlock(&core->clk_lock);
mutex_unlock(&core->lock);
@@ -389,11 +413,23 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
pr_info(
CVP_DBG_TAG
"%s closed cvp instance: %pK session_id = %d type %d\n",
"sess", inst->proc_name, inst, hash32_ptr(inst->session),
inst->session_type);
"closed cvp instance: %pK session_id = %d type %d %d\n",
inst->proc_name, inst, hash32_ptr(inst->session),
inst->session_type, core->smem_leak_count);
inst->session = (void *)0xdeadbeef;
if (atomic_read(&inst->smem_count) > 0) {
dprintk(CVP_WARN, "Session closed with %d unmapped smems\n",
atomic_read(&inst->smem_count));
core->smem_leak_count += atomic_read(&inst->smem_count);
}
kfree(inst);
dprintk(CVP_SESS,
"sys-stat: nr_insts %d msgs %d, frames %d, bufs %d, smems %d\n",
atomic_read(&nr_insts),
atomic_read(&cvp_driver->msg_cache.nr_objs),
atomic_read(&cvp_driver->frame_cache.nr_objs),
atomic_read(&cvp_driver->buf_cache.nr_objs),
atomic_read(&cvp_driver->smem_cache.nr_objs));
return 0;
}

View File

@@ -12,6 +12,7 @@
#include "cvp_hfi.h"
#include "cvp_dump.h"
static atomic_t nr_maps;
struct cvp_dsp_apps gfa_cv;
static int hlosVM[HLOS_VM_NUM] = {VMID_HLOS};
static int dspVM[DSP_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
@@ -21,6 +22,8 @@ static int hlosVMperm[HLOS_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
static int cvp_reinit_dsp(void);
static void cvp_remove_dsp_sessions(void);
static int __fastrpc_driver_register(struct fastrpc_driver *driver)
{
#ifdef CVP_FASTRPC_ENABLED
@@ -254,7 +257,7 @@ static int delete_dsp_session(struct msm_cvp_inst *inst,
list_del(&buf->list);
kmem_cache_free(cvp_driver->buf_cache, buf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
}
}
@@ -314,12 +317,56 @@ static void eva_fastrpc_driver_release_name(
DRIVER_NAME_AVAILABLE;
}
static struct cvp_dsp_fastrpc_driver_entry *dequeue_frpc_node(void)
/* This function may block for up to 50 ms before returning */
static bool dequeue_frpc_node(struct cvp_dsp_fastrpc_driver_entry *node)
{
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct list_head *ptr = NULL, *next = NULL;
u32 refcount, max_count = 10;
bool rc = false;
if (!node)
return rc;
search_again:
ptr = &me->fastrpc_driver_list.list;
mutex_lock(&me->fastrpc_driver_list.lock);
list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
frpc_node = list_entry(ptr,
struct cvp_dsp_fastrpc_driver_entry, list);
if (frpc_node == node) {
refcount = atomic_read(&frpc_node->refcount);
if (refcount > 0) {
mutex_unlock(&me->fastrpc_driver_list.lock);
usleep_range(5000, 10000);
if (max_count-- == 0) {
dprintk(CVP_ERR, "%s timeout\n",
__func__);
goto exit;
}
goto search_again;
}
list_del(&frpc_node->list);
rc = true;
break;
}
}
mutex_unlock(&me->fastrpc_driver_list.lock);
exit:
return rc;
}
/* This function may block for up to 50 ms before returning */
static struct cvp_dsp_fastrpc_driver_entry *pop_frpc_node(void)
{
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct list_head *ptr = NULL, *next = NULL;
u32 refcount, max_count = 10;
search_again:
ptr = &me->fastrpc_driver_list.list;
mutex_lock(&me->fastrpc_driver_list.lock);
list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
@@ -327,20 +374,31 @@ static struct cvp_dsp_fastrpc_driver_entry *dequeue_frpc_node(void)
struct cvp_dsp_fastrpc_driver_entry, list);
if (frpc_node) {
refcount = atomic_read(&frpc_node->refcount);
if (refcount > 0) {
mutex_unlock(&me->fastrpc_driver_list.lock);
usleep_range(5000, 10000);
if (max_count-- == 0) {
dprintk(CVP_ERR, "%s timeout\n",
__func__);
frpc_node = NULL;
goto exit;
}
goto search_again;
}
list_del(&frpc_node->list);
break;
}
}
mutex_unlock(&me->fastrpc_driver_list.lock);
exit:
return frpc_node;
}
static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct msm_cvp_inst *inst = NULL;
struct list_head *s = NULL, *next_s = NULL;
u32 max_num_retries = 100;
dprintk(CVP_WARN, "%s: CDSP SSR triggered\n", __func__);
@@ -369,38 +427,10 @@ static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
mutex_unlock(&me->tx_lock);
mutex_unlock(&me->rx_lock);
while ((frpc_node = dequeue_frpc_node())) {
s = &frpc_node->dsp_sessions.list;
list_for_each_safe(s, next_s,
&frpc_node->dsp_sessions.list) {
inst = list_entry(s, struct msm_cvp_inst,
dsp_list);
if (inst) {
delete_dsp_session(inst, frpc_node);
mutex_lock(&frpc_node->dsp_sessions.lock);
list_del(&inst->dsp_list);
frpc_node->session_cnt--;
mutex_unlock(&frpc_node->dsp_sessions.lock);
}
}
cvp_remove_dsp_sessions();
dprintk(CVP_DSP, "%s DEINIT_MSM_CVP_LIST 0x%x\n",
__func__, frpc_node->dsp_sessions);
DEINIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
dprintk(CVP_DSP, "%s list_del fastrpc node 0x%x\n",
__func__, frpc_node);
__fastrpc_driver_unregister(
&frpc_node->cvp_fastrpc_driver);
dprintk(CVP_DSP,
"%s Unregistered fastrpc handle 0x%x\n",
__func__, frpc_node->handle);
mutex_lock(&me->driver_name_lock);
eva_fastrpc_driver_release_name(frpc_node);
mutex_unlock(&me->driver_name_lock);
kfree(frpc_node);
frpc_node = NULL;
}
dprintk(CVP_WARN, "%s: CDSP SSR handled\n", __func__);
dprintk(CVP_WARN, "%s: CDSP SSR handled nr_maps %d\n", __func__,
atomic_read(&nr_maps));
}
static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
@@ -496,6 +526,7 @@ retry:
}
me->state = DSP_SUSPEND;
dprintk(CVP_DSP, "DSP suspended, nr_map: %d\n", atomic_read(&nr_maps));
goto exit;
fatal_exit:
@@ -525,44 +556,45 @@ int cvp_dsp_resume(uint32_t session_flag)
return rc;
}
static void cvp_remove_dsp_process_sess(
struct cvp_dsp_fastrpc_driver_entry *frpc_node)
{
struct msm_cvp_inst *inst = NULL;
struct list_head *s = NULL, *next_s = NULL;
s = &frpc_node->dsp_sessions.list;
list_for_each_safe(s, next_s, &frpc_node->dsp_sessions.list) {
inst = list_entry(s, struct msm_cvp_inst, dsp_list);
delete_dsp_session(inst, frpc_node);
}
}
static void cvp_remove_dsp_sessions(void)
{
struct cvp_dsp_apps *me = &gfa_cv;
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct list_head *ptr = NULL, *next = NULL;
struct msm_cvp_inst *inst = NULL;
struct list_head *s = NULL, *next_s = NULL;
dprintk(CVP_WARN, "%s: EVA SSR triggered, clean cdsp eva sessions\n",
__func__);
while ((frpc_node = pop_frpc_node())) {
s = &frpc_node->dsp_sessions.list;
list_for_each_safe(s, next_s,
&frpc_node->dsp_sessions.list) {
inst = list_entry(s, struct msm_cvp_inst,
dsp_list);
if (inst) {
delete_dsp_session(inst, frpc_node);
mutex_lock(&frpc_node->dsp_sessions.lock);
list_del(&inst->dsp_list);
frpc_node->session_cnt--;
mutex_unlock(&frpc_node->dsp_sessions.lock);
}
}
ptr = &me->fastrpc_driver_list.list;
mutex_lock(&me->fastrpc_driver_list.lock);
list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
frpc_node = list_entry(ptr,
struct cvp_dsp_fastrpc_driver_entry, list);
if (frpc_node) {
cvp_remove_dsp_process_sess(frpc_node);
list_del(&frpc_node->list);
__fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
dprintk(CVP_DSP, "%s DEINIT_MSM_CVP_LIST 0x%x\n",
__func__, frpc_node->dsp_sessions);
DEINIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
dprintk(CVP_DSP, "%s list_del fastrpc node 0x%x\n",
__func__, frpc_node);
__fastrpc_driver_unregister(
&frpc_node->cvp_fastrpc_driver);
dprintk(CVP_DSP,
"%s Unregistered fastrpc handle 0x%x\n",
__func__, frpc_node->handle);
mutex_lock(&me->driver_name_lock);
eva_fastrpc_driver_release_name(frpc_node);
mutex_unlock(&me->driver_name_lock);
kfree(frpc_node);
frpc_node = NULL;
}
}
mutex_unlock(&me->fastrpc_driver_list.lock);
dprintk(CVP_WARN, "%s: EVA SSR handled for CDSP\n", __func__);
}
@@ -845,6 +877,9 @@ static int __reinit_dsp(void)
*/
cvp_remove_dsp_sessions();
dprintk(CVP_WARN, "Reinit EVA DSP interface: nr_map %d\n",
atomic_read(&nr_maps));
/* Resend HFI queue */
mutex_lock(&me->tx_lock);
if (!device->dsp_iface_q_table.align_virtual_addr) {
@@ -900,7 +935,13 @@ static int cvp_reinit_dsp(void)
return rc;
}
static struct cvp_dsp_fastrpc_driver_entry *cvp_find_fastrpc_node_with_handle(
static void cvp_put_fastrpc_node(struct cvp_dsp_fastrpc_driver_entry *node)
{
if (node && (atomic_read(&node->refcount) > 0))
atomic_dec(&node->refcount);
}
static struct cvp_dsp_fastrpc_driver_entry *cvp_get_fastrpc_node_with_handle(
uint32_t handle)
{
struct cvp_dsp_apps *me = &gfa_cv;
@@ -913,6 +954,7 @@ static struct cvp_dsp_fastrpc_driver_entry *cvp_find_fastrpc_node_with_handle(
struct cvp_dsp_fastrpc_driver_entry, list);
if (handle == tmp_node->handle) {
frpc_node = tmp_node;
atomic_inc(&frpc_node->refcount);
dprintk(CVP_DSP, "Find tmp_node with handle 0x%x\n",
handle);
break;
@@ -934,11 +976,12 @@ static int cvp_fastrpc_probe(struct fastrpc_device *rpc_dev)
dprintk(CVP_DSP, "%s fastrpc probe handle 0x%x\n",
__func__, rpc_dev->handle);
frpc_node = cvp_find_fastrpc_node_with_handle(rpc_dev->handle);
frpc_node = cvp_get_fastrpc_node_with_handle(rpc_dev->handle);
if (frpc_node) {
frpc_node->cvp_fastrpc_device = rpc_dev;
// static structure with signal and pid
complete(&frpc_node->fastrpc_probe_completion);
cvp_put_fastrpc_node(frpc_node);
}
return 0;
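The refcount taken by cvp_get_fastrpc_node_with_handle() establishes a get/put discipline that the converted call sites below follow; a minimal usage sketch (use_node() is a placeholder, not a real function):
struct cvp_dsp_fastrpc_driver_entry *node;
node = cvp_get_fastrpc_node_with_handle(handle);	/* refcount++ under list lock */
if (node) {
	/* Safe to dereference: dequeue_frpc_node() and pop_frpc_node()
	 * wait for refcount to reach zero before unlinking the node. */
	use_node(node);
	cvp_put_fastrpc_node(node);			/* refcount-- when done */
}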
@@ -992,6 +1035,7 @@ static int eva_fastrpc_dev_map_dma(struct fastrpc_device *frpc_device,
}
buf->fd = (s32)frpc_map_buf.v_dsp_addr;
*v_dsp_addr = frpc_map_buf.v_dsp_addr;
atomic_inc(&nr_maps);
} else {
dprintk(CVP_DSP, "%s Buffer not mapped to dsp\n", __func__);
buf->fd = 0;
@@ -1016,6 +1060,8 @@ static int eva_fastrpc_dev_unmap_dma(struct fastrpc_device *frpc_device,
__func__, rc);
return rc;
}
if (atomic_read(&nr_maps) > 0)
atomic_dec(&nr_maps);
} else {
dprintk(CVP_DSP, "%s buffer not mapped to dsp\n", __func__);
}
@@ -1043,7 +1089,7 @@ int cvp_dsp_fastrpc_unmap(uint32_t process_id, struct cvp_internal_buf *buf)
struct fastrpc_device *frpc_device = NULL;
int rc = 0;
frpc_node = cvp_find_fastrpc_node_with_handle(process_id);
frpc_node = cvp_get_fastrpc_node_with_handle(process_id);
if (!frpc_node) {
dprintk(CVP_ERR, "%s no frpc node for process id %d\n",
__func__, process_id);
@@ -1051,13 +1097,11 @@ int cvp_dsp_fastrpc_unmap(uint32_t process_id, struct cvp_internal_buf *buf)
}
frpc_device = frpc_node->cvp_fastrpc_device;
rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
if (rc) {
dprintk(CVP_ERR,
"%s Fail to unmap buffer 0x%x\n",
if (rc)
dprintk(CVP_ERR, "%s Fail to unmap buffer 0x%x\n",
__func__, rc);
return rc;
}
cvp_put_fastrpc_node(frpc_node);
return rc;
}
@@ -1068,7 +1112,7 @@ int cvp_dsp_del_sess(uint32_t process_id, struct msm_cvp_inst *inst)
struct msm_cvp_inst *sess;
bool found = false;
frpc_node = cvp_find_fastrpc_node_with_handle(process_id);
frpc_node = cvp_get_fastrpc_node_with_handle(process_id);
if (!frpc_node) {
dprintk(CVP_ERR, "%s no frpc node for process id %d\n",
__func__, process_id);
@@ -1091,6 +1135,7 @@ int cvp_dsp_del_sess(uint32_t process_id, struct msm_cvp_inst *inst)
mutex_unlock(&frpc_node->dsp_sessions.lock);
cvp_put_fastrpc_node(frpc_node);
return 0;
}
@@ -1101,9 +1146,9 @@ static int eva_fastrpc_driver_register(uint32_t handle)
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
bool skip_deregister = true;
dprintk(CVP_DSP, "%s -> cvp_find_fastrpc_node_with_handle pid 0x%x\n",
dprintk(CVP_DSP, "%s -> cvp_get_fastrpc_node_with_handle pid 0x%x\n",
__func__, handle);
frpc_node = cvp_find_fastrpc_node_with_handle(handle);
frpc_node = cvp_get_fastrpc_node_with_handle(handle);
if (frpc_node == NULL) {
dprintk(CVP_DSP, "%s new fastrpc node pid 0x%x\n",
@@ -1134,10 +1179,10 @@ static int eva_fastrpc_driver_register(uint32_t handle)
init_completion(&frpc_node->fastrpc_probe_completion);
mutex_lock(&me->fastrpc_driver_list.lock);
dprintk(CVP_DSP, "Add frpc node 0x%x to list\n", frpc_node);
list_add_tail(&frpc_node->list, &me->fastrpc_driver_list.list);
mutex_unlock(&me->fastrpc_driver_list.lock);
INIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
mutex_unlock(&me->fastrpc_driver_list.lock);
dprintk(CVP_DSP, "Add frpc node 0x%x to list\n", frpc_node);
/* register fastrpc device to this session */
rc = __fastrpc_driver_register(&frpc_node->cvp_fastrpc_driver);
@@ -1160,16 +1205,13 @@ static int eva_fastrpc_driver_register(uint32_t handle)
} else {
dprintk(CVP_DSP, "%s fastrpc probe hndl %pK pid 0x%x\n",
__func__, frpc_node, handle);
cvp_put_fastrpc_node(frpc_node);
}
return rc;
fail_fastrpc_driver_register:
/* remove list if this is the last session */
mutex_lock(&me->fastrpc_driver_list.lock);
list_del(&frpc_node->list);
mutex_unlock(&me->fastrpc_driver_list.lock);
dequeue_frpc_node(frpc_node);
if (!skip_deregister)
__fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
@@ -1187,11 +1229,15 @@ static void eva_fastrpc_driver_unregister(uint32_t handle, bool force_exit)
struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
dprintk(CVP_DSP, "%s Unregister fastrpc driver handle 0x%x, force %d\n",
__func__, handle, (uint32_t)force_exit);
dprintk(CVP_DSP, "%s Unregister fastrpc driver hdl %#x pid %#x, f %d\n",
__func__, handle, dsp2cpu_cmd->pid, (uint32_t)force_exit);
if (handle != dsp2cpu_cmd->pid)
dprintk(CVP_ERR, "Unregister pid != hndl %#x %#x\n",
handle, dsp2cpu_cmd->pid);
/* Find fastrpc node */
frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
frpc_node = cvp_get_fastrpc_node_with_handle(handle);
if (frpc_node == NULL) {
dprintk(CVP_DSP, "%s fastrpc handle 0x%x unregistered\n",
@@ -1205,16 +1251,18 @@ static void eva_fastrpc_driver_unregister(uint32_t handle, bool force_exit)
DEINIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
/* remove list if this is the last session */
mutex_lock(&me->fastrpc_driver_list.lock);
list_del(&frpc_node->list);
mutex_unlock(&me->fastrpc_driver_list.lock);
cvp_put_fastrpc_node(frpc_node);
if (!dequeue_frpc_node(frpc_node))
/* Node was not found */
return;
__fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
mutex_lock(&me->driver_name_lock);
eva_fastrpc_driver_release_name(frpc_node);
mutex_unlock(&me->driver_name_lock);
kfree(frpc_node);
} else {
cvp_put_fastrpc_node(frpc_node);
}
}
@@ -1448,9 +1496,11 @@ static void __dsp_cvp_sess_create(struct cvp_dsp_cmd_msg *cmd)
cmd->session_cpu_high = (uint32_t)((inst_handle & HIGH32) >> 32);
cmd->session_cpu_low = (uint32_t)(inst_handle & LOW32);
frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
if (frpc_node)
frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
if (frpc_node) {
eva_fastrpc_driver_add_sess(frpc_node, inst);
cvp_put_fastrpc_node(frpc_node);
}
inst->task = task;
dprintk(CVP_DSP,
@@ -1495,7 +1545,7 @@ static void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
dsp2cpu_cmd->session_cpu_high,
dsp2cpu_cmd->pid);
frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
if (!frpc_node) {
dprintk(CVP_ERR, "%s pid 0x%x not registered with fastrpc\n",
__func__, dsp2cpu_cmd->pid);
@@ -1503,6 +1553,7 @@ static void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
return;
}
cvp_put_fastrpc_node(frpc_node);
inst = (struct msm_cvp_inst *)ptr_dsp2cpu(
dsp2cpu_cmd->session_cpu_high,
dsp2cpu_cmd->session_cpu_low);
@@ -1539,7 +1590,8 @@ static void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
if (task)
put_task_struct(task);
dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done\n", __func__);
dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done, nr_maps %d\n",
__func__, atomic_read(&nr_maps));
dsp_fail_delete:
return;
}
@@ -1732,7 +1784,7 @@ static void __dsp_cvp_mem_alloc(struct cvp_dsp_cmd_msg *cmd)
dsp2cpu_cmd->session_cpu_high,
dsp2cpu_cmd->pid);
frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
if (!frpc_node) {
dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n",
__func__, dsp2cpu_cmd->pid);
@@ -1744,7 +1796,7 @@ static void __dsp_cvp_mem_alloc(struct cvp_dsp_cmd_msg *cmd)
dsp2cpu_cmd->session_cpu_high,
dsp2cpu_cmd->session_cpu_low);
buf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
buf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
if (!buf)
goto fail_kzalloc_buf;
@@ -1779,15 +1831,17 @@ static void __dsp_cvp_mem_alloc(struct cvp_dsp_cmd_msg *cmd)
__func__, cmd->sbuf.size, cmd->sbuf.iova,
cmd->sbuf.v_dsp_addr);
cvp_put_fastrpc_node(frpc_node);
return;
fail_fastrpc_dev_map_dma:
cvp_release_dsp_buffers(inst, buf);
fail_allocate_dsp_buf:
kmem_cache_free(cvp_driver->buf_cache, buf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
fail_kzalloc_buf:
fail_fastrpc_node:
cmd->ret = -1;
cvp_put_fastrpc_node(frpc_node);
return;
}
@@ -1824,7 +1878,7 @@ static void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd)
return;
}
frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
if (!frpc_node) {
dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n",
__func__, dsp2cpu_cmd->pid);
@@ -1870,7 +1924,7 @@ static void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd)
list_del(&buf->list);
kmem_cache_free(cvp_driver->buf_cache, buf);
cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
break;
}
}
@@ -1878,6 +1932,7 @@ static void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd)
fail_release_buf:
fail_fastrpc_dev_unmap_dma:
mutex_unlock(&buf_list->lock);
cvp_put_fastrpc_node(frpc_node);
}
static int cvp_dsp_thread(void *data)

View File

@@ -7,6 +7,7 @@
#define MSM_CVP_DSP_H
#include <linux/types.h>
#include <linux/refcount.h>
#include "msm_cvp_debug.h"
#include "cvp_core_hfi.h"
@@ -181,6 +182,7 @@ struct cvp_dsp_fastrpc_driver_entry {
uint32_t handle;
uint32_t session_cnt;
uint32_t driver_name_idx;
atomic_t refcount;
struct fastrpc_driver cvp_fastrpc_driver;
struct fastrpc_device *cvp_fastrpc_device;
struct completion fastrpc_probe_completion;

View File

@@ -141,6 +141,11 @@ struct msm_cvp_platform_data {
struct msm_cvp_qos_setting *noc_qos;
};
struct cvp_kmem_cache {
struct kmem_cache *cache;
atomic_t nr_objs;
};
struct msm_cvp_drv {
struct mutex lock;
struct list_head cores;
@@ -148,10 +153,10 @@ struct msm_cvp_drv {
struct dentry *debugfs_root;
int thermal_level;
u32 sku_version;
struct kmem_cache *msg_cache;
struct kmem_cache *frame_cache;
struct kmem_cache *buf_cache;
struct kmem_cache *smem_cache;
struct cvp_kmem_cache msg_cache;
struct cvp_kmem_cache frame_cache;
struct cvp_kmem_cache buf_cache;
struct cvp_kmem_cache smem_cache;
char fw_version[CVP_VERSION_LENGTH];
};
@@ -370,6 +375,7 @@ struct msm_cvp_core {
u32 smmu_fault_count;
u32 last_fault_addr;
u32 ssr_count;
u32 smem_leak_count;
bool trigger_ssr;
unsigned long curr_freq;
unsigned long orig_core_sum;
@@ -386,6 +392,7 @@ struct msm_cvp_inst {
enum session_type session_type;
u32 process_id;
struct task_struct *task;
atomic_t smem_count;
struct cvp_session_queue session_queue;
struct cvp_session_queue session_queue_fence;
struct cvp_session_event event_handler;
@@ -432,4 +439,6 @@ void msm_cvp_ssr_handler(struct work_struct *work);
*/
int msm_cvp_destroy(struct msm_cvp_inst *inst);
void *cvp_get_drv_data(struct device *dev);
void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags);
void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj);
#endif