diff --git a/msm/eva/cvp.c b/msm/eva/cvp.c index cf3e4b530d..797c5c3dfe 100644 --- a/msm/eva/cvp.c +++ b/msm/eva/cvp.c @@ -37,7 +37,7 @@ static int cvp_open(struct inode *inode, struct file *filp) struct msm_cvp_core, cdev); struct msm_cvp_inst *inst; - dprintk(CVP_SESS, "%s: Enter\n", __func__); + dprintk(CVP_SESS, "%s: core->id: %d\n", __func__, core->id); inst = msm_cvp_open(core->id, MSM_CVP_USER); if (!inst) { @@ -416,9 +416,13 @@ static int msm_probe_cvp_device(struct platform_device *pdev) atomic64_set(&core->kernel_trans_id, 0); - rc = cvp_dsp_device_init(); - if (rc) - dprintk(CVP_WARN, "Failed to initialize DSP driver\n"); + if (core->resources.dsp_enabled) { + rc = cvp_dsp_device_init(); + if (rc) + dprintk(CVP_WARN, "Failed to initialize DSP driver\n"); + } else { + dprintk(CVP_DSP, "DSP interface not enabled\n"); + } return rc; diff --git a/msm/eva/msm_cvp_buf.c b/msm/eva/msm_cvp_buf.c index 994fa43353..785acd8b5c 100644 --- a/msm/eva/msm_cvp_buf.c +++ b/msm/eva/msm_cvp_buf.c @@ -3,6 +3,12 @@ * Copyright (c) 2020, The Linux Foundation. All rights reserved. */ +#include +#include +#include +#include +#include +#include #include "msm_cvp_common.h" #include "cvp_hfi_api.h" #include "msm_cvp_debug.h" @@ -251,6 +257,232 @@ int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf) return rc; } +static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task, + fmode_t mask, unsigned int refs) +{ + struct files_struct *files = task->files; + struct file *file; + + rcu_read_lock(); +loop: + file = fcheck_files(files, fd); + if (file) { + /* File object ref couldn't be taken. + * dup2() atomicity guarantee is the reason + * we loop to catch the new file (or NULL pointer) + */ + if (file->f_mode & mask) + file = NULL; + else if (!get_file_rcu_many(file, refs)) + goto loop; + } + rcu_read_unlock(); + + return file; +} + +static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd, + struct task_struct *task) +{ + if (file->f_op != gfa_cv.dmabuf_f_op) { + dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n"); + return ERR_PTR(-EINVAL); + } + + return file->private_data; +} + +int msm_cvp_map_buf_dsp_new(struct msm_cvp_inst *inst, + struct eva_kmd_buffer *buf, + int32_t pid, uint32_t *iova) +{ + int rc = 0; + bool found = false; + struct cvp_internal_buf *cbuf; + struct msm_cvp_smem *smem = NULL; + struct cvp_hal_session *session; + struct dma_buf *dma_buf = NULL; + + struct pid *pid_s; + struct task_struct *task; + struct file *file; + + if (!inst || !inst->core || !buf) { + dprintk(CVP_ERR, "%s: invalid params\n", __func__); + return -EINVAL; + } + + if (buf->fd < 0) { + dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd); + return 0; + } + + if (buf->offset) { + dprintk(CVP_ERR, + "%s: offset is deprecated, set to 0.\n", + __func__); + return -EINVAL; + } + + session = (struct cvp_hal_session *)inst->session; + + mutex_lock(&inst->cvpdspbufs.lock); + list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) { + if (cbuf->fd == buf->fd) { + if (cbuf->size != buf->size) { + dprintk(CVP_ERR, "%s: buf size mismatch\n", + __func__); + mutex_unlock(&inst->cvpdspbufs.lock); + return -EINVAL; + } + found = true; + break; + } + } + mutex_unlock(&inst->cvpdspbufs.lock); + if (found) { + print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf); + return -EINVAL; + } + + pid_s = find_get_pid(pid); + if (pid_s == NULL) { + dprintk(CVP_WARN, "%s incorrect pid\n", __func__); + return -EINVAL; + } + dprintk(CVP_WARN, "%s get pid_s 0x%x from pidA 0x%x\n", 
__func__, pid_s, pid); + /* task = get_pid_task(pid, PIDTYPE_PID); */ + task = get_pid_task(pid_s, PIDTYPE_TGID); + + if (!task) + dprintk(CVP_WARN, "%s task doesn't exist\n", __func__); + file = msm_cvp_fget(buf->fd, task, FMODE_PATH, 1); + if (file == NULL) { + dprintk(CVP_WARN, "%s fail to get file from fd\n", __func__); + put_task_struct(task); + return -EINVAL; + } + + //entry->file = file; + dma_buf = cvp_dma_buf_get( + file, + buf->fd, + task); + if (dma_buf == ERR_PTR(-EINVAL)) { + dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd); + fput(file); + put_task_struct(task); + return -EINVAL; + } + + dprintk(CVP_WARN, "dma_buf from internal %llu\n", dma_buf); + /* to unmap dsp buf, below sequence is required + * fput(file); + * dma_buf_put(dma_buf); + * put_task_struct(task); + */ + + if (!dma_buf) { + dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd); + return 0; + } + + cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL); + if (!cbuf) + return -ENOMEM; + + smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL); + if (!smem) { + kmem_cache_free(cvp_driver->buf_cache, cbuf); + return -ENOMEM; + } + + smem->dma_buf = dma_buf; + smem->bitmap_index = MAX_DMABUF_NUMS; + dprintk(CVP_DSP, "%s: dma_buf = %llx\n", __func__, dma_buf); + rc = msm_cvp_map_smem(inst, smem, "map dsp"); + if (rc) { + print_client_buffer(CVP_ERR, "map failed", inst, buf); + goto exit; + } + + cbuf->smem = smem; + cbuf->fd = buf->fd; + cbuf->size = buf->size; + cbuf->offset = buf->offset; + cbuf->ownership = CLIENT; + cbuf->index = buf->index; + + *iova = (uint32_t)smem->device_addr; + + dprintk(CVP_DSP, "%s: buf->fd %d, device_addr = %llx\n", + __func__, buf->fd, (uint32_t)smem->device_addr); + + mutex_lock(&inst->cvpdspbufs.lock); + list_add_tail(&cbuf->list, &inst->cvpdspbufs.list); + mutex_unlock(&inst->cvpdspbufs.lock); + + return rc; + +exit: + if (smem->device_addr) { + msm_cvp_unmap_smem(inst, smem, "unmap dsp"); + msm_cvp_smem_put_dma_buf(smem->dma_buf); + } + kmem_cache_free(cvp_driver->buf_cache, cbuf); + cbuf = NULL; + kmem_cache_free(cvp_driver->smem_cache, smem); + smem = NULL; + return rc; +} + +int msm_cvp_unmap_buf_dsp_new(struct msm_cvp_inst *inst, + struct eva_kmd_buffer *buf) +{ + int rc = 0; + bool found; + struct cvp_internal_buf *cbuf; + struct cvp_hal_session *session; + + if (!inst || !inst->core || !buf) { + dprintk(CVP_ERR, "%s: invalid params\n", __func__); + return -EINVAL; + } + + session = (struct cvp_hal_session *)inst->session; + if (!session) { + dprintk(CVP_ERR, "%s: invalid session\n", __func__); + return -EINVAL; + } + + mutex_lock(&inst->cvpdspbufs.lock); + found = false; + list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) { + if (cbuf->fd == buf->fd) { + found = true; + break; + } + } + mutex_unlock(&inst->cvpdspbufs.lock); + if (!found) { + print_client_buffer(CVP_ERR, "invalid", inst, buf); + return -EINVAL; + } + + if (cbuf->smem->device_addr) { + msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp"); + msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf); + } + + mutex_lock(&inst->cvpdspbufs.lock); + list_del(&cbuf->list); + mutex_unlock(&inst->cvpdspbufs.lock); + + kmem_cache_free(cvp_driver->smem_cache, cbuf->smem); + kmem_cache_free(cvp_driver->buf_cache, cbuf); + return rc; +} + void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type, u32 offset, u32 size) { @@ -964,3 +1196,98 @@ int cvp_release_arp_buffers(struct msm_cvp_inst *inst) return rc; } +int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst, + struct cvp_internal_buf *buf, + u32 
buffer_size, + u32 secure_type) +{ + u32 smem_flags = SMEM_UNCACHED; + int rc = 0; + + if (!inst) { + dprintk(CVP_ERR, "%s Invalid input\n", __func__); + return -EINVAL; + } + + if (!buf) + return -EINVAL; + + if (!buffer_size) + return -EINVAL; + + switch (secure_type) { + case 0: + break; + case 1: + smem_flags |= SMEM_SECURE | SMEM_PIXEL; + break; + case 2: + smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL; + break; + default: + dprintk(CVP_ERR, "%s Invalid secure_type %d\n", + __func__, secure_type); + return -EINVAL; + } + + dprintk(CVP_ERR, "%s smem_flags 0x%x\n", __func__, smem_flags); + buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL); + if (!buf->smem) { + dprintk(CVP_ERR, "%s Out of memory\n", __func__); + goto fail_kzalloc_smem_cache; + } + + rc = msm_cvp_smem_alloc(buffer_size, 1, smem_flags, 0, + &(inst->core->resources), buf->smem); + if (rc) { + dprintk(CVP_ERR, "Failed to allocate ARP memory\n"); + goto err_no_mem; + } + + dprintk(CVP_ERR, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf); + + buf->size = buf->smem->size; + buf->type = HFI_BUFFER_INTERNAL_PERSIST_1; + buf->ownership = CLIENT; + + return rc; + +err_no_mem: + kmem_cache_free(cvp_driver->smem_cache, buf->smem); +fail_kzalloc_smem_cache: + return rc; +} + +int cvp_release_dsp_buffers(struct msm_cvp_inst *inst, + struct cvp_internal_buf *buf) +{ + struct msm_cvp_smem *smem; + int rc = 0; + + if (!inst) { + dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst); + return -EINVAL; + } + + if (!buf) { + dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", inst); + return -EINVAL; + } + + smem = buf->smem; + if (!smem) { + dprintk(CVP_ERR, "%s invalid smem\n", __func__); + return -EINVAL; + } + + if (buf->ownership == CLIENT) { + dprintk(CVP_MEM, + "%s: %x : fd %x %s size %d", + "free dsp buf", hash32_ptr(inst->session), buf->fd, + smem->dma_buf->name, buf->size); + msm_cvp_smem_free(smem); + kmem_cache_free(cvp_driver->smem_cache, smem); + } + + return rc; +} diff --git a/msm/eva/msm_cvp_buf.h b/msm/eva/msm_cvp_buf.h index 681673b716..c46d4177af 100644 --- a/msm/eva/msm_cvp_buf.h +++ b/msm/eva/msm_cvp_buf.h @@ -15,7 +15,7 @@ #include #include -#define MAX_FRAME_BUFFER_NUMS 48 +#define MAX_FRAME_BUFFER_NUMS 30 #define MAX_DMABUF_NUMS 64 struct msm_cvp_inst; @@ -33,7 +33,8 @@ enum smem_prop { SMEM_CACHED = 0x2, SMEM_SECURE = 0x4, SMEM_ADSP = 0x8, - SMEM_NON_PIXEL = 0x10 + SMEM_NON_PIXEL = 0x10, + SMEM_PIXEL = 0x20 }; struct msm_cvp_list { @@ -177,6 +178,12 @@ int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf); int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf); +int msm_cvp_map_buf_dsp_new(struct msm_cvp_inst *inst, + struct eva_kmd_buffer *buf, + int32_t pid, + uint32_t *iova); +int msm_cvp_unmap_buf_dsp_new(struct msm_cvp_inst *inst, + struct eva_kmd_buffer *buf); void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type, u32 offset, u32 size); u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst, @@ -197,4 +204,10 @@ int msm_cvp_map_frame(struct msm_cvp_inst *inst, void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid); int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst); void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst); +int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst, + struct cvp_internal_buf *buf, + u32 buffer_size, + u32 secure_type); +int cvp_release_dsp_buffers(struct msm_cvp_inst *inst, + struct cvp_internal_buf *buf); #endif diff --git a/msm/eva/msm_cvp_dsp.c b/msm/eva/msm_cvp_dsp.c index 
e9e76afe08..3ee71d7991 100644 --- a/msm/eva/msm_cvp_dsp.c +++ b/msm/eva/msm_cvp_dsp.c @@ -7,8 +7,9 @@ #include #include #include -#include "msm_cvp_dsp.h" -#include "msm_cvp_internal.h" +#include "msm_cvp_core.h" +#include "msm_cvp.h" +#include "cvp_hfi.h" struct cvp_dsp_apps gfa_cv; static int hlosVM[HLOS_VM_NUM] = {VMID_HLOS}; @@ -204,9 +205,11 @@ static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev, } else if (rsp->type < CVP_DSP_MAX_CMD && len == sizeof(struct cvp_dsp2cpu_cmd_msg)) { if (me->pending_dsp2cpu_cmd.type != CVP_INVALID_RPMSG_TYPE) { - dprintk(CVP_ERR, "%s: DSP2CPU cmd:%d pending %d\n", + dprintk(CVP_ERR, + "%s: DSP2CPU cmd:%d pending %d %d expect %d\n", __func__, rsp->type, - me->pending_dsp2cpu_cmd.type); + me->pending_dsp2cpu_cmd.type, len, + sizeof(struct cvp_dsp2cpu_cmd_msg)); goto exit; } memcpy(&me->pending_dsp2cpu_cmd, rsp, @@ -286,29 +289,19 @@ int cvp_dsp_resume(uint32_t session_flag) int rc = 0; struct cvp_dsp_cmd_msg cmd; struct cvp_dsp_apps *me = &gfa_cv; - struct cvp_dsp_rsp_msg rsp; cmd.type = CPU2DSP_RESUME; - mutex_lock(&me->lock); + /* + * Deadlock against DSP2CPU_CREATE_SESSION in dsp_thread + * Probably get rid of this entirely as discussed before + */ if (me->state != DSP_SUSPEND) goto exit; - /* Use cvp_dsp_send_cmd_sync after dsp driver is ready */ - rc = cvp_dsp_send_cmd_sync(&cmd, - sizeof(struct cvp_dsp_cmd_msg), - &rsp); - if (rc) { - dprintk(CVP_ERR, - "%s: cvp_dsp_send_cmd failed rc = %d\n", - __func__, rc); - goto exit; - } - me->state = DSP_READY; exit: - mutex_unlock(&me->lock); return rc; } @@ -641,12 +634,280 @@ static int cvp_reinit_dsp(void) return rc; } +#ifdef FASTRPC_DRIVER_AVAILABLE +static struct cvp_dsp_fastrpc_driver_entry *cvp_find_fastrpc_node_with_handle( + uint32_t handle) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct list_head *ptr = NULL, *next = NULL; + struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL; + + mutex_lock(&me->fastrpc_driver_list.lock); + list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) { + frpc_node = list_entry(ptr, + struct cvp_dsp_fastrpc_driver_entry, list); + if (handle == frpc_node->handle) { + dprintk(CVP_DSP, "Find frpc_node with handle 0x%x\n", + handle); + break; + } + } + mutex_unlock(&me->fastrpc_driver_list.lock); + + return frpc_node; +} + +static void eva_fastrpc_driver_unregister(struct msm_cvp_inst *inst, + uint32_t handle, + bool force_exit); + +static int cvp_fastrpc_probe(struct fastrpc_device *rpc_dev) +{ + struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL; + + dprintk(CVP_DSP, "%s fastrpc probe handle 0x%x\n", + __func__, rpc_dev->handle); + + frpc_node = cvp_find_fastrpc_node_with_handle(rpc_dev->handle); + if (frpc_node) { + frpc_node->cvp_fastrpc_device = rpc_dev; + // static structure with signal and pid + complete(&frpc_node->fastrpc_probe_completion); + } + + return 0; +} + +static int cvp_fastrpc_callback(struct fastrpc_device *rpc_dev, + enum fastrpc_driver_status fastrpc_proc_num) +{ + dprintk(CVP_DSP, "%s handle 0x%x, proc %d\n", __func__, + rpc_dev->handle, fastrpc_proc_num); + + /* fastrpc drive down when process gone + * any handling can happen here, such as + * eva_fastrpc_driver_unregister(rpc_dev->handle, true); + */ + + return 0; +} + + +static struct fastrpc_driver cvp_fastrpc_client = { + .probe = cvp_fastrpc_probe, + .callback = cvp_fastrpc_callback, + .driver = { + .name = "qcom,fastcv", + }, +}; + + +static int eva_fastrpc_dev_map_dma(struct fastrpc_device *frpc_device, + struct cvp_internal_buf *buf, + uint32_t dsp_remote_map, + uint64_t 
*v_dsp_addr) +{ + struct fastrpc_dev_map_dma frpc_map_buf = {0}; + int rc = 0; + + if (dsp_remote_map == 1) { + frpc_map_buf.buf = buf->smem->dma_buf; + frpc_map_buf.size = buf->smem->size; + frpc_map_buf.attrs = 0; + + dprintk(CVP_DSP, + "%s frpc_map_buf size %d, dma_buf %pK, map %pK, 0x%x\n", + __func__, frpc_map_buf.size, frpc_map_buf.buf, + &frpc_map_buf, (unsigned long)&frpc_map_buf); + rc = fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_MAP_DMA, + (unsigned long)(&frpc_map_buf)); + if (rc) { + dprintk(CVP_ERR, + "%s Failed to map buffer 0x%x\n", __func__, rc); + return rc; + } + buf->fd = (s32)frpc_map_buf.v_dsp_addr; + *v_dsp_addr = frpc_map_buf.v_dsp_addr; + } else { + dprintk(CVP_DSP, "%s Buffer not mapped to dsp\n", __func__); + buf->fd = 0; + } + + return rc; +} + +static int eva_fastrpc_dev_unmap_dma(struct fastrpc_device *frpc_device, + struct cvp_internal_buf *buf) +{ + struct fastrpc_dev_unmap_dma frpc_unmap_buf = {0}; + int rc = 0; + + /* Only if buffer is mapped to dsp */ + if (buf->fd != 0) { + frpc_unmap_buf.buf = buf->smem->dma_buf; + rc = fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_UNMAP_DMA, + (unsigned long)(&frpc_unmap_buf)); + if (rc) { + dprintk(CVP_ERR, "%s Failed to unmap buffer 0x%x\n", + __func__, rc); + return rc; + } + } else { + dprintk(CVP_DSP, "%s buffer not mapped to dsp\n", __func__); + } + + return rc; +} + +static int eva_fastrpc_driver_register(uint32_t handle) +{ + struct cvp_dsp_apps *me = &gfa_cv; + int rc = 0; + //struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL; + + frpc_node = cvp_find_fastrpc_node_with_handle(handle); + + if (frpc_node == NULL) { + frpc_node = kzalloc(sizeof(*frpc_node), GFP_KERNEL); + if (!frpc_node) { + dprintk(CVP_DSP, "%s allocate frpc node fail\n", + __func__); + return -EINVAL; + } + + memset(frpc_node, 0, sizeof(*frpc_node)); + + /* Init completion */ + init_completion(&frpc_node->fastrpc_probe_completion); + + mutex_lock(&me->fastrpc_driver_list.lock); + dprintk(CVP_DSP, "Add frpc node 0x%x to list\n", frpc_node); + list_add_tail(&frpc_node->list, &me->fastrpc_driver_list.list); + mutex_unlock(&me->fastrpc_driver_list.lock); + + /* register fastrpc device to this session */ + frpc_node->handle = handle; + frpc_node->cvp_fastrpc_driver = cvp_fastrpc_client; + frpc_node->cvp_fastrpc_driver.handle = handle; + rc = fastrpc_driver_register(&frpc_node->cvp_fastrpc_driver); + if (rc) { + dprintk(CVP_ERR, "%s fastrpc driver reg fail err %d\n", + __func__, rc); + goto fail_fastrpc_driver_register; + } + + /* signal wait reuse dsp timeout setup for now */ + if (!wait_for_completion_timeout( + &frpc_node->fastrpc_probe_completion, + msecs_to_jiffies(CVP_DSP_RESPONSE_TIMEOUT))) { + dprintk(CVP_ERR, "%s fastrpc driver_register timeout\n", + __func__); + goto fail_fastrpc_driver_timeout; + } + + /* initialize dspbuf list */ + INIT_MSM_CVP_LIST(&frpc_node->dspbufs); + } + + frpc_node->session_cnt++; + + return rc; + +fail_fastrpc_driver_timeout: + /* remove list if this is the last session */ + mutex_lock(&me->fastrpc_driver_list.lock); + list_del(&frpc_node->list); + mutex_unlock(&me->fastrpc_driver_list.lock); + fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver); +fail_fastrpc_driver_register: + kfree(frpc_node); + return -EINVAL; +} + +static void eva_fastrpc_driver_unregister(struct msm_cvp_inst *inst, + uint32_t handle, + bool force_exit) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct list_head *ptr = NULL, *next = NULL; + struct 
cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+	struct msm_cvp_list *buf_list = NULL;
+	struct cvp_internal_buf *buf = NULL;
+	struct fastrpc_device *frpc_device = NULL;
+	struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+	int rc = 0;
+
+	dprintk(CVP_DSP, "%s Unregister fastrpc driver handle 0x%x, force %d\n",
+		__func__, handle, (uint32_t)force_exit);
+
+	/* Found fastrpc node */
+	frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+
+	if (frpc_node == NULL)
+		return;
+
+	frpc_node->session_cnt--;
+
+	if ((frpc_node->session_cnt == 0) || force_exit) {
+		dprintk(CVP_DSP, "%s session cnt %d, force %d\n",
+			__func__, frpc_node->session_cnt, (uint32_t)force_exit);
+		//Free any left over buffers
+		ptr = NULL;
+		next = NULL;
+		buf_list = &frpc_node->dspbufs;
+		mutex_lock(&buf_list->lock);
+		list_for_each_safe(ptr, next, &buf_list->list) {
+			buf = list_entry(ptr, struct cvp_internal_buf, list);
+
+			if (!buf->smem) {
+				dprintk(CVP_DSP, "%s Empty smem\n", __func__);
+				list_del(&buf->list);
+				kfree(buf);
+				continue;
+			}
+
+			rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s Failed to unmap buffer 0x%x\n",
+					__func__, rc);
+				continue;
+			}
+
+			rc = cvp_release_dsp_buffers(inst, buf);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s Failed to free buffer 0x%x\n",
+					__func__, rc);
+				continue;
+			}
+
+			list_del(&buf->list);
+			kfree(buf);
+		}
+		mutex_unlock(&buf_list->lock);
+
+		DEINIT_MSM_CVP_LIST(&frpc_node->dspbufs);
+
+		/* remove list if this is the last session */
+		mutex_lock(&me->fastrpc_driver_list.lock);
+		list_del(&frpc_node->list);
+		mutex_unlock(&me->fastrpc_driver_list.lock);
+
+		fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
+		kfree(frpc_node);
+	}
+}
+
+#endif
+
 void cvp_dsp_send_hfi_queue(void)
 {
 	struct msm_cvp_core *core;
 	struct iris_hfi_device *device;
 	struct cvp_dsp_apps *me = &gfa_cv;
-	struct cvp_dsp_rsp_msg rsp;
+	struct cvp_dsp_rsp_msg rsp = {0};
 	uint64_t addr;
 	uint32_t size;
 	int rc;
@@ -737,6 +998,686 @@ exit:
 	mutex_unlock(&me->lock);
 	mutex_unlock(&device->lock);
 }
+/* 32 or 64 bit CPU Side Ptr <-> 2 32 bit DSP Pointers. Dirty Fix.
*/ +static void *ptr_dsp2cpu(uint32_t session_cpu_high, uint32_t session_cpu_low) +{ + void *inst; + + if ((session_cpu_high == 0) && (sizeof(void *) == BITPTRSIZE32)) { + inst = (void *)((uintptr_t)session_cpu_low); + } else if ((session_cpu_high != 0) && (sizeof(void *) == BITPTRSIZE64)) { + inst = (void *)((uintptr_t)(((uint64_t)session_cpu_high) << 32 + | session_cpu_low)); + } else { + dprintk(CVP_ERR, + "%s Invalid _cpu_high = 0x%x _cpu_low = 0x%x\n", + __func__, session_cpu_high, session_cpu_low); + inst = NULL; + } + return inst; +} + +static void print_power(const struct eva_power_req *pwr_req) +{ + if (pwr_req) { + dprintk(CVP_DSP, "Clock: Fdu %d Ica %d Od %d Mpu %d Fw %d", + pwr_req->clock_fdu, pwr_req->clock_ica, + pwr_req->clock_od, pwr_req->clock_mpu, + pwr_req->clock_fw); + dprintk(CVP_DSP, "OpClock: Fdu %d Ica %d Od %d Mpu %d Fw %d", + pwr_req->op_clock_fdu, pwr_req->op_clock_ica, + pwr_req->op_clock_od, pwr_req->op_clock_mpu, + pwr_req->op_clock_fw); + dprintk(CVP_DSP, "Actual Bw: Ddr %d, SysCache %d", + pwr_req->bw_ddr, pwr_req->bw_sys_cache); + dprintk(CVP_DSP, "OpBw: Ddr %d, SysCache %d", + pwr_req->op_bw_ddr, pwr_req->op_bw_sys_cache); + } +} + +static int msm_cvp_register_buffer_dsp(struct msm_cvp_inst *inst, + struct eva_kmd_buffer *buf, + int32_t pid, + uint32_t *iova) +{ + struct cvp_hfi_device *hdev; + struct cvp_hal_session *session; + struct msm_cvp_inst *s; + int rc = 0; + + if (!inst || !inst->core || !buf) { + dprintk(CVP_ERR, "%s: invalid params\n", __func__); + return -EINVAL; + } + + if (!buf->index) + return 0; + + s = cvp_get_inst_validate(inst->core, inst); + if (!s) + return -ECONNRESET; + + inst->cur_cmd_type = EVA_KMD_REGISTER_BUFFER; + session = (struct cvp_hal_session *)inst->session; + if (!session) { + dprintk(CVP_ERR, "%s: invalid session\n", __func__); + rc = -EINVAL; + goto exit; + } + hdev = inst->core->device; + print_client_buffer(CVP_HFI, "register", inst, buf); + + rc = msm_cvp_map_buf_dsp_new(inst, buf, pid, iova); + dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__, buf->fd, *iova); + +exit: + inst->cur_cmd_type = 0; + cvp_put_inst(s); + return rc; +} + +static int msm_cvp_unregister_buffer_dsp(struct msm_cvp_inst *inst, + struct eva_kmd_buffer *buf) +{ + struct msm_cvp_inst *s; + int rc = 0; + + if (!inst || !inst->core || !buf) { + dprintk(CVP_ERR, "%s: invalid params\n", __func__); + return -EINVAL; + } + + if (!buf->index) + return 0; + + s = cvp_get_inst_validate(inst->core, inst); + if (!s) + return -ECONNRESET; + + inst->cur_cmd_type = EVA_KMD_UNREGISTER_BUFFER; + print_client_buffer(CVP_HFI, "unregister", inst, buf); + + rc = msm_cvp_unmap_buf_dsp_new(inst, buf); + inst->cur_cmd_type = 0; + cvp_put_inst(s); + return rc; +} + +static void __dsp_cvp_sess_create(struct cvp_dsp_cmd_msg *cmd) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst = NULL; + uint64_t inst_handle = 0; + struct eva_kmd_arg *kmd; + struct eva_kmd_sys_properties *sys_prop = NULL; + struct eva_kmd_session_control *sys_ctrl = NULL; + int rc = 0; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + + cmd->ret = 0; + + dprintk(CVP_DSP, + "%s sess Type %d Mask %d Prio %d Sec %d pid 0x%x\n", + __func__, dsp2cpu_cmd->session_type, + dsp2cpu_cmd->kernel_mask, + dsp2cpu_cmd->session_prio, + dsp2cpu_cmd->is_secure, + dsp2cpu_cmd->pid); + + kmd = kzalloc(sizeof(*kmd), GFP_KERNEL); + if (!kmd) { + dprintk(CVP_ERR, "%s kzalloc failure\n", __func__); + goto fail_frpc_driver_reg; + } + +#ifdef FASTRPC_DRIVER_AVAILABLE + rc = 
eva_fastrpc_driver_register(dsp2cpu_cmd->pid); +#endif + if (rc) { + dprintk(CVP_ERR, "%s Register fastrpc driver fail\n", __func__); + goto fail_frpc_driver_reg; + } + + inst = msm_cvp_open(MSM_CORE_CVP, MSM_CVP_USER); + if (!inst) { + dprintk(CVP_ERR, "%s Failed create instance\n", __func__); + goto fail_msm_cvp_open; + } + + kmd->type = EVA_KMD_SET_SYS_PROPERTY; + sys_prop = (struct eva_kmd_sys_properties *)&kmd->data.sys_properties; + sys_prop->prop_num = 5; + + sys_prop->prop_data[0].prop_type = EVA_KMD_PROP_SESSION_KERNELMASK; + sys_prop->prop_data[0].data = dsp2cpu_cmd->kernel_mask; + sys_prop->prop_data[1].prop_type = EVA_KMD_PROP_SESSION_TYPE; + sys_prop->prop_data[1].data = dsp2cpu_cmd->session_type; + sys_prop->prop_data[2].prop_type = EVA_KMD_PROP_SESSION_PRIORITY; + sys_prop->prop_data[2].data = dsp2cpu_cmd->session_prio; + sys_prop->prop_data[3].prop_type = EVA_KMD_PROP_SESSION_SECURITY; + sys_prop->prop_data[3].data = dsp2cpu_cmd->is_secure; + sys_prop->prop_data[4].prop_type = EVA_KMD_PROP_SESSION_DSPMASK; + sys_prop->prop_data[4].data = dsp2cpu_cmd->dsp_access_mask; + + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + dprintk(CVP_ERR, "%s Failed to set sys property\n", __func__); + goto fail_set_sys_property; + } + dprintk(CVP_DSP, "%s set sys property done\n", __func__); + + + /* EVA_KMD_SESSION_CONTROL from DSP */ + memset(kmd, 0, sizeof(struct eva_kmd_arg)); + kmd->type = EVA_KMD_SESSION_CONTROL; + sys_ctrl = (struct eva_kmd_session_control *)&kmd->data.session_ctrl; + sys_ctrl->ctrl_type = SESSION_CREATE; + + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + dprintk(CVP_ERR, "Warning: send Session Create failed\n"); + goto fail_session_create; + } + dprintk(CVP_DSP, "%s send Session Create done\n", __func__); + + + /* Get session id */ + memset(kmd, 0, sizeof(struct eva_kmd_arg)); + kmd->type = EVA_KMD_GET_SESSION_INFO; + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + dprintk(CVP_ERR, "Warning: get session index failed\n"); + goto fail_get_session_info; + } + cmd->session_id = kmd->data.session.session_id; + + inst_handle = (uint64_t)inst; + cmd->session_cpu_high = (uint32_t)((inst_handle & HIGH32) >> 32); + cmd->session_cpu_low = (uint32_t)(inst_handle & LOW32); + + dprintk(CVP_DSP, + "%s CREATE_SESS id 0x%x, cpu_low 0x%x, cpu_high 0x%x\n", + __func__, cmd->session_id, cmd->session_cpu_low, + cmd->session_cpu_high); + + kfree(kmd); + return; + +fail_get_session_info: +fail_session_create: +fail_set_sys_property: +fail_msm_cvp_open: + /* unregister fastrpc driver */ +fail_frpc_driver_reg: + cmd->ret = -1; + kfree(kmd); +} + +static void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst; + struct eva_kmd_arg *kmd; + struct eva_kmd_session_control *sys_ctrl; + int rc; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + + cmd->ret = 0; + + dprintk(CVP_DSP, + "%s sess id 0x%x low 0x%x high 0x%x, pid 0x%x\n", + __func__, dsp2cpu_cmd->session_id, + dsp2cpu_cmd->session_cpu_low, + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->pid); + + kmd = kzalloc(sizeof(*kmd), GFP_KERNEL); + if (!kmd) { + dprintk(CVP_ERR, "%s kzalloc failure\n", __func__); + cmd->ret = -1; + return; + } + + inst = (struct msm_cvp_inst *)ptr_dsp2cpu( + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->session_cpu_low); + +#ifdef FASTRPC_DRIVER_AVAILABLE + /* unregister fastrpc driver */ + eva_fastrpc_driver_unregister(inst, dsp2cpu_cmd->pid, false); +#endif + + kmd->type = EVA_KMD_SESSION_CONTROL; + sys_ctrl 
= (struct eva_kmd_session_control *)&kmd->data.session_ctrl; + + /* Session delete does nothing here */ + sys_ctrl->ctrl_type = SESSION_DELETE; + + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + dprintk(CVP_ERR, "Warning: send Delete Session failed\n"); + cmd->ret = -1; + goto dsp_fail_delete; + } + + rc = msm_cvp_close(inst); + if (rc) { + dprintk(CVP_ERR, "Warning: Failed to close cvp instance\n"); + cmd->ret = -1; + goto dsp_fail_delete; + } + + dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done\n", __func__); +dsp_fail_delete: + kfree(kmd); +} + +static void __dsp_cvp_power_req(struct cvp_dsp_cmd_msg *cmd) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst; + struct eva_kmd_arg *kmd; + struct eva_kmd_sys_properties *sys_prop; + int rc; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + + cmd->ret = 0; + dprintk(CVP_DSP, + "%s sess id 0x%x, low 0x%x, high 0x%x\n", + __func__, dsp2cpu_cmd->session_id, + dsp2cpu_cmd->session_cpu_low, + dsp2cpu_cmd->session_cpu_high); + + kmd = kzalloc(sizeof(*kmd), GFP_KERNEL); + if (!kmd) { + dprintk(CVP_ERR, "%s kzalloc failure\n", __func__); + cmd->ret = -1; + return; + } + + inst = (struct msm_cvp_inst *)ptr_dsp2cpu( + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->session_cpu_low); + + print_power(&dsp2cpu_cmd->power_req); + + /* EVA_KMD_SET_SYS_PROPERTY + * Total 14 properties, 8 max once + * Need to do 2 rounds + */ + kmd->type = EVA_KMD_SET_SYS_PROPERTY; + sys_prop = (struct eva_kmd_sys_properties *)&kmd->data.sys_properties; + sys_prop->prop_num = 7; + + sys_prop->prop_data[0].prop_type = EVA_KMD_PROP_PWR_FDU; + sys_prop->prop_data[0].data = + dsp2cpu_cmd->power_req.clock_fdu; + sys_prop->prop_data[1].prop_type = EVA_KMD_PROP_PWR_ICA; + sys_prop->prop_data[1].data = + dsp2cpu_cmd->power_req.clock_ica; + sys_prop->prop_data[2].prop_type = EVA_KMD_PROP_PWR_OD; + sys_prop->prop_data[2].data = + dsp2cpu_cmd->power_req.clock_od; + sys_prop->prop_data[3].prop_type = EVA_KMD_PROP_PWR_MPU; + sys_prop->prop_data[3].data = + dsp2cpu_cmd->power_req.clock_mpu; + sys_prop->prop_data[4].prop_type = EVA_KMD_PROP_PWR_FW; + sys_prop->prop_data[4].data = + dsp2cpu_cmd->power_req.clock_fw; + sys_prop->prop_data[5].prop_type = EVA_KMD_PROP_PWR_DDR; + sys_prop->prop_data[5].data = + dsp2cpu_cmd->power_req.bw_ddr; + sys_prop->prop_data[6].prop_type = EVA_KMD_PROP_PWR_SYSCACHE; + sys_prop->prop_data[6].data = + dsp2cpu_cmd->power_req.bw_sys_cache; + + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + dprintk(CVP_ERR, "%s Failed to set sys property\n", __func__); + cmd->ret = -1; + goto dsp_fail_power_req; + } + dprintk(CVP_DSP, "%s set sys property done part 1\n", __func__); + + /* EVA_KMD_SET_SYS_PROPERTY Round 2 */ + memset(kmd, 0, sizeof(struct eva_kmd_arg)); + kmd->type = EVA_KMD_SET_SYS_PROPERTY; + sys_prop = (struct eva_kmd_sys_properties *)&kmd->data.sys_properties; + sys_prop->prop_num = 7; + + sys_prop->prop_data[0].prop_type = EVA_KMD_PROP_PWR_FDU_OP; + sys_prop->prop_data[0].data = + dsp2cpu_cmd->power_req.op_clock_fdu; + sys_prop->prop_data[1].prop_type = EVA_KMD_PROP_PWR_ICA_OP; + sys_prop->prop_data[1].data = + dsp2cpu_cmd->power_req.op_clock_ica; + sys_prop->prop_data[2].prop_type = EVA_KMD_PROP_PWR_OD_OP; + sys_prop->prop_data[2].data = + dsp2cpu_cmd->power_req.op_clock_od; + sys_prop->prop_data[3].prop_type = EVA_KMD_PROP_PWR_MPU_OP; + sys_prop->prop_data[3].data = + dsp2cpu_cmd->power_req.op_clock_mpu; + sys_prop->prop_data[4].prop_type = EVA_KMD_PROP_PWR_FW_OP; + sys_prop->prop_data[4].data = + 
dsp2cpu_cmd->power_req.op_clock_fw; + sys_prop->prop_data[5].prop_type = EVA_KMD_PROP_PWR_DDR_OP; + sys_prop->prop_data[5].data = + dsp2cpu_cmd->power_req.op_bw_ddr; + sys_prop->prop_data[6].prop_type = EVA_KMD_PROP_PWR_SYSCACHE_OP; + sys_prop->prop_data[6].data = + dsp2cpu_cmd->power_req.op_bw_sys_cache; + + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + dprintk(CVP_ERR, "%s Failed to set sys property\n", __func__); + cmd->ret = -1; + goto dsp_fail_power_req; + } + dprintk(CVP_DSP, "%s set sys property done part 2\n", __func__); + + memset(kmd, 0, sizeof(struct eva_kmd_arg)); + kmd->type = EVA_KMD_UPDATE_POWER; + rc = msm_cvp_handle_syscall(inst, kmd); + if (rc) { + /* May need to define more error types + * Check UMD implementation here: + * https://opengrok.qualcomm.com/source/xref/LA.UM.9.14/vendor/qcom/proprietary/cv-noship/cvp/cpurev/src/cvpcpuRev_skel_imp_cvp2.cpp#380 + */ + dprintk(CVP_ERR, "%s Failed to send update power numbers\n", __func__); + cmd->ret = -1; + goto dsp_fail_power_req; + } + + dprintk(CVP_DSP, "%s DSP2CPU_POWER_REQUEST Done\n", __func__); +dsp_fail_power_req: + kfree(kmd); +} + +static void __dsp_cvp_buf_register(struct cvp_dsp_cmd_msg *cmd) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst; + struct eva_kmd_arg *kmd; + struct eva_kmd_buffer *kmd_buf; + int rc; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + + cmd->ret = 0; + + dprintk(CVP_DSP, + "%s sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n", + __func__, dsp2cpu_cmd->session_id, + dsp2cpu_cmd->session_cpu_low, + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->pid); + + kmd = kzalloc(sizeof(*kmd), GFP_KERNEL); + if (!kmd) { + dprintk(CVP_ERR, "%s kzalloc failure\n", __func__); + cmd->ret = -1; + return; + } + + inst = (struct msm_cvp_inst *)ptr_dsp2cpu( + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->session_cpu_low); + + kmd->type = EVA_KMD_REGISTER_BUFFER; + kmd_buf = (struct eva_kmd_buffer *)&(kmd->data.regbuf); + kmd_buf->type = EVA_KMD_BUFTYPE_INPUT; + kmd_buf->index = dsp2cpu_cmd->sbuf.index; + kmd_buf->fd = dsp2cpu_cmd->sbuf.fd; + kmd_buf->size = dsp2cpu_cmd->sbuf.size; + kmd_buf->offset = dsp2cpu_cmd->sbuf.offset; + kmd_buf->pixelformat = 0; + kmd_buf->flags = EVA_KMD_FLAG_UNSECURE; + + rc = msm_cvp_register_buffer_dsp(inst, kmd_buf, + dsp2cpu_cmd->pid, &cmd->sbuf.iova); + if (rc) { + dprintk(CVP_ERR, "%s Failed to register buffer\n", __func__); + cmd->ret = -1; + goto dsp_fail_buf_reg; + } + dprintk(CVP_DSP, "%s register buffer done\n", __func__); + + cmd->sbuf.size = kmd_buf->size; + cmd->sbuf.fd = kmd_buf->fd; + cmd->sbuf.index = kmd_buf->index; + cmd->sbuf.offset = kmd_buf->offset; + dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__, + cmd->sbuf.fd, cmd->sbuf.iova); +dsp_fail_buf_reg: + kfree(kmd); +} + +static void __dsp_cvp_buf_deregister(struct cvp_dsp_cmd_msg *cmd) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst; + struct eva_kmd_arg *kmd; + struct eva_kmd_buffer *kmd_buf; + int rc; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + + cmd->ret = 0; + + dprintk(CVP_DSP, + "%s : sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n", + __func__, dsp2cpu_cmd->session_id, + dsp2cpu_cmd->session_cpu_low, + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->pid); + + kmd = kzalloc(sizeof(*kmd), GFP_KERNEL); + if (!kmd) { + dprintk(CVP_ERR, "%s kzalloc failure\n", __func__); + cmd->ret = -1; + return; + } + + inst = (struct msm_cvp_inst *)ptr_dsp2cpu( + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->session_cpu_low); + + 
kmd->type = EVA_KMD_UNREGISTER_BUFFER; + kmd_buf = (struct eva_kmd_buffer *)&(kmd->data.regbuf); + kmd_buf->type = EVA_KMD_UNREGISTER_BUFFER; + + kmd_buf->type = EVA_KMD_BUFTYPE_INPUT; + kmd_buf->index = dsp2cpu_cmd->sbuf.index; + kmd_buf->fd = dsp2cpu_cmd->sbuf.fd; + kmd_buf->size = dsp2cpu_cmd->sbuf.size; + kmd_buf->offset = dsp2cpu_cmd->sbuf.offset; + kmd_buf->pixelformat = 0; + kmd_buf->flags = EVA_KMD_FLAG_UNSECURE; + + rc = msm_cvp_unregister_buffer_dsp(inst, kmd_buf); + if (rc) { + dprintk(CVP_ERR, "%s Failed to deregister buffer\n", __func__); + cmd->ret = -1; + goto fail_dsp_buf_dereg; + } + + dprintk(CVP_DSP, "%s deregister buffer done\n", __func__); +fail_dsp_buf_dereg: + kfree(kmd); +} + +static void __dsp_cvp_mem_alloc(struct cvp_dsp_cmd_msg *cmd) +{ + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst; + int rc; + struct cvp_internal_buf *buf = NULL; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + uint64_t v_dsp_addr = 0; + +#ifdef FASTRPC_DRIVER_AVAILABLE + struct fastrpc_device *frpc_device = NULL; + struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL; + struct msm_cvp_list *buf_list = NULL; +#endif + + cmd->ret = 0; + + dprintk(CVP_DSP, + "%s sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n", + __func__, dsp2cpu_cmd->session_id, + dsp2cpu_cmd->session_cpu_low, + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->pid); + +#ifdef FASTRPC_DRIVER_AVAILABLE + frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid); + if (!frpc_node) { + dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n", + __func__, dsp2cpu_cmd->pid); + goto fail_fastrpc_node; + } + frpc_device = frpc_node->cvp_fastrpc_device; +#endif + + inst = (struct msm_cvp_inst *)ptr_dsp2cpu( + dsp2cpu_cmd->session_cpu_high, + dsp2cpu_cmd->session_cpu_low); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + goto fail_kzalloc_buf; + + rc = cvp_allocate_dsp_bufs(inst, buf, + dsp2cpu_cmd->sbuf.size, + dsp2cpu_cmd->sbuf.type); + if (rc != 0) + goto fail_allocate_dsp_buf; + +#ifdef FASTRPC_DRIVER_AVAILABLE + rc = eva_fastrpc_dev_map_dma(frpc_device, buf, + dsp2cpu_cmd->sbuf.dsp_remote_map, + &v_dsp_addr); +#endif + if (rc) { + dprintk(CVP_ERR, "%s Failed to map buffer 0x%x\n", __func__, + rc); + goto fail_fastrpc_dev_map_dma; + } +#ifdef FASTRPC_DRIVER_AVAILABLE + buf_list = &frpc_node->dspbufs; + mutex_lock(&buf_list->lock); + list_add_tail(&buf->list, &buf_list->list); + mutex_unlock(&buf_list->lock); +#endif + dprintk(CVP_DSP, "%s allocate buffer done, addr 0x%llx\n", + __func__, v_dsp_addr); + + cmd->sbuf.size = buf->smem->size; + cmd->sbuf.fd = buf->fd; + cmd->sbuf.offset = 0; + cmd->sbuf.iova = buf->smem->device_addr; + cmd->sbuf.v_dsp_addr = v_dsp_addr; + dprintk(CVP_DSP, "%s: size %d, iova 0x%x, v_dsp_addr 0x%llx\n", + __func__, cmd->sbuf.size, cmd->sbuf.iova, + cmd->sbuf.v_dsp_addr); + + return; + +fail_fastrpc_dev_map_dma: + cvp_release_dsp_buffers(inst, buf); +fail_allocate_dsp_buf: + kfree(buf); +fail_kzalloc_buf: +#ifdef FASTRPC_DRIVER_AVAILABLE +fail_fastrpc_node: +#endif + cmd->ret = -1; + return; + +} + +static void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd) +{ +#ifdef FASTRPC_DRIVER_AVAILABLE + struct cvp_dsp_apps *me = &gfa_cv; + struct msm_cvp_inst *inst; + int rc; + struct cvp_internal_buf *buf = NULL; + struct list_head *ptr = NULL, *next = NULL; + struct msm_cvp_list *buf_list = NULL; + struct cvp_dsp2cpu_cmd_msg *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd; + + struct fastrpc_device *frpc_device = NULL; + struct cvp_dsp_fastrpc_driver_entry *frpc_node = 
NULL;
+
+	cmd->ret = 0;
+
+	dprintk(CVP_DSP,
+		"%s sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n",
+		__func__, dsp2cpu_cmd->session_id,
+		dsp2cpu_cmd->session_cpu_low,
+		dsp2cpu_cmd->session_cpu_high,
+		dsp2cpu_cmd->pid);
+
+	inst = (struct msm_cvp_inst *)ptr_dsp2cpu(
+			dsp2cpu_cmd->session_cpu_high,
+			dsp2cpu_cmd->session_cpu_low);
+
+	frpc_node = cvp_find_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+	if (!frpc_node) {
+		dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n",
+			__func__, dsp2cpu_cmd->pid);
+		cmd->ret = -1;
+		return;
+	}
+	frpc_device = frpc_node->cvp_fastrpc_device;
+
+	buf_list = &frpc_node->dspbufs;
+	mutex_lock(&buf_list->lock);
+	list_for_each_safe(ptr, next, &buf_list->list) {
+		buf = list_entry(ptr, struct cvp_internal_buf, list);
+		dprintk(CVP_DSP, "fd in list 0x%x, fd from dsp 0x%x\n",
+			buf->fd, dsp2cpu_cmd->sbuf.fd);
+
+		if (!buf->smem) {
+			dprintk(CVP_DSP, "Empty smem\n");
+			continue;
+		}
+
+		/* Verify with device addr */
+		if (buf->smem->device_addr == dsp2cpu_cmd->sbuf.iova) {
+			dprintk(CVP_DSP, "%s find device addr 0x%x\n",
+				__func__, buf->smem->device_addr);
+
+			rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s Failed to unmap buffer 0x%x\n",
+					__func__, rc);
+				cmd->ret = -1;
+				goto fail_fastrpc_dev_unmap_dma;
+			}
+
+			rc = cvp_release_dsp_buffers(inst, buf);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s Failed to free buffer 0x%x\n",
+					__func__, rc);
+				cmd->ret = -1;
+				goto fail_release_buf;
+			}
+
+			list_del(&buf->list);
+
+			kfree(buf);
+			break;
+		}
+	}
+
+fail_release_buf:
+fail_fastrpc_dev_unmap_dma:
+	mutex_unlock(&buf_list->lock);
+#endif
+}
 
 static int cvp_dsp_thread(void *data)
 {
@@ -811,6 +1752,48 @@ wait_dsp:
 			cmd.ret = 0;
 			break;
 		}
+		case DSP2CPU_CREATE_SESSION:
+		{
+			__dsp_cvp_sess_create(&cmd);
+
+			break;
+		}
+		case DSP2CPU_DETELE_SESSION:
+		{
+			__dsp_cvp_sess_delete(&cmd);
+
+			break;
+		}
+		case DSP2CPU_POWER_REQUEST:
+		{
+			__dsp_cvp_power_req(&cmd);
+
+			break;
+		}
+		case DSP2CPU_REGISTER_BUFFER:
+		{
+			__dsp_cvp_buf_register(&cmd);
+
+			break;
+		}
+		case DSP2CPU_DEREGISTER_BUFFER:
+		{
+			__dsp_cvp_buf_deregister(&cmd);
+
+			break;
+		}
+		case DSP2CPU_MEM_ALLOC:
+		{
+			__dsp_cvp_mem_alloc(&cmd);
+
+			break;
+		}
+		case DSP2CPU_MEM_FREE:
+		{
+			__dsp_cvp_mem_free(&cmd);
+
+			break;
+		}
 		default:
 			dprintk(CVP_ERR, "unrecognaized dsp cmds: %d\n",
 				me->pending_dsp2cpu_cmd.type);
@@ -848,6 +1831,8 @@ int cvp_dsp_device_init(void)
 	me->pending_dsp2cpu_cmd.type = CVP_INVALID_RPMSG_TYPE;
 	me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
 
+	INIT_MSM_CVP_LIST(&me->fastrpc_driver_list);
+
 	rc = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
 	if (rc) {
 		dprintk(CVP_ERR,
@@ -879,6 +1864,8 @@ void cvp_dsp_device_exit(void)
 	me->state = DSP_INVALID;
 	mutex_unlock(&me->lock);
 
+	DEINIT_MSM_CVP_LIST(&me->fastrpc_driver_list);
+
 	for (i = 0; i <= CPU2DSP_MAX_CMD; i++)
 		complete_all(&me->completions[i]);
 
diff --git a/msm/eva/msm_cvp_dsp.h b/msm/eva/msm_cvp_dsp.h
index da926c0cf1..fd58f9a267 100644
--- a/msm/eva/msm_cvp_dsp.h
+++ b/msm/eva/msm_cvp_dsp.h
@@ -10,6 +10,16 @@
 #include "msm_cvp_debug.h"
 #include "cvp_core_hfi.h"
+#include
+#include
+
+/*#define FASTRPC_DRIVER_AVAILABLE*/
+
+#ifdef FASTRPC_DRIVER_AVAILABLE
+#include
+#endif
+
+
 #define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
 #define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
 
@@ -20,6 +30,17 @@
 #define CVP_DSP2CPU_RESERVED 8
 #define CVP_DSP_RESPONSE_TIMEOUT 300
 #define CVP_INVALID_RPMSG_TYPE 0xBADDFACE
+#define MAX_FRAME_BUF_NUM 16
+
+#define BITPTRSIZE32 (4)
+#define BITPTRSIZE64 (8)
+#define HIGH32 (0xFFFFFFFF00000000LL)
+#define LOW32 (0xFFFFFFFFLL)
+
+
+/* Supports up to 8 DSP sessions in 4 processes */
+#define MAX_FASTRPC_DRIVER_NUM (4)
+#define MAX_DSP_SESSION_NUM (8)
 
 int cvp_dsp_device_init(void);
 void cvp_dsp_device_exit(void);
 
@@ -42,10 +63,56 @@ enum CVP_DSP_COMMAND {
 	CPU2DSP_SHUTDOWN = 3,
 	CPU2DSP_REGISTER_BUFFER = 4,
 	CPU2DSP_DEREGISTER_BUFFER = 5,
-	CPU2DSP_MAX_CMD = 6,
-	DSP2CPU_POWERON = 6,
-	DSP2CPU_POWEROFF = 7,
-	CVP_DSP_MAX_CMD = 8,
+	CPU2DSP_INIT = 6,
+	CPU2DSP_SET_DEBUG_LEVEL = 7,
+	CPU2DSP_MAX_CMD = 8,
+	DSP2CPU_POWERON = 11,
+	DSP2CPU_POWEROFF = 12,
+	DSP2CPU_CREATE_SESSION = 13,
+	DSP2CPU_DETELE_SESSION = 14,
+	DSP2CPU_POWER_REQUEST = 15,
+	DSP2CPU_POWER_CANCEL = 16,
+	DSP2CPU_REGISTER_BUFFER = 17,
+	DSP2CPU_DEREGISTER_BUFFER = 18,
+	DSP2CPU_MEM_ALLOC = 19,
+	DSP2CPU_MEM_FREE = 20,
+	CVP_DSP_MAX_CMD = 21,
+};
+
+enum eva_dsp_debug_level {
+	EVA_PORT_INFO_ON = 0,
+	EVA_PORT_DEBUG_ON = 1,
+	EVA_QDI_INFO_ON = 2,
+	EVA_QDI_DEBUG_ON = 3,
+	EVA_MEM_DEBUG_ON = 4
+};
+
+struct eva_power_req {
+	uint32_t clock_fdu;
+	uint32_t clock_ica;
+	uint32_t clock_od;
+	uint32_t clock_mpu;
+	uint32_t clock_fw;
+	uint32_t bw_ddr;
+	uint32_t bw_sys_cache;
+	uint32_t op_clock_fdu;
+	uint32_t op_clock_ica;
+	uint32_t op_clock_od;
+	uint32_t op_clock_mpu;
+	uint32_t op_clock_fw;
+	uint32_t op_bw_ddr;
+	uint32_t op_bw_sys_cache;
+};
+
+struct eva_mem_remote {
+	uint32_t type;
+	uint32_t size;
+	uint32_t fd;
+	uint32_t offset;
+	uint32_t index;
+	uint32_t iova;
+	uint32_t dsp_remote_map;
+	uint64_t v_dsp_addr;
 };
 
 struct cvp_dsp_cmd_msg {
@@ -61,6 +128,15 @@ struct cvp_dsp_cmd_msg {
 	uint32_t buff_fd;
 	uint32_t buff_offset;
 	uint32_t buff_fd_size;
+
+	uint32_t eva_dsp_debug_level;
+
+	/* Create Session */
+	uint32_t session_cpu_low;
+	uint32_t session_cpu_high;
+
+	struct eva_mem_remote sbuf;
+
 	uint32_t reserved1;
 	uint32_t reserved2;
 };
@@ -76,9 +152,38 @@ struct cvp_dsp2cpu_cmd_msg {
 	uint32_t type;
 	uint32_t ver;
 	uint32_t len;
+
+	/* Create Session */
+	uint32_t session_type;
+	uint32_t kernel_mask;
+	uint32_t session_prio;
+	uint32_t is_secure;
+	uint32_t dsp_access_mask;
+
+	uint32_t session_id;
+	uint32_t session_cpu_low;
+	uint32_t session_cpu_high;
+	int32_t pid;
+	struct eva_power_req power_req;
+	struct eva_mem_remote sbuf;
+
 	uint32_t data[CVP_DSP2CPU_RESERVED];
 };
 
+struct cvp_dsp_fastrpc_driver_entry {
+	struct list_head list;
+	uint32_t handle;
+	uint32_t session_cnt;
+#ifdef FASTRPC_DRIVER_AVAILABLE
+	struct fastrpc_driver cvp_fastrpc_driver;
+	struct fastrpc_device *cvp_fastrpc_device;
+#endif
+	struct completion fastrpc_probe_completion;
+	struct msm_cvp_list dspbufs;
+	/* all dsp sessions list */
+	struct msm_cvp_list dsp_session;
+};
+
 struct cvp_dsp_apps {
 	struct mutex lock;
 	struct rpmsg_device *chan;
@@ -90,6 +195,10 @@ struct cvp_dsp_apps {
 	struct cvp_dsp2cpu_cmd_msg pending_dsp2cpu_cmd;
 	struct cvp_dsp_rsp_msg pending_dsp2cpu_rsp;
 	struct task_struct *dsp_thread;
+	/* dma_buf file operations pointer, used for dsp buffer mapping */
+	const struct file_operations *dmabuf_f_op;
+	uint32_t buf_num;
+	struct msm_cvp_list fastrpc_driver_list;
 };
 
 extern struct cvp_dsp_apps gfa_cv;
diff --git a/msm/eva/msm_cvp_platform.c b/msm/eva/msm_cvp_platform.c
index efc64f0e05..4cc0a02404 100644
--- a/msm/eva/msm_cvp_platform.c
+++ b/msm/eva/msm_cvp_platform.c
@@ -93,11 +93,15 @@ static struct msm_cvp_common_data sm8450_common_data[] = {
 	},
 	{
 		.key = "qcom,dsp-resp-timeout",
-		.value = 1000
+		.value = 1000,
 	},
 	{
 		.key = "qcom,debug-timeout",
 		.value = 0,
+	},
+	{
+		.key = "qcom,dsp-enabled",
+		.value = 0,
 	}
 };
diff --git a/msm/eva/msm_cvp_res_parse.c b/msm/eva/msm_cvp_res_parse.c
index 2ff704c8ca..c46ea1924b 100644
--- a/msm/eva/msm_cvp_res_parse.c
+++ b/msm/eva/msm_cvp_res_parse.c
@@ -727,6 +727,9 @@ int cvp_read_platform_resources_from_drv_data(
 	res->auto_pil = find_key_value(platform_data,
 			"qcom,auto-pil");
 
+	res->dsp_enabled = find_key_value(platform_data,
+			"qcom,dsp-enabled");
+
 	res->max_load = find_key_value(platform_data,
 			"qcom,max-hw-load");
 
diff --git a/msm/eva/msm_cvp_resources.h b/msm/eva/msm_cvp_resources.h
index ad254fb87f..3686bf0d2e 100644
--- a/msm/eva/msm_cvp_resources.h
+++ b/msm/eva/msm_cvp_resources.h
@@ -158,6 +158,7 @@ struct msm_cvp_platform_resources {
 	bool use_non_secure_pil;
 	bool sw_power_collapsible;
 	bool auto_pil;
+	bool dsp_enabled;
 	struct list_head context_banks;
 	bool thermal_mitigable;
 	const char *fw_name;
diff --git a/msm/eva/msm_smem.c b/msm/eva/msm_smem.c
index 6fe57762c4..86937f927e 100644
--- a/msm/eva/msm_smem.c
+++ b/msm/eva/msm_smem.c
@@ -16,6 +16,7 @@
 #include "msm_cvp_debug.h"
 #include "msm_cvp_resources.h"
 #include "cvp_core_hfi.h"
+#include "msm_cvp_dsp.h"
 
 static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
@@ -277,6 +278,9 @@ static int alloc_dma_mem(size_t size, u32 align, u32 flags, int map_kernel,
 	if (flags & SMEM_NON_PIXEL)
 		ion_flags |= ION_FLAG_CP_NON_PIXEL;
 
+	if (flags & SMEM_PIXEL)
+		ion_flags |= ION_FLAG_CP_PIXEL;
+
 	if (flags & SMEM_SECURE) {
 		ion_flags |= ION_FLAG_SECURE;
 		heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);
@@ -292,6 +296,9 @@ static int alloc_dma_mem(size_t size, u32 align, u32 flags, int map_kernel,
 		goto fail_shared_mem_alloc;
 	}
 
+	if (!gfa_cv.dmabuf_f_op)
+		gfa_cv.dmabuf_f_op = (const struct file_operations *)dbuf->file->f_op;
+
 	mem->flags = flags;
 	mem->ion_flags = ion_flags;
 	mem->size = size;
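
For reference, below is a minimal, self-contained userspace sketch of the CPU<->DSP handle packing that __dsp_cvp_sess_create() performs with HIGH32/LOW32 and that ptr_dsp2cpu() reverses. pack_handle()/unpack_handle() and the round-trip check in main() are illustrative names for this sketch only, not driver functions.

```c
/* Sketch of the session-handle split carried over the CPU<->DSP rpmsg link. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HIGH32 (0xFFFFFFFF00000000LL)
#define LOW32  (0xFFFFFFFFLL)

/* Split a CPU pointer into the two 32-bit words sent in cvp_dsp_cmd_msg. */
static void pack_handle(void *inst, uint32_t *cpu_high, uint32_t *cpu_low)
{
	uint64_t handle = (uint64_t)(uintptr_t)inst;

	*cpu_high = (uint32_t)((handle & HIGH32) >> 32);
	*cpu_low = (uint32_t)(handle & LOW32);
}

/*
 * Reassemble the pointer, mirroring ptr_dsp2cpu(); the error branch for a
 * high/low combination that cannot match the pointer width is omitted here.
 */
static void *unpack_handle(uint32_t cpu_high, uint32_t cpu_low)
{
	if (cpu_high == 0 && sizeof(void *) == 4)
		return (void *)(uintptr_t)cpu_low;

	return (void *)(uintptr_t)(((uint64_t)cpu_high << 32) | cpu_low);
}

int main(void)
{
	int dummy;
	uint32_t high, low;

	pack_handle(&dummy, &high, &low);
	assert(unpack_handle(high, low) == (void *)&dummy);
	printf("high 0x%x low 0x%x round-trips\n",
	       (unsigned int)high, (unsigned int)low);
	return 0;
}
```

The two words travel in cvp_dsp_cmd_msg.session_cpu_high/session_cpu_low and come back in cvp_dsp2cpu_cmd_msg; since the patch's own comment calls this a "Dirty Fix", validating the reassembled pointer against the known session list before dereferencing it would be a reasonable follow-up, not done in this patch.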