diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c
index edbf0e55f2..528e35bc9c 100755
--- a/dsp/adsprpc.c
+++ b/dsp/adsprpc.c
@@ -5559,9 +5559,10 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
 
 	spin_lock_irqsave(&fl->apps->hlock, irq_flags);
 	is_locked = true;
-	if (!fl->is_ramdump_pend) {
+	if (fl->is_dma_invoke_pend)
+		wait_for_completion(&fl->dma_invoke);
+	if (!fl->is_ramdump_pend)
 		goto skip_dump_wait;
-	}
 	is_locked = false;
 	spin_unlock_irqrestore(&fl->apps->hlock, irq_flags);
 	wait_for_completion(&fl->work);
@@ -5573,6 +5574,7 @@ skip_dump_wait:
 	}
 	hlist_del_init(&fl->hn);
 	fl->is_ramdump_pend = false;
+	fl->is_dma_invoke_pend = false;
 	fl->dsp_process_state = PROCESS_CREATE_DEFAULT;
 	is_locked = false;
 	spin_unlock_irqrestore(&fl->apps->hlock, irq_flags);
@@ -5602,6 +5604,7 @@ skip_dump_wait:
 		kfree(fl->hdr_bufs);
 	if (!IS_ERR_OR_NULL(fl->pers_hdr_buf))
 		fastrpc_buf_free(fl->pers_hdr_buf, 0);
+	mutex_lock(&fl->internal_map_mutex);
 	mutex_lock(&fl->map_mutex);
 	do {
 		lmap = NULL;
@@ -5613,6 +5616,7 @@ skip_dump_wait:
 		fastrpc_mmap_free(lmap, 1);
 	} while (lmap);
 	mutex_unlock(&fl->map_mutex);
+	mutex_unlock(&fl->internal_map_mutex);
 
 	if (fl->device && is_driver_closed)
 		device_unregister(&fl->device->dev);
@@ -6032,6 +6036,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	fl->exit_notif = false;
 	fl->exit_async = false;
 	init_completion(&fl->work);
+	init_completion(&fl->dma_invoke);
 	fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE;
 	filp->private_data = fl;
 	mutex_init(&fl->internal_map_mutex);
@@ -8084,8 +8089,7 @@ union fastrpc_dev_param {
 	struct fastrpc_dev_unmap_dma *unmap;
 };
 
-long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num,
-				unsigned long invoke_param)
+long fastrpc_dev_map_dma(struct fastrpc_device *dev, unsigned long invoke_param)
 {
 	int err = 0;
 	union fastrpc_dev_param p;
@@ -8095,86 +8099,138 @@ long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num,
 	uintptr_t raddr = 0;
 	unsigned long irq_flags = 0;
 
+	p.map = (struct fastrpc_dev_map_dma *)invoke_param;
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	/* Verify if fastrpc device is closed*/
+	VERIFY(err, dev && !dev->dev_close);
+	if (err) {
+		err = -ESRCH;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		return err;
+	}
+	fl = dev->fl;
+	/* Verify if fastrpc file is not NULL*/
+	if (!fl) {
+		err = -EBADF;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		return err;
+	}
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+	mutex_lock(&fl->internal_map_mutex);
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	/* Verify if fastrpc file is being closed, holding device lock*/
+	if (fl->file_close) {
+		err = -ESRCH;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		goto bail;
+	}
+	fl->is_dma_invoke_pend = true;
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+	mutex_lock(&fl->map_mutex);
+	/* Map DMA buffer on SMMU device*/
+	err = fastrpc_mmap_create(fl, -1, p.map->buf,
+				p.map->attrs, 0, p.map->size,
+				ADSP_MMAP_DMA_BUFFER, &map);
+	mutex_unlock(&fl->map_mutex);
+	if (err)
+		goto bail;
+	/* Map DMA buffer on DSP*/
+	VERIFY(err, 0 == (err = fastrpc_mmap_on_dsp(fl,
+		map->flags, 0, map->phys, map->size, map->refs, &raddr)));
+	if (err)
+		goto bail;
+	map->raddr = raddr;
+	p.map->v_dsp_addr = raddr;
+bail:
+	if (err && map) {
+		mutex_lock(&fl->map_mutex);
+		fastrpc_mmap_free(map, 0);
+		mutex_unlock(&fl->map_mutex);
+	}
+	if (fl) {
+		spin_lock_irqsave(&me->hlock, irq_flags);
+		if (fl->file_close && fl->is_dma_invoke_pend)
+			complete(&fl->dma_invoke);
+		fl->is_dma_invoke_pend = false;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+	}
+	mutex_unlock(&fl->internal_map_mutex);
+	return err;
+}
+
+long fastrpc_dev_unmap_dma(struct fastrpc_device *dev, unsigned long invoke_param)
+{
+	int err = 0;
+	union fastrpc_dev_param p;
+	struct fastrpc_file *fl = NULL;
+	struct fastrpc_mmap *map = NULL;
+	struct fastrpc_apps *me = &gfa;
+	unsigned long irq_flags = 0;
+
+	p.unmap = (struct fastrpc_dev_unmap_dma *)invoke_param;
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	/* Verify if fastrpc device is closed*/
+	VERIFY(err, dev && !dev->dev_close);
+	if (err) {
+		err = -ESRCH;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		return err;
+	}
+	fl = dev->fl;
+	/* Verify if fastrpc file is not NULL*/
+	if (!fl) {
+		err = -EBADF;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		return err;
+	}
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+	mutex_lock(&fl->internal_map_mutex);
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	/* Verify if fastrpc file is being closed, holding device lock*/
+	if (fl->file_close) {
+		err = -ESRCH;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		goto bail;
+	}
+	fl->is_dma_invoke_pend = true;
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+	mutex_lock(&fl->map_mutex);
+	if (!fastrpc_mmap_find(fl, -1, p.unmap->buf, 0, 0, ADSP_MMAP_DMA_BUFFER, 0, &map)) {
+		mutex_unlock(&fl->map_mutex);
+		if (err)
+			goto bail;
+		/* Un-map DMA buffer on DSP*/
+		VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr,
+					map->phys, map->size, map->flags)));
+		if (err)
+			goto bail;
+		mutex_lock(&fl->map_mutex);
+		fastrpc_mmap_free(map, 0);
+	}
+	mutex_unlock(&fl->map_mutex);
+bail:
+	if (fl) {
+		spin_lock_irqsave(&me->hlock, irq_flags);
+		if (fl->file_close && fl->is_dma_invoke_pend)
+			complete(&fl->dma_invoke);
+		fl->is_dma_invoke_pend = false;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+	}
+	mutex_unlock(&fl->internal_map_mutex);
+	return err;
+}
+
+long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num,
+				unsigned long invoke_param)
+{
+	int err = 0;
+
 	switch (invoke_num) {
 	case FASTRPC_DEV_MAP_DMA:
-		p.map = (struct fastrpc_dev_map_dma *)invoke_param;
-		spin_lock_irqsave(&me->hlock, irq_flags);
-		/* Verify if fastrpc device is closed*/
-		VERIFY(err, dev && !dev->dev_close);
-		if (err) {
-			err = -ESRCH;
-			spin_unlock_irqrestore(&me->hlock, irq_flags);
-			break;
-		}
-		fl = dev->fl;
-		spin_lock(&fl->hlock);
-		/* Verify if fastrpc file is being closed, holding device lock*/
-		if (fl->file_close) {
-			err = -ESRCH;
-			spin_unlock(&fl->hlock);
-			spin_unlock_irqrestore(&me->hlock, irq_flags);
-			break;
-		}
-		spin_unlock(&fl->hlock);
-		spin_unlock_irqrestore(&me->hlock, irq_flags);
-		mutex_lock(&fl->internal_map_mutex);
-		mutex_lock(&fl->map_mutex);
-		/* Map DMA buffer on SMMU device*/
-		err = fastrpc_mmap_create(fl, -1, p.map->buf,
-			p.map->attrs, 0, p.map->size,
-			ADSP_MMAP_DMA_BUFFER, &map);
-		mutex_unlock(&fl->map_mutex);
-		if (err) {
-			mutex_unlock(&fl->internal_map_mutex);
-			break;
-		}
-		/* Map DMA buffer on DSP*/
-		VERIFY(err, 0 == (err = fastrpc_mmap_on_dsp(fl,
-			map->flags, 0, map->phys, map->size, map->refs, &raddr)));
-		if (err) {
-			mutex_unlock(&fl->internal_map_mutex);
-			break;
-		}
-		map->raddr = raddr;
-		mutex_unlock(&fl->internal_map_mutex);
-		p.map->v_dsp_addr = raddr;
+		err = fastrpc_dev_map_dma(dev, invoke_param);
 		break;
 	case FASTRPC_DEV_UNMAP_DMA:
-		p.unmap = (struct fastrpc_dev_unmap_dma *)invoke_param;
-		spin_lock_irqsave(&me->hlock, irq_flags);
-		/* Verify if fastrpc device is closed*/
-		VERIFY(err, dev && !dev->dev_close);
-		if (err) {
-			err = -ESRCH;
-			spin_unlock_irqrestore(&me->hlock, irq_flags);
-			break;
-		}
-		fl = dev->fl;
-		spin_lock(&fl->hlock);
-		/* Verify if fastrpc file is being closed, holding device lock*/
-		if (fl->file_close) {
-			err = -ESRCH;
-			spin_unlock(&fl->hlock);
-			spin_unlock_irqrestore(&me->hlock, irq_flags);
-			break;
-		}
-		spin_unlock(&fl->hlock);
-		spin_unlock_irqrestore(&me->hlock, irq_flags);
-		mutex_lock(&fl->internal_map_mutex);
-		mutex_lock(&fl->map_mutex);
-		if (!fastrpc_mmap_find(fl, -1, p.unmap->buf, 0, 0, ADSP_MMAP_DMA_BUFFER, 0, &map)) {
-			/* Un-map DMA buffer on DSP*/
-			mutex_unlock(&fl->map_mutex);
-			VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr,
-				map->phys, map->size, map->flags)));
-			if (err) {
-				mutex_unlock(&fl->internal_map_mutex);
-				break;
-			}
-			fastrpc_mmap_free(map, 0);
-		}
-		mutex_unlock(&fl->map_mutex);
-		mutex_unlock(&fl->internal_map_mutex);
+		err = fastrpc_dev_unmap_dma(dev, invoke_param);
 		break;
 	default:
 		err = -ENOTTY;
diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h
index 445eb82c3d..9664ebc158 100644
--- a/dsp/adsprpc_shared.h
+++ b/dsp/adsprpc_shared.h
@@ -1091,6 +1091,10 @@ struct fastrpc_file {
 	struct completion work;
 	/* Flag to indicate ram dump collection status*/
 	bool is_ramdump_pend;
+	/* Process kill will wait on bus driver invoke thread to complete its process */
+	struct completion dma_invoke;
+	/* Flag to indicate invoke pending */
+	bool is_dma_invoke_pend;
 	/* Flag to indicate type of process (static, dynamic) */
 	uint32_t proc_flags;
 	/* If set, threads will poll for DSP response instead of glink wait */
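
Usage sketch (not part of the patch): the refactor keeps the external contract of fastrpc_driver_invoke() unchanged, so a kernel client that already holds a struct fastrpc_device handle still drives mapping and unmapping through the same entry point. The snippet below is illustrative only; the example_map_unmap() wrapper and the "fastrpc.h" include are hypothetical, the field names (buf, attrs, size, v_dsp_addr) follow their usage in the diff, and treating buf as a struct dma_buf * is an assumption about the public header rather than something shown here.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include "fastrpc.h"	/* assumed public header declaring the fastrpc_dev_* types */

static long example_map_unmap(struct fastrpc_device *fdev,
			      struct dma_buf *dmabuf, size_t len)
{
	struct fastrpc_dev_map_dma map_arg = { 0 };
	struct fastrpc_dev_unmap_dma unmap_arg = { 0 };
	long err;

	/* Map the DMA buffer on the SMMU and on the DSP */
	map_arg.buf = dmabuf;
	map_arg.attrs = 0;
	map_arg.size = len;
	err = fastrpc_driver_invoke(fdev, FASTRPC_DEV_MAP_DMA,
				    (unsigned long)&map_arg);
	if (err)
		return err;

	/* On success map_arg.v_dsp_addr holds the DSP-side address (raddr) */

	/* Release the mapping on both the DSP and the SMMU */
	unmap_arg.buf = dmabuf;
	err = fastrpc_driver_invoke(fdev, FASTRPC_DEV_UNMAP_DMA,
				    (unsigned long)&unmap_arg);
	return err;
}

With the patch applied, either call returns -ESRCH once fl->file_close is set, and the helpers complete fl->dma_invoke if a concurrent fastrpc_file_free() is already waiting, which is the teardown race the change addresses.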
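
For context on the locking intent, the synchronization added between fastrpc_file_free() and the two helpers reduces to a pending-flag plus completion handshake. The sketch below shows that pattern in isolation with invented names (obj_state, obj_invoke, obj_teardown); it is not the driver's code, and unlike the hunk at line 5559 it drops its spinlock before sleeping in wait_for_completion().

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct obj_state {
	spinlock_t lock;
	bool closing;			/* mirrors fl->file_close */
	bool invoke_pend;		/* mirrors fl->is_dma_invoke_pend */
	struct completion invoke_done;	/* mirrors fl->dma_invoke */
};

static void obj_init(struct obj_state *st)
{
	spin_lock_init(&st->lock);
	st->closing = false;
	st->invoke_pend = false;
	init_completion(&st->invoke_done);	/* as the patch does in fastrpc_device_open() */
}

static int obj_invoke(struct obj_state *st)
{
	spin_lock(&st->lock);
	if (st->closing) {			/* refuse new work once teardown has begun */
		spin_unlock(&st->lock);
		return -ESRCH;
	}
	st->invoke_pend = true;
	spin_unlock(&st->lock);

	/* ... perform the map/unmap work outside the lock ... */

	spin_lock(&st->lock);
	if (st->closing && st->invoke_pend)
		complete(&st->invoke_done);	/* wake a teardown that started meanwhile */
	st->invoke_pend = false;
	spin_unlock(&st->lock);
	return 0;
}

static void obj_teardown(struct obj_state *st)
{
	spin_lock(&st->lock);
	st->closing = true;
	while (st->invoke_pend) {
		spin_unlock(&st->lock);
		wait_for_completion(&st->invoke_done);	/* wait out the in-flight invoke */
		spin_lock(&st->lock);
	}
	spin_unlock(&st->lock);
	/* only now is it safe to free the per-object state */
}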