msm: camera: mem_mgr: Cache invalidation optimization

During unmap, these internal buffers are freed immediately
afterwards, so there is no need to perform cache maintenance operations.

CRs-Fixed: 2627074
Change-Id: Ica4ebb93eb212604268c3450a92d075197783537
Signed-off-by: Karthik Jayakumar <kjayakum@codeaurora.org>
This commit is contained in:
Karthik Jayakumar
2020-02-21 13:39:59 -08:00
parent c4d16b525c
commit a68684d77c
5 changed files with 52 additions and 10 deletions

View File

@@ -196,6 +196,7 @@ static void cam_mem_put_slot(int32_t idx)
mutex_lock(&tbl.m_lock); mutex_lock(&tbl.m_lock);
mutex_lock(&tbl.bufq[idx].q_lock); mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].active = false; tbl.bufq[idx].active = false;
tbl.bufq[idx].is_internal = false;
mutex_unlock(&tbl.bufq[idx].q_lock); mutex_unlock(&tbl.bufq[idx].q_lock);
mutex_destroy(&tbl.bufq[idx].q_lock); mutex_destroy(&tbl.bufq[idx].q_lock);
clear_bit(idx, tbl.bitmap); clear_bit(idx, tbl.bitmap);
@@ -540,7 +541,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
int fd, int fd,
dma_addr_t *hw_vaddr, dma_addr_t *hw_vaddr,
size_t *len, size_t *len,
enum cam_smmu_region_id region) enum cam_smmu_region_id region,
bool is_internal)
{ {
int i; int i;
int rc = -1; int rc = -1;
@@ -582,7 +584,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
dir, dir,
(dma_addr_t *)hw_vaddr, (dma_addr_t *)hw_vaddr,
len, len,
region); region,
is_internal);
if (rc < 0) { if (rc < 0) {
CAM_ERR(CAM_MEM, CAM_ERR(CAM_MEM,
@@ -675,7 +678,8 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
fd, fd,
&hw_vaddr, &hw_vaddr,
&len, &len,
region); region,
true);
if (rc) { if (rc) {
CAM_ERR(CAM_MEM, CAM_ERR(CAM_MEM,
@@ -691,6 +695,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
tbl.bufq[idx].dma_buf = NULL; tbl.bufq[idx].dma_buf = NULL;
tbl.bufq[idx].flags = cmd->flags; tbl.bufq[idx].flags = cmd->flags;
tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd); tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
tbl.bufq[idx].is_internal = true;
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true); CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
@@ -733,6 +738,23 @@ slot_fail:
return rc; return rc;
} }
/*
 * cam_mem_util_is_map_internal - report whether an fd belongs to a
 * kernel-allocated (internal) buffer.
 *
 * Scans the active slots of the buffer table under the table lock and
 * returns the is_internal flag of the first slot whose fd matches.
 * Returns false when no active slot references @fd.
 */
static bool cam_mem_util_is_map_internal(int32_t fd)
{
	uint32_t slot;
	bool found_internal = false;

	mutex_lock(&tbl.m_lock);
	/* Only slots with their bitmap bit set are active entries. */
	for_each_set_bit(slot, tbl.bitmap, tbl.bits) {
		if (tbl.bufq[slot].fd != fd)
			continue;
		found_internal = tbl.bufq[slot].is_internal;
		break;
	}
	mutex_unlock(&tbl.m_lock);

	return found_internal;
}
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd) int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{ {
int32_t idx; int32_t idx;
@@ -740,6 +762,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
struct dma_buf *dmabuf; struct dma_buf *dmabuf;
dma_addr_t hw_vaddr = 0; dma_addr_t hw_vaddr = 0;
size_t len = 0; size_t len = 0;
bool is_internal = false;
if (!atomic_read(&cam_mem_mgr_state)) { if (!atomic_read(&cam_mem_mgr_state)) {
CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized"); CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -769,6 +792,8 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
return -EINVAL; return -EINVAL;
} }
is_internal = cam_mem_util_is_map_internal(cmd->fd);
if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) || if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) { (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
rc = cam_mem_util_map_hw_va(cmd->flags, rc = cam_mem_util_map_hw_va(cmd->flags,
@@ -777,7 +802,8 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
cmd->fd, cmd->fd,
&hw_vaddr, &hw_vaddr,
&len, &len,
CAM_SMMU_REGION_IO); CAM_SMMU_REGION_IO,
is_internal);
if (rc) { if (rc) {
CAM_ERR(CAM_MEM, CAM_ERR(CAM_MEM,
"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d", "Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
@@ -813,6 +839,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls, memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
sizeof(int32_t) * cmd->num_hdl); sizeof(int32_t) * cmd->num_hdl);
tbl.bufq[idx].is_imported = true; tbl.bufq[idx].is_imported = true;
tbl.bufq[idx].is_internal = is_internal;
mutex_unlock(&tbl.bufq[idx].q_lock); mutex_unlock(&tbl.bufq[idx].q_lock);
cmd->out.buf_handle = tbl.bufq[idx].buf_handle; cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
@@ -939,6 +966,7 @@ static int cam_mem_mgr_cleanup_table(void)
tbl.bufq[i].num_hdl = 0; tbl.bufq[i].num_hdl = 0;
tbl.bufq[i].dma_buf = NULL; tbl.bufq[i].dma_buf = NULL;
tbl.bufq[i].active = false; tbl.bufq[i].active = false;
tbl.bufq[i].is_internal = false;
mutex_unlock(&tbl.bufq[i].q_lock); mutex_unlock(&tbl.bufq[i].q_lock);
mutex_destroy(&tbl.bufq[i].q_lock); mutex_destroy(&tbl.bufq[i].q_lock);
} }
@@ -1034,6 +1062,7 @@ static int cam_mem_util_unmap(int32_t idx,
tbl.bufq[idx].fd = -1; tbl.bufq[idx].fd = -1;
tbl.bufq[idx].dma_buf = NULL; tbl.bufq[idx].dma_buf = NULL;
tbl.bufq[idx].is_imported = false; tbl.bufq[idx].is_imported = false;
tbl.bufq[idx].is_internal = false;
tbl.bufq[idx].len = 0; tbl.bufq[idx].len = 0;
tbl.bufq[idx].num_hdl = 0; tbl.bufq[idx].num_hdl = 0;
tbl.bufq[idx].active = false; tbl.bufq[idx].active = false;

View File

@@ -41,6 +41,7 @@ enum cam_smmu_mapping_client {
* @kmdvaddr: Kernel virtual address * @kmdvaddr: Kernel virtual address
* @active: state of the buffer * @active: state of the buffer
* @is_imported: Flag indicating if buffer is imported from an FD in user space * @is_imported: Flag indicating if buffer is imported from an FD in user space
* @is_internal: Flag indicating kernel allocated buffer
*/ */
struct cam_mem_buf_queue { struct cam_mem_buf_queue {
struct dma_buf *dma_buf; struct dma_buf *dma_buf;
@@ -56,6 +57,7 @@ struct cam_mem_buf_queue {
uintptr_t kmdvaddr; uintptr_t kmdvaddr;
bool active; bool active;
bool is_imported; bool is_imported;
bool is_internal;
}; };
/** /**

View File

@@ -7,6 +7,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/types.h>
#include <mm/slab.h> #include <mm/slab.h>

View File

@@ -197,6 +197,7 @@ struct cam_dma_buff_info {
int ion_fd; int ion_fd;
size_t len; size_t len;
size_t phys_len; size_t phys_len;
bool is_internal;
}; };
struct cam_sec_buff_info { struct cam_sec_buff_info {
@@ -249,7 +250,7 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd, static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
bool dis_delayed_unmap, enum dma_data_direction dma_dir, bool dis_delayed_unmap, enum dma_data_direction dma_dir,
dma_addr_t *paddr_ptr, size_t *len_ptr, dma_addr_t *paddr_ptr, size_t *len_ptr,
enum cam_smmu_region_id region_id); enum cam_smmu_region_id region_id, bool is_internal);
static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx, static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
struct dma_buf *buf, enum dma_data_direction dma_dir, struct dma_buf *buf, enum dma_data_direction dma_dir,
@@ -1997,7 +1998,7 @@ err_out:
static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd, static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
bool dis_delayed_unmap, enum dma_data_direction dma_dir, bool dis_delayed_unmap, enum dma_data_direction dma_dir,
dma_addr_t *paddr_ptr, size_t *len_ptr, dma_addr_t *paddr_ptr, size_t *len_ptr,
enum cam_smmu_region_id region_id) enum cam_smmu_region_id region_id, bool is_internal)
{ {
int rc = -1; int rc = -1;
struct cam_dma_buff_info *mapping_info = NULL; struct cam_dma_buff_info *mapping_info = NULL;
@@ -2015,6 +2016,7 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
} }
mapping_info->ion_fd = ion_fd; mapping_info->ion_fd = ion_fd;
mapping_info->is_internal = is_internal;
/* add to the list */ /* add to the list */
list_add(&mapping_info->list, list_add(&mapping_info->list,
&iommu_cb_set.cb_info[idx].smmu_buf_list); &iommu_cb_set.cb_info[idx].smmu_buf_list);
@@ -2118,6 +2120,9 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
iommu_cb_set.cb_info[idx].io_mapping_size -= mapping_info->len; iommu_cb_set.cb_info[idx].io_mapping_size -= mapping_info->len;
} }
if (mapping_info->is_internal)
mapping_info->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
dma_buf_unmap_attachment(mapping_info->attach, dma_buf_unmap_attachment(mapping_info->attach,
mapping_info->table, mapping_info->dir); mapping_info->table, mapping_info->dir);
dma_buf_detach(mapping_info->buf, mapping_info->attach); dma_buf_detach(mapping_info->buf, mapping_info->attach);
@@ -2877,7 +2882,8 @@ static int cam_smmu_map_iova_validate_params(int handle,
int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap, int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
size_t *len_ptr, enum cam_smmu_region_id region_id) size_t *len_ptr, enum cam_smmu_region_id region_id,
bool is_internal)
{ {
int idx, rc = 0; int idx, rc = 0;
enum cam_smmu_buf_state buf_state; enum cam_smmu_buf_state buf_state;
@@ -2927,7 +2933,8 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
} }
rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd,
dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr, region_id); dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr,
region_id, is_internal);
if (rc < 0) { if (rc < 0) {
CAM_ERR(CAM_SMMU, CAM_ERR(CAM_SMMU,
"mapping or add list fail, idx=%d, fd=%d, region=%d, rc=%d", "mapping or add list fail, idx=%d, fd=%d, region=%d, rc=%d",

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*/ */
#ifndef _CAM_SMMU_API_H_ #ifndef _CAM_SMMU_API_H_
@@ -111,11 +111,13 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
* CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter * CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter
* which specifies the cpu virtual address to map. * which specifies the cpu virtual address to map.
* @len_ptr : Length of buffer mapped returned by CAM SMMU driver. * @len_ptr : Length of buffer mapped returned by CAM SMMU driver.
* @region_id : Memory region identifier
* @is_internal: Specifies if this buffer is kernel allocated.
* @return Status of operation. Negative in case of error. Zero otherwise. * @return Status of operation. Negative in case of error. Zero otherwise.
*/ */
int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap, int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
enum cam_smmu_region_id region_id); enum cam_smmu_region_id region_id, bool is_internal);
/** /**
* @brief : Maps kernel space IOVA for calling driver * @brief : Maps kernel space IOVA for calling driver
@@ -129,6 +131,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
* CAM_SMMU_REGION_SHARED, dma_addr is used as an input * CAM_SMMU_REGION_SHARED, dma_addr is used as an input
* parameter which specifies the cpu virtual address to map. * parameter which specifies the cpu virtual address to map.
* @len_ptr : Length of buffer mapped returned by CAM SMMU driver. * @len_ptr : Length of buffer mapped returned by CAM SMMU driver.
* @region_id : Memory region identifier
* @return Status of operation. Negative in case of error. Zero otherwise. * @return Status of operation. Negative in case of error. Zero otherwise.
*/ */
int cam_smmu_map_kernel_iova(int handle, int cam_smmu_map_kernel_iova(int handle,