Merge "msm: camera: smmu: Use dmabuf inode num as unique identifier" into camera-kernel.lnx.5.0
commit 3c599ca616
Committed by: Gerrit - the friendly Code Review server
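The change below threads one idea through the memory manager and the SMMU driver: a file descriptor does not uniquely identify a dma-buf, because fd numbers are per-process and are recycled once closed, while the inode number of the file backing the dma-buf identifies the buffer for its whole lifetime. Every allocation now records that inode number, every map/unmap entry point gains a struct dma_buf * parameter, and every lookup matches on the (fd, i_ino) pair. A minimal sketch of the pattern the diff applies throughout (the helper names here are illustrative, not part of the patch; file_inode() and struct dma_buf are the kernel APIs the patch itself uses):

    #include <linux/dma-buf.h>
    #include <linux/fs.h>

    /* Derive the buffer's unique identity, exactly as the patch does
     * with file_inode((*buf)->file)->i_ino after each allocation.
     */
    static unsigned long cam_dmabuf_ino(struct dma_buf *dmabuf)
    {
            return file_inode(dmabuf->file)->i_ino;
    }

    /* Lookups that used to match on fd alone now match on the pair,
     * mirroring cam_smmu_find_mapping_by_ion_index() below.
     */
    static bool cam_buf_matches(int fd, unsigned long ino,
            int want_fd, unsigned long want_ino)
    {
            return (fd == want_fd) && (ino == want_ino);
    }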
@@ -58,8 +58,8 @@ static void cam_mem_mgr_print_tbl(void)
 			min = do_div(tmp, 60);
 			hrs = do_div(tmp, 24);
 			CAM_INFO(CAM_MEM,
-				"%llu:%llu:%llu:%llu idx %d fd %d size %llu",
-				hrs, min, sec, ms, i, tbl.bufq[i].fd,
+				"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu",
+				hrs, min, sec, ms, i, tbl.bufq[i].fd, tbl.bufq[i].i_ino,
 				tbl.bufq[i].len);
 		}
 	}
@@ -284,15 +284,15 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
 	}
 
 	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
-		rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd,
+		rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
 			iova_ptr, len_ptr);
 	else
-		rc = cam_smmu_get_iova(mmu_handle, tbl.bufq[idx].fd,
+		rc = cam_smmu_get_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
 			iova_ptr, len_ptr);
 	if (rc) {
 		CAM_ERR(CAM_MEM,
-			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d",
-			buf_handle, mmu_handle, tbl.bufq[idx].fd);
+			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d i_ino:%lu",
+			buf_handle, mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino);
 		goto handle_mismatch;
 	}
 
@@ -300,8 +300,8 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
 	*flags = tbl.bufq[idx].flags;
 
 	CAM_DBG(CAM_MEM,
-		"handle:0x%x fd:%d iova_ptr:0x%llx len_ptr:%llu",
-		mmu_handle, tbl.bufq[idx].fd, iova_ptr, *len_ptr);
+		"handle:0x%x fd:%d i_ino:%lu iova_ptr:0x%llx len_ptr:%llu",
+		mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, iova_ptr, *len_ptr);
 handle_mismatch:
 	mutex_unlock(&tbl.bufq[idx].q_lock);
 	return rc;
@@ -519,7 +519,8 @@ put_heaps:
 
 static int cam_mem_util_get_dma_buf(size_t len,
 	unsigned int cam_flags,
-	struct dma_buf **buf)
+	struct dma_buf **buf,
+	unsigned long *i_ino)
 {
 	int rc = 0;
 	struct dma_heap *heap;
@@ -623,6 +624,8 @@ static int cam_mem_util_get_dma_buf(size_t len,
 		}
 	}
 
+	*i_ino = file_inode((*buf)->file)->i_ino;
+
 	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) ||
 		(cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL)) {
 		if (num_vmids >= CAM_MAX_VMIDS) {
@@ -644,7 +647,7 @@ static int cam_mem_util_get_dma_buf(size_t len,
 		}
 	}
 
-	CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK", len, *buf);
+	CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK, i_ino=%lu", len, *buf, *i_ino);
 
 	if (tbl.alloc_profile_enable) {
 		CAM_GET_TIMESTAMP(ts2);
@@ -661,7 +664,8 @@ end:
 #else
 static int cam_mem_util_get_dma_buf(size_t len,
 	unsigned int cam_flags,
-	struct dma_buf **buf)
+	struct dma_buf **buf,
+	unsigned long *i_ino)
 {
 	int rc = 0;
 	unsigned int heap_id;
@@ -702,6 +706,8 @@ static int cam_mem_util_get_dma_buf(size_t len,
 	if (IS_ERR_OR_NULL(*buf))
 		return -ENOMEM;
 
+	*i_ino = file_inode((*buf)->file)->i_ino;
+
 	if (tbl.alloc_profile_enable) {
 		CAM_GET_TIMESTAMP(ts2);
 		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
@@ -715,12 +721,13 @@ static int cam_mem_util_get_dma_buf(size_t len,
 
 static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
 	struct dma_buf **dmabuf,
-	int *fd)
+	int *fd,
+	unsigned long *i_ino)
 {
 	int rc;
 	struct dma_buf *temp_dmabuf = NULL;
 
-	rc = cam_mem_util_get_dma_buf(len, flags, dmabuf);
+	rc = cam_mem_util_get_dma_buf(len, flags, dmabuf, i_ino);
 	if (rc) {
 		CAM_ERR(CAM_MEM,
 			"Error allocating dma buf : len=%llu, flags=0x%x",
@@ -735,8 +742,8 @@ static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
 		goto put_buf;
 	}
 
-	CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d",
-		len, *dmabuf, *fd);
+	CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d, i_ino=%lu",
+		len, *dmabuf, *fd, *i_ino);
 
 	/*
 	 * increment the ref count so that ref count becomes 2 here
@@ -745,8 +752,8 @@ static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
 	 */
 	temp_dmabuf = dma_buf_get(*fd);
 	if (IS_ERR_OR_NULL(temp_dmabuf)) {
-		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d", *fd);
-		rc = -EINVAL;
+		rc = PTR_ERR(temp_dmabuf);
+		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d, i_ino=%lu, rc=%d", *fd, *i_ino, rc);
 		goto put_buf;
 	}
 
@@ -815,6 +822,7 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 	int32_t *mmu_hdls,
 	int32_t num_hdls,
 	int fd,
+	struct dma_buf *dmabuf,
 	dma_addr_t *hw_vaddr,
 	size_t *len,
 	enum cam_smmu_region_id region,
@@ -839,9 +847,9 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 
 	for (i = 0; i < num_hdls; i++) {
 		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
-			rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dir, hw_vaddr, len);
+			rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dmabuf, dir, hw_vaddr, len);
 		else
-			rc = cam_smmu_map_user_iova(mmu_hdls[i], fd, dis_delayed_unmap, dir,
+			rc = cam_smmu_map_user_iova(mmu_hdls[i], fd, dmabuf, dis_delayed_unmap, dir,
 				hw_vaddr, len, region, is_internal);
 		if (rc) {
 			CAM_ERR(CAM_MEM,
@@ -856,9 +864,9 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 multi_map_fail:
 	for (--i; i >= 0; i--) {
 		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
-			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
+			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dmabuf);
 		else
-			cam_smmu_unmap_user_iova(mmu_hdls[i], fd, CAM_SMMU_REGION_IO);
+			cam_smmu_unmap_user_iova(mmu_hdls[i], fd, dmabuf, CAM_SMMU_REGION_IO);
 	}
 
 	return rc;
@@ -874,6 +882,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 	size_t len;
 	uintptr_t kvaddr = 0;
 	size_t klen;
+	unsigned long i_ino = 0;
 
 	if (!atomic_read(&cam_mem_mgr_state)) {
 		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -901,7 +910,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 		return rc;
 	}
 
-	rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd);
+	rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd, &i_ino);
 	if (rc) {
 		CAM_ERR(CAM_MEM,
 			"Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d",
@@ -911,7 +920,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 	}
 	if (!dmabuf) {
 		CAM_ERR(CAM_MEM,
-			"Ion Alloc return NULL dmabuf! fd=%d, len=%d", fd, len);
+			"Ion Alloc return NULL dmabuf! fd=%d, i_ino=%lu, len=%d", fd, i_ino, len);
 		cam_mem_mgr_print_tbl();
 		return rc;
 	}
@@ -944,6 +953,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 		cmd->mmu_hdls,
 		cmd->num_hdl,
 		fd,
+		dmabuf,
 		&hw_vaddr,
 		&len,
 		region,
@@ -965,6 +975,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	tbl.bufq[idx].fd = fd;
+	tbl.bufq[idx].i_ino = i_ino;
 	tbl.bufq[idx].dma_buf = NULL;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
@@ -999,9 +1010,9 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 	cmd->out.vaddr = 0;
 
 	CAM_DBG(CAM_MEM,
-		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
+		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu",
 		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
-		tbl.bufq[idx].len);
+		tbl.bufq[idx].len, tbl.bufq[idx].i_ino);
 
 	return rc;
 
@@ -1014,14 +1025,14 @@ slot_fail:
 	return rc;
 }
 
-static bool cam_mem_util_is_map_internal(int32_t fd)
+static bool cam_mem_util_is_map_internal(int32_t fd, unsigned i_ino)
 {
 	uint32_t i;
 	bool is_internal = false;
 
 	mutex_lock(&tbl.m_lock);
 	for_each_set_bit(i, tbl.bitmap, tbl.bits) {
-		if (tbl.bufq[i].fd == fd) {
+		if ((tbl.bufq[i].fd == fd) && (tbl.bufq[i].i_ino == i_ino)) {
 			is_internal = tbl.bufq[i].is_internal;
 			break;
 		}
@@ -1039,6 +1050,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 	dma_addr_t hw_vaddr = 0;
 	size_t len = 0;
 	bool is_internal = false;
+	unsigned long i_ino;
 
 	if (!atomic_read(&cam_mem_mgr_state)) {
 		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1068,7 +1080,9 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 		return -EINVAL;
 	}
 
-	is_internal = cam_mem_util_is_map_internal(cmd->fd);
+	i_ino = file_inode(dmabuf->file)->i_ino;
+
+	is_internal = cam_mem_util_is_map_internal(cmd->fd, i_ino);
 
 	idx = cam_mem_get_slot();
 	if (idx < 0) {
@@ -1084,6 +1098,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 		cmd->mmu_hdls,
 		cmd->num_hdl,
 		cmd->fd,
+		dmabuf,
 		&hw_vaddr,
 		&len,
 		CAM_SMMU_REGION_IO,
@@ -1105,6 +1120,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	tbl.bufq[idx].fd = cmd->fd;
+	tbl.bufq[idx].i_ino = i_ino;
 	tbl.bufq[idx].dma_buf = NULL;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
@@ -1130,9 +1146,9 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 	cmd->out.vaddr = 0;
 	cmd->out.size = (uint32_t)len;
 	CAM_DBG(CAM_MEM,
-		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
+		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu",
 		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
-		tbl.bufq[idx].len);
+		tbl.bufq[idx].len, tbl.bufq[idx].i_ino);
 
 	return rc;
 map_fail:
@@ -1151,6 +1167,8 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
 	int32_t *mmu_hdls;
 	int num_hdls;
 	int fd;
+	struct dma_buf *dma_buf;
+	unsigned long i_ino;
 	int rc = 0;
 
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
@@ -1162,18 +1180,20 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
 	mmu_hdls = tbl.bufq[idx].hdls;
 	num_hdls = tbl.bufq[idx].num_hdl;
 	fd = tbl.bufq[idx].fd;
+	dma_buf = tbl.bufq[idx].dma_buf;
+	i_ino = tbl.bufq[idx].i_ino;
 
 	CAM_DBG(CAM_MEM,
-		"unmap_hw_va : idx=%d, fd=%x, flags=0x%x, num_hdls=%d, client=%d",
-		idx, fd, flags, num_hdls, client);
+		"unmap_hw_va : idx=%d, fd=%x, i_ino=%lu flags=0x%x, num_hdls=%d, client=%d",
+		idx, fd, i_ino, flags, num_hdls, client);
 
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
+			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dma_buf);
 			if (rc < 0) {
 				CAM_ERR(CAM_MEM,
-					"Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d",
-					i, fd, mmu_hdls[i], rc);
+					"Failed in secure unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, rc=%d",
+					i, fd, i_ino, mmu_hdls[i], rc);
 				goto unmap_end;
 			}
 		}
@@ -1181,7 +1201,7 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
 		for (i = 0; i < num_hdls; i++) {
 			if (client == CAM_SMMU_MAPPING_USER) {
 				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
-					fd, region);
+					fd, dma_buf, region);
 			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
 				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
 					tbl.bufq[idx].dma_buf, region);
@@ -1193,8 +1213,8 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
 			}
 			if (rc < 0) {
 				CAM_ERR(CAM_MEM,
-					"Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d",
-					i, fd, mmu_hdls[i], region, rc);
+					"Failed in unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, region=%d, rc=%d",
+					i, fd, i_ino, mmu_hdls[i], region, rc);
 				goto unmap_end;
 			}
 		}
@@ -1246,6 +1266,7 @@ static int cam_mem_mgr_cleanup_table(void)
 			tbl.bufq[i].dma_buf = NULL;
 		}
 		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].i_ino = 0;
 		tbl.bufq[i].flags = 0;
 		tbl.bufq[i].buf_handle = -1;
 		tbl.bufq[i].vaddr = 0;
@@ -1337,6 +1358,10 @@ static int cam_mem_util_unmap(int32_t idx,
 		if (cam_mem_util_unmap_hw_va(idx, region, client))
 			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
 				tbl.bufq[idx].dma_buf);
+		/*
+		 * Workaround as smmu driver doing put_buf without get_buf for kernel mappings
+		 * Setting NULL here so that we dont call dma_buf_put again below
+		 */
 		if (client == CAM_SMMU_MAPPING_KERNEL)
 			tbl.bufq[idx].dma_buf = NULL;
 	}
@@ -1349,15 +1374,15 @@ static int cam_mem_util_unmap(int32_t idx,
 		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
 
 	CAM_DBG(CAM_MEM,
-		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK",
-		idx, tbl.bufq[idx].fd,
-		tbl.bufq[idx].is_imported,
-		tbl.bufq[idx].dma_buf);
+		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK, i_ino %lu",
+		idx, tbl.bufq[idx].fd, tbl.bufq[idx].is_imported, tbl.bufq[idx].dma_buf,
+		tbl.bufq[idx].i_ino);
 
 	if (tbl.bufq[idx].dma_buf)
 		dma_buf_put(tbl.bufq[idx].dma_buf);
 
 	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].i_ino = 0;
 	tbl.bufq[idx].dma_buf = NULL;
 	tbl.bufq[idx].is_imported = false;
 	tbl.bufq[idx].is_internal = false;
@@ -1425,6 +1450,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
 	int32_t idx;
 	int32_t smmu_hdl = 0;
 	int32_t num_hdl = 0;
+	unsigned long i_ino = 0;
 
 	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
 
@@ -1445,7 +1471,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
 		return -EINVAL;
 	}
 
-	rc = cam_mem_util_get_dma_buf(inp->size, inp->flags, &buf);
+	rc = cam_mem_util_get_dma_buf(inp->size, inp->flags, &buf, &i_ino);
 
 	if (rc) {
 		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
@@ -1508,6 +1534,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
 	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
 	tbl.bufq[idx].dma_buf = buf;
 	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].i_ino = i_ino;
 	tbl.bufq[idx].flags = inp->flags;
 	tbl.bufq[idx].buf_handle = mem_handle;
 	tbl.bufq[idx].kmdvaddr = kvaddr;
@@ -1528,6 +1555,9 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
 	out->len = inp->size;
 	out->region = region;
 
+	CAM_DBG(CAM_MEM, "idx=%d, dmabuf=%pK, i_ino=%lu, flags=0x%x, mem_handle=0x%x",
+		idx, buf, i_ino, inp->flags, mem_handle);
+
 	return rc;
 slot_fail:
 	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
@@ -1598,6 +1628,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	int32_t smmu_hdl = 0;
 	int32_t num_hdl = 0;
 	uintptr_t kvaddr = 0;
+	unsigned long i_ino = 0;
 
 	if (!atomic_read(&cam_mem_mgr_state)) {
 		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1620,7 +1651,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 		return -EINVAL;
 	}
 
-	rc = cam_mem_util_get_dma_buf(inp->size, 0, &buf);
+	rc = cam_mem_util_get_dma_buf(inp->size, 0, &buf, &i_ino);
 
 	if (rc) {
 		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
@@ -1661,6 +1692,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
 	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].i_ino = i_ino;
 	tbl.bufq[idx].dma_buf = buf;
 	tbl.bufq[idx].flags = inp->flags;
 	tbl.bufq[idx].buf_handle = mem_handle;
@@ -34,6 +34,7 @@ enum cam_smmu_mapping_client {
  * @hdls: list of mapped handles
  * @num_hdl: number of handles
  * @fd: file descriptor of buffer
+ * @i_ino: inode number of this dmabuf. Uniquely identifies a buffer
  * @buf_handle: unique handle for buffer
  * @align: alignment for allocation
  * @len: size of buffer
@@ -51,6 +52,7 @@ struct cam_mem_buf_queue {
 	int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
 	int32_t num_hdl;
 	int32_t fd;
+	unsigned long i_ino;
 	int32_t buf_handle;
 	int32_t align;
 	size_t len;
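The new i_ino field above is what disambiguates table entries whose fd values collide. The collision is easy to reproduce with ordinary files in user space (an analogy, not camera-kernel code): close() frees the descriptor number, and the next open() typically hands the same number back for a different file, while the inode numbers still tell the two apart.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            struct stat st;
            int fd = open("/tmp/a", O_CREAT | O_RDWR, 0600);

            fstat(fd, &st);
            printf("fd=%d ino=%lu\n", fd, (unsigned long)st.st_ino);
            close(fd);

            /* The lowest free descriptor is reused: same fd, new file. */
            fd = open("/tmp/b", O_CREAT | O_RDWR, 0600);
            fstat(fd, &st);
            printf("fd=%d ino=%lu\n", fd, (unsigned long)st.st_ino);
            close(fd);
            return 0;
    }

The same recycling applies to dma-buf fds handed to the camera driver, which is why the SMMU bookkeeping below stops trusting the fd by itself.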
@@ -112,6 +112,7 @@ struct cam_smmu_monitor {
 
 	/* map-unmap info */
 	int ion_fd;
+	unsigned long i_ino;
 	dma_addr_t paddr;
 	size_t len;
 	enum cam_smmu_region_id region_id;
@@ -210,6 +211,7 @@ struct cam_dma_buff_info {
 	dma_addr_t paddr;
 	struct list_head list;
 	int ion_fd;
+	unsigned long i_ino;
 	size_t len;
 	size_t phys_len;
 	bool is_internal;
@@ -223,6 +225,7 @@ struct cam_sec_buff_info {
 	dma_addr_t paddr;
 	struct list_head list;
 	int ion_fd;
+	unsigned long i_ino;
 	size_t len;
 };
 
@@ -241,13 +244,13 @@ static int cam_smmu_create_add_handle_in_table(char *name,
 	int *hdl);
 
 static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
-	int ion_fd);
+	int ion_fd, struct dma_buf *dma_buf);
 
 static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
 	struct dma_buf *buf);
 
 static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
-	int ion_fd);
+	int ion_fd, struct dma_buf *dma_buf);
 
 static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
 	dma_addr_t base, size_t size,
@@ -315,6 +318,7 @@ static void cam_smmu_update_monitor_array(
 
 	cb_info->monitor_entries[iterator].is_map = is_map;
 	cb_info->monitor_entries[iterator].ion_fd = mapping_info->ion_fd;
+	cb_info->monitor_entries[iterator].i_ino = mapping_info->i_ino;
 	cb_info->monitor_entries[iterator].paddr = mapping_info->paddr;
 	cb_info->monitor_entries[iterator].len = mapping_info->len;
 	cb_info->monitor_entries[iterator].region_id = mapping_info->region_id;
@@ -357,11 +361,12 @@ static void cam_smmu_dump_monitor_array(
 		hrs = do_div(tmp, 24);
 
 		CAM_INFO(CAM_SMMU,
-			"**** %llu:%llu:%llu.%llu : Index[%d] [%s] : ion_fd=%d start=0x%llx end=0x%llx len=%zu region=%d",
+			"**** %llu:%llu:%llu.%llu : Index[%d] [%s] : ion_fd=%d i_ino=%lu start=0x%llx end=0x%llx len=%zu region=%d",
 			hrs, min, sec, ms,
 			index,
 			cb_info->monitor_entries[index].is_map ? "MAP" : "UNMAP",
 			cb_info->monitor_entries[index].ion_fd,
+			cb_info->monitor_entries[index].i_ino,
 			cb_info->monitor_entries[index].paddr,
 			cb_info->monitor_entries[index].paddr +
 			cb_info->monitor_entries[index].len,
@@ -581,8 +586,8 @@ static void cam_smmu_dump_cb_info(int idx)
 			min = do_div(tmp, 60);
 			hrs = do_div(tmp, 24);
 			CAM_ERR(CAM_SMMU,
-				"%llu:%llu:%llu:%llu: %u ion_fd=%d start=0x%x end=0x%x len=%u region=%d",
-				hrs, min, sec, ms, i, mapping->ion_fd,
+				"%llu:%llu:%llu:%llu: %u ion_fd=%d i_ino=%lu start=0x%x end=0x%x len=%u region=%d",
+				hrs, min, sec, ms, i, mapping->ion_fd, mapping->i_ino,
 				(void *)mapping->paddr,
 				((uint64_t)mapping->paddr +
 				(uint64_t)mapping->len),
@@ -602,8 +607,8 @@ static void cam_smmu_print_user_list(int idx)
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
 		CAM_ERR(CAM_SMMU,
-			"ion_fd = %d, paddr= 0x%pK, len = %u, region = %d",
-			mapping->ion_fd, (void *)mapping->paddr,
+			"ion_fd = %d, i_ino=%lu, paddr= 0x%pK, len = %u, region = %d",
+			mapping->ion_fd, mapping->i_ino, (void *)mapping->paddr,
 			(unsigned int)mapping->len,
 			mapping->region_id);
 	}
@@ -617,8 +622,8 @@ static void cam_smmu_print_kernel_list(int idx)
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
 		CAM_ERR(CAM_SMMU,
-			"dma_buf = %pK, paddr= 0x%pK, len = %u, region = %d",
-			mapping->buf, (void *)mapping->paddr,
+			"dma_buf = %pK, i_ino = %lu, paddr= 0x%pK, len = %u, region = %d",
+			mapping->buf, mapping->i_ino, (void *)mapping->paddr,
 			(unsigned int)mapping->len,
 			mapping->region_id);
 	}
@@ -657,9 +662,9 @@ static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr)
 		if (start_addr <= current_addr && current_addr <= end_addr) {
 			closest_mapping = mapping;
 			CAM_INFO(CAM_SMMU,
-				"Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s",
+				"Found va 0x%lx in:0x%lx-0x%lx, fd %d i_ino %lu cb:%s",
 				current_addr, start_addr,
-				end_addr, mapping->ion_fd,
+				end_addr, mapping->ion_fd, mapping->i_ino,
 				iommu_cb_set.cb_info[idx].name[0]);
 			goto end;
 		} else {
@@ -673,9 +678,9 @@ static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr)
 				closest_mapping = mapping;
 			}
 			CAM_DBG(CAM_SMMU,
-				"approx va %lx not in range: %lx-%lx fd = %0x",
+				"approx va %lx not in range: %lx-%lx fd = %0x i_ino %lu",
 				current_addr, start_addr,
-				end_addr, mapping->ion_fd);
+				end_addr, mapping->ion_fd, mapping->i_ino);
 		}
 	}
 
@@ -683,8 +688,8 @@ end:
 	if (closest_mapping) {
 		buf_handle = GET_MEM_HANDLE(idx, closest_mapping->ion_fd);
 		CAM_INFO(CAM_SMMU,
-			"Closest map fd %d 0x%lx %llu-%llu 0x%lx-0x%lx buf=%pK mem %0x",
-			closest_mapping->ion_fd, current_addr,
+			"Closest map fd %d i_ino %lu 0x%lx %llu-%llu 0x%lx-0x%lx buf=%pK mem %0x",
+			closest_mapping->ion_fd, closest_mapping->i_ino, current_addr,
 			mapping->len, closest_mapping->len,
 			(unsigned long)closest_mapping->paddr,
 			(unsigned long)closest_mapping->paddr + mapping->len,
@@ -1164,25 +1169,29 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
 }
 
 static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
-	int ion_fd)
+	int ion_fd, struct dma_buf *dmabuf)
 {
 	struct cam_dma_buff_info *mapping;
+	unsigned long i_ino;
 
 	if (ion_fd < 0) {
 		CAM_ERR(CAM_SMMU, "Invalid fd %d", ion_fd);
 		return NULL;
 	}
 
+	i_ino = file_inode(dmabuf->file)->i_ino;
+
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list,
 		list) {
-		if (mapping->ion_fd == ion_fd) {
-			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
+			CAM_DBG(CAM_SMMU, "find ion_fd %d i_ino %lu", ion_fd, i_ino);
 			return mapping;
 		}
 	}
 
-	CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+	CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d, fd %d i_ino %lu",
+		idx, ion_fd, i_ino);
 
 	return NULL;
 }
@@ -1212,19 +1221,22 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
 }
 
 static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
-	int ion_fd)
+	int ion_fd, struct dma_buf *dmabuf)
 {
 	struct cam_sec_buff_info *mapping;
+	unsigned long i_ino;
+
+	i_ino = file_inode(dmabuf->file)->i_ino;
 
 	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
 		list) {
-		if (mapping->ion_fd == ion_fd) {
-			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
+			CAM_DBG(CAM_SMMU, "find ion_fd %d, i_ino %lu", ion_fd, i_ino);
 			return mapping;
 		}
 	}
-	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
-		ion_fd, idx);
+	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d i_ino %lu by index %d",
+		ion_fd, i_ino, idx);
 	return NULL;
 }
 
@@ -1235,9 +1247,9 @@ static void cam_smmu_clean_user_buffer_list(int idx)
 
 	list_for_each_entry_safe(mapping_info, temp,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d",
+		CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d, i_ino = %lu",
 			(void *)mapping_info->paddr, idx,
-			mapping_info->ion_fd);
+			mapping_info->ion_fd, mapping_info->i_ino);
 
 		if (mapping_info->ion_fd == 0xDEADBEEF)
 			/* Clean up scratch buffers */
@@ -1253,9 +1265,9 @@ static void cam_smmu_clean_user_buffer_list(int idx)
 			CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d",
 				idx);
 			CAM_ERR(CAM_SMMU,
-				"Buffer delete failed: addr = %lx, fd = %d",
+				"Buffer delete failed: addr = %lx, fd = %d, i_ino = %lu",
 				(unsigned long)mapping_info->paddr,
-				mapping_info->ion_fd);
+				mapping_info->ion_fd, mapping_info->i_ino);
 			/*
 			 * Ignore this error and continue to delete other
 			 * buffers in the list
@@ -2269,12 +2281,15 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	}
 
 	mapping_info->ion_fd = ion_fd;
+	mapping_info->i_ino = file_inode(buf->file)->i_ino;
 	mapping_info->is_internal = is_internal;
 	ktime_get_real_ts64(&mapping_info->ts);
 	/* add to the list */
 	list_add(&mapping_info->list,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list);
 
+	CAM_DBG(CAM_SMMU, "fd %d i_ino %lu dmabuf %pK", ion_fd, mapping_info->i_ino, buf);
+
 	cam_smmu_update_monitor_array(&iommu_cb_set.cb_info[idx], true,
 		mapping_info);
 
@@ -2298,11 +2313,15 @@ static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
 	}
 
 	mapping_info->ion_fd = -1;
+	mapping_info->i_ino = file_inode(buf->file)->i_ino;
 	ktime_get_real_ts64(&mapping_info->ts);
 	/* add to the list */
 	list_add(&mapping_info->list,
 		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list);
 
+	CAM_DBG(CAM_SMMU, "fd %d i_ino %lu dmabuf %pK",
+		mapping_info->ion_fd, mapping_info->i_ino, buf);
+
 	cam_smmu_update_monitor_array(&iommu_cb_set.cb_info[idx], true,
 		mapping_info);
 
@@ -2401,14 +2420,17 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
 }
 
 static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
-	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
 	struct timespec64 **ts_mapping)
 {
 	struct cam_dma_buff_info *mapping;
+	unsigned long i_ino;
 
+	i_ino = file_inode(dmabuf->file)->i_ino;
+
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		if (mapping->ion_fd == ion_fd) {
+		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			*ts_mapping = &mapping->ts;
@@ -2420,14 +2442,17 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
 }
 
 static enum cam_smmu_buf_state cam_smmu_user_reuse_fd_in_list(int idx,
-	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
 	struct timespec64 **ts_mapping)
 {
 	struct cam_dma_buff_info *mapping;
+	unsigned long i_ino;
 
+	i_ino = file_inode(dmabuf->file)->i_ino;
+
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		if (mapping->ion_fd == ion_fd) {
+		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			*ts_mapping = &mapping->ts;
@@ -2457,15 +2482,17 @@ static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
 }
 
 static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
-	int ion_fd, dma_addr_t *paddr_ptr,
-	size_t *len_ptr)
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
 	struct cam_sec_buff_info *mapping;
+	unsigned long i_ino;
 
+	i_ino = file_inode(dmabuf->file)->i_ino;
+
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list,
 		list) {
-		if (mapping->ion_fd == ion_fd) {
+		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			mapping->ref_count++;
@@ -2477,14 +2504,17 @@ static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
 }
 
 static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
-	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
 	struct cam_sec_buff_info *mapping;
+	unsigned long i_ino;
 
+	i_ino = file_inode(dmabuf->file)->i_ino;
+
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list,
 		list) {
-		if (mapping->ion_fd == ion_fd) {
+		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			return CAM_SMMU_BUFF_EXIST;
@@ -2644,6 +2674,7 @@ static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
 	}
 
 	mapping_info->ion_fd = 0xDEADBEEF;
+	mapping_info->i_ino = 0;
 	mapping_info->buf = NULL;
 	mapping_info->attach = NULL;
 	mapping_info->table = table;
@@ -2939,14 +2970,16 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
 	}
 
 	mapping_info->ion_fd = ion_fd;
+	mapping_info->i_ino = file_inode(dmabuf->file)->i_ino;
 	mapping_info->paddr = *paddr_ptr;
 	mapping_info->len = *len_ptr;
 	mapping_info->dir = dma_dir;
 	mapping_info->ref_count = 1;
 	mapping_info->buf = dmabuf;
 
-	CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, dev=%pOFfp, paddr=0x%llx, len=%zu",
-		idx, ion_fd, iommu_cb_set.cb_info[idx].dev, *paddr_ptr, *len_ptr);
+	CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, i_ino=%lu, dev=%pOFfp, paddr=0x%llx, len=%zu",
+		idx, ion_fd, mapping_info->i_ino, iommu_cb_set.cb_info[idx].dev,
+		*paddr_ptr, *len_ptr);
 
 	/* add to the list */
 	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
@@ -2963,9 +2996,8 @@ err_out:
 	return rc;
 }
 
-int cam_smmu_map_stage2_iova(int handle,
-	int ion_fd, enum cam_smmu_map_dir dir,
-	dma_addr_t *paddr_ptr, size_t *len_ptr)
+int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
 	int idx, rc;
 	enum dma_data_direction dma_dir;
@@ -3014,7 +3046,7 @@ int cam_smmu_map_stage2_iova(int handle,
 		goto get_addr_end;
 	}
 
-	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, paddr_ptr,
+	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr,
 		len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		CAM_DBG(CAM_SMMU,
@@ -3049,14 +3081,15 @@ static int cam_smmu_secure_unmap_buf_and_remove_from_list(
 	dma_buf_put(mapping_info->buf);
 	list_del_init(&mapping_info->list);
 
-	CAM_DBG(CAM_SMMU, "unmap fd: %d, idx : %d", mapping_info->ion_fd, idx);
+	CAM_DBG(CAM_SMMU, "unmap fd: %d, i_ino : %lu, idx : %d",
+		mapping_info->ion_fd, mapping_info->i_ino, idx);
 
 	/* free one buffer */
 	kfree(mapping_info);
 	return 0;
 }
 
-int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf)
 {
 	int idx, rc;
 	struct cam_sec_buff_info *mapping_info;
@@ -3088,7 +3121,7 @@ int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
 	}
 
 	/* based on ion fd and index, we can find mapping info of buffer */
-	mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd);
+	mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd, dma_buf);
 	if (!mapping_info) {
 		CAM_ERR(CAM_SMMU,
 			"Error: Invalid params! idx = %d, fd = %d",
@@ -3159,8 +3192,8 @@ static int cam_smmu_map_iova_validate_params(int handle,
 	return rc;
 }
 
-int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
-	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
+	bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
 	size_t *len_ptr, enum cam_smmu_region_id region_id,
 	bool is_internal)
 {
@@ -3203,7 +3236,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
 		goto get_addr_end;
 	}
 
-	buf_state = cam_smmu_user_reuse_fd_in_list(idx, ion_fd, paddr_ptr,
+	buf_state = cam_smmu_user_reuse_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr,
 		len_ptr, &ts);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		uint64_t ms = 0, tmp = 0, hrs = 0, min = 0, sec = 0;
@@ -3301,7 +3334,7 @@ get_addr_end:
 }
 EXPORT_SYMBOL(cam_smmu_map_kernel_iova);
 
-int cam_smmu_get_iova(int handle, int ion_fd,
+int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 	dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
 	int idx, rc = 0;
@@ -3345,7 +3378,7 @@ int cam_smmu_get_iova(int handle, int ion_fd,
 		goto get_addr_end;
 	}
 
-	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr,
 		len_ptr, &ts);
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
 		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
@@ -3360,7 +3393,7 @@ get_addr_end:
 }
 EXPORT_SYMBOL(cam_smmu_get_iova);
 
-int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 	dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
 	int idx, rc = 0;
@@ -3403,10 +3436,7 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd,
 		goto get_addr_end;
 	}
 
-	buf_state = cam_smmu_validate_secure_fd_in_list(idx,
-		ion_fd,
-		paddr_ptr,
-		len_ptr);
+	buf_state = cam_smmu_validate_secure_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, len_ptr);
 
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
 		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
@@ -3442,7 +3472,7 @@ static int cam_smmu_unmap_validate_params(int handle)
 }
 
 int cam_smmu_unmap_user_iova(int handle,
-	int ion_fd, enum cam_smmu_region_id region_id)
+	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id)
 {
 	int idx, rc;
 	struct cam_dma_buff_info *mapping_info;
@@ -3471,7 +3501,7 @@ int cam_smmu_unmap_user_iova(int handle,
 	}
 
 	/* Based on ion_fd & index, we can find mapping info of buffer */
-	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd, dma_buf);
 
 	if (!mapping_info) {
 		CAM_ERR(CAM_SMMU,
@@ -3556,7 +3586,7 @@ unmap_end:
 EXPORT_SYMBOL(cam_smmu_unmap_kernel_iova);
 
 
-int cam_smmu_put_iova(int handle, int ion_fd)
+int cam_smmu_put_iova(int handle, int ion_fd, struct dma_buf *dma_buf)
 {
 	int idx;
 	int rc = 0;
@@ -3586,7 +3616,7 @@ int cam_smmu_put_iova(int handle, int ion_fd)
 	}
 
 	/* based on ion fd and index, we can find mapping info of buffer */
-	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd, dma_buf);
 	if (!mapping_info) {
 		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
			idx, ion_fd);
@@ -114,6 +114,7 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
  *
  * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
  * @param ion_fd: ION handle identifying the memory buffer.
+ * @param dmabuf: DMA buf handle identifying the memory buffer.
  * @param dis_delayed_unmap: Whether to disable Delayed Unmap feature
  *                           for this mapping
  * @dir : Mapping direction: which will traslate toDMA_BIDIRECTIONAL,
@@ -127,8 +128,8 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
  * @is_internal: Specifies if this buffer is kernel allocated.
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
-	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
+int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
+	bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
 	enum cam_smmu_region_id region_id, bool is_internal);
 
 /**
@@ -156,11 +157,13 @@ int cam_smmu_map_kernel_iova(int handle,
  *
  * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
  * @param ion_fd: ION handle identifying the memory buffer.
+ * @param dma_buf: DMA Buf handle identifying the memory buffer.
  * @param region_id: Region id from which to unmap buffer.
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_unmap_user_iova(int handle,
-	int ion_fd, enum cam_smmu_region_id region_id);
+	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id);
 
 /**
  * @brief : Unmaps kernel IOVA for calling driver
@@ -261,12 +264,13 @@ void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
  *
  * @param handle: SMMU handle identifying the context bank to map to
  * @param ion_fd: ION fd of memory to map to
+ * @param dma_buf: DMA buf of memory to map to
  * @param paddr_ptr: Pointer IOVA address that will be returned
 * @param len_ptr: Length of memory mapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
-int cam_smmu_get_iova(int handle, int ion_fd,
+int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 	dma_addr_t *paddr_ptr, size_t *len_ptr);
 
 /**
@@ -274,12 +278,13 @@ int cam_smmu_get_iova(int handle, int ion_fd,
  *
  * @param handle: SMMU handle identifying the secure context bank to map to
  * @param ion_fd: ION fd of memory to map to
+ * @param dma_buf: DMA Buf of memory to map to
  * @param paddr_ptr: Pointer IOVA address that will be returned
 * @param len_ptr: Length of memory mapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
-int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 	dma_addr_t *paddr_ptr, size_t *len_ptr);
 
 /**
@@ -287,35 +292,37 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd,
  *
  * @param handle: SMMU handle identifying the context bank
 * @param ion_fd: ION fd of memory to unmap
+ * @param dma_buf: DMA Buf of memory to unmap
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
-int cam_smmu_put_iova(int handle, int ion_fd);
+int cam_smmu_put_iova(int handle, int ion_fd, struct dma_buf *dma_buf);
 
 /**
 * @brief Maps secure memory for SMMU handle
 *
 * @param handle: SMMU handle identifying secure context bank
 * @param ion_fd: ION fd to map securely
+ * @param dmabuf: DMA buf to map securely
 * @param dir: DMA Direction for the mapping
 * @param dma_addr: Returned IOVA address after mapping
 * @param len_ptr: Length of memory mapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
-int cam_smmu_map_stage2_iova(int handle,
-	int ion_fd, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr,
-	size_t *len_ptr);
+int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr);
 
 /**
 * @brief Unmaps secure memopry for SMMU handle
 *
 * @param handle: SMMU handle identifying secure context bank
 * @param ion_fd: ION fd to unmap
+ * @param dma_buf: DMA Buf to unmap
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
-int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf);
 
 /**
 * @brief Allocates firmware for context bank
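With the header changes above, every external caller now passes the dma_buf alongside the fd. A hedged sketch of a call site after this change (mmu_hdl, ion_fd and dmabuf stand in for values a real client already owns; pr_err substitutes for the driver's CAM_ERR logging):

    #include <linux/dma-buf.h>
    #include <linux/fs.h>

    static int demo_lookup(int mmu_hdl, int ion_fd, struct dma_buf *dmabuf)
    {
            dma_addr_t iova = 0;
            size_t len = 0;
            int rc;

            /* New signature: the dma_buf rides along so the callee can
             * key the lookup on (fd, i_ino) instead of fd alone.
             */
            rc = cam_smmu_get_iova(mmu_hdl, ion_fd, dmabuf, &iova, &len);
            if (rc)
                    pr_err("lookup failed fd=%d i_ino=%lu\n", ion_fd,
                            file_inode(dmabuf->file)->i_ino);
            return rc;
    }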