@@ -27,7 +27,6 @@
 #include "camera_main.h"
 #include "cam_trace.h"
 #include "cam_common_util.h"
-#include "cam_compat.h"

 #define SHARED_MEM_POOL_GRANULARITY 16

@@ -42,6 +41,7 @@
 #define CAM_SMMU_HDL_VALIDATE(x, y) ((x) != ((y) & CAM_SMMU_HDL_MASK))

 #define CAM_SMMU_MONITOR_MAX_ENTRIES 100
+#define CAM_SMMU_BUF_TRACKING_POOL 600
 #define CAM_SMMU_INC_MONITOR_HEAD(head, ret) \
 	div_u64_rem(atomic64_add_return(1, head),\
 	CAM_SMMU_MONITOR_MAX_ENTRIES, (ret))
@@ -50,6 +50,7 @@ static int g_num_pf_handled = 1;
 module_param(g_num_pf_handled, int, 0644);

 struct cam_fw_alloc_info icp_fw;
+struct cam_smmu_buffer_tracker *buf_tracking_pool;

 struct cam_smmu_work_payload {
 	int idx;
@@ -112,9 +113,10 @@ struct cam_smmu_monitor {

 struct cam_smmu_debug {
 	struct dentry *dentry;
+	uint32_t fatal_pf_mask;
 	bool cb_dump_enable;
 	bool map_profile_enable;
-	uint32_t fatal_pf_mask;
+	bool disable_buf_tracking;
 };

 struct cam_smmu_subregion_info {
@@ -227,10 +229,13 @@ struct cam_iommu_cb_set {
 	struct mutex payload_list_lock;
 	struct list_head payload_list;
 	struct cam_smmu_debug debug_cfg;
+	struct list_head buf_tracker_free_list;
+	struct cam_csf_version csf_version;
+	spinlock_t s_lock;
 	bool force_cache_allocs;
 	bool need_shared_buffer_padding;
 	bool is_expanded_memory;
-	struct cam_csf_version csf_version;
+	bool is_track_buf_disabled;
 };

 static const struct of_device_id msm_cam_smmu_dt_match[] = {
@@ -247,7 +252,8 @@ struct cam_dma_buff_info {
 	enum dma_data_direction dir;
 	enum cam_smmu_region_id region_id;
 	int iommu_dir;
-	int ref_count;
+	int map_count;
+	struct kref ref_count;
 	dma_addr_t paddr;
 	struct list_head list;
 	int ion_fd;
@@ -263,7 +269,8 @@ struct cam_sec_buff_info {
 	struct dma_buf_attachment *attach;
 	struct sg_table *table;
 	enum dma_data_direction dir;
-	int ref_count;
+	int map_count;
+	struct kref ref_count;
 	dma_addr_t paddr;
 	struct list_head list;
 	int ion_fd;
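
The two hunks above split the old `int ref_count` into two counters: `map_count` keeps the original map/unmap pairing semantics under the context-bank lock, while the new `struct kref ref_count` lets per-request tracker entries pin the mapping so a non-forced unmap cannot pull it out from under an in-flight request. A minimal userspace model of that split, assuming nothing beyond what the hunks show (the real driver serializes these paths with its own locks):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct mapping {
        int map_count;        /* explicit map/unmap pairing, lock-protected */
        atomic_int ref_count; /* holders registered via tracker entries */
    };

    /* Models the unmap paths later in the patch: 0 = unmapped or still
     * mapped elsewhere, -1 = blocked because trackers still hold
     * references (the -EPERM case). */
    static int try_unmap(struct mapping *m, bool force)
    {
        if (m->map_count > 1) {
            m->map_count--;
            return 0;
        }
        m->map_count = 0;
        if (!force && atomic_load(&m->ref_count) > 1)
            return -1;
        /* the real driver detaches the dma-buf here */
        return 0;
    }

    int main(void)
    {
        struct mapping m = { .map_count = 1 };

        atomic_init(&m.ref_count, 2);                 /* one tracker entry alive */
        printf("unmap: %d\n", try_unmap(&m, false));  /* -1, still referenced */
        printf("force: %d\n", try_unmap(&m, true));   /*  0, forced through */
        return 0;
    }
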
@@ -357,7 +364,8 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
 	dma_addr_t *paddr_ptr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf);
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf,
+	struct kref **ref_count);

 static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
 	struct dma_buf *buf, enum dma_data_direction dma_dir,
@@ -2596,6 +2604,46 @@ int cam_smmu_release_buf_region(enum cam_smmu_region_id region,
 }
 EXPORT_SYMBOL(cam_smmu_release_buf_region);

+static int cam_smmu_util_return_map_entry(struct cam_smmu_buffer_tracker *entry)
+{
+	spin_lock_bh(&iommu_cb_set.s_lock);
+	list_add_tail(&entry->list, &iommu_cb_set.buf_tracker_free_list);
+	spin_unlock_bh(&iommu_cb_set.s_lock);
+
+	return 0;
+}
+
+void cam_smmu_buffer_tracker_putref(struct list_head *track_list)
+{
+	struct cam_smmu_buffer_tracker *buffer_tracker, *temp;
+
+	if (iommu_cb_set.is_track_buf_disabled)
+		return;
+
+	if (!track_list || list_empty(track_list))
+		return;
+
+	list_for_each_entry_safe(buffer_tracker, temp, track_list, list) {
+		if (refcount_dec_and_test(&buffer_tracker->ref_count->refcount))
+			CAM_ERR(CAM_SMMU,
+				"[SMMU_BT] Unexpected - buffer reference [fd: 0x%x ino: 0x%x cb: %s] zeroed prior to unmap invocation",
+				buffer_tracker->ion_fd, buffer_tracker->i_ino,
+				buffer_tracker->cb_name);
+		else
+			CAM_DBG(CAM_SMMU,
+				"[SMMU_BT] kref_count after put, [fd: 0x%x ino: 0x%x cb: %s], count: %d",
+				buffer_tracker->ion_fd, buffer_tracker->i_ino,
+				buffer_tracker->cb_name,
+				kref_read(buffer_tracker->ref_count));
+
+		list_del_init(&buffer_tracker->list);
+
+		cam_smmu_util_return_map_entry(buffer_tracker);
+
+	}
+}
+EXPORT_SYMBOL(cam_smmu_buffer_tracker_putref);
+
 static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
 	size_t *len_ptr, enum cam_smmu_region_id region_id,
@@ -2742,7 +2790,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	(*mapping_info)->paddr = *paddr_ptr;
 	(*mapping_info)->len = *len_ptr;
 	(*mapping_info)->dir = dma_dir;
-	(*mapping_info)->ref_count = 1;
+	(*mapping_info)->map_count = 1;
 	(*mapping_info)->region_id = region_id;

 	if (!*paddr_ptr || !*len_ptr) {
@@ -2786,7 +2834,8 @@ err_out:
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
 	dma_addr_t *paddr_ptr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf)
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf,
+	struct kref **ref_count)
 {
 	int rc = -1;
 	struct cam_dma_buff_info *mapping_info = NULL;
@@ -2802,6 +2851,8 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	mapping_info->ion_fd = ion_fd;
 	mapping_info->i_ino = file_inode(buf->file)->i_ino;
 	mapping_info->is_internal = is_internal;
+	kref_init(&mapping_info->ref_count);
+	*ref_count = &mapping_info->ref_count;
 	CAM_GET_TIMESTAMP(mapping_info->ts);
 	/* add to the list */
 	list_add(&mapping_info->list,
@@ -2938,9 +2989,58 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
 	return 0;
 }

+static int cam_smmu_util_get_free_map_entry(struct cam_smmu_buffer_tracker **entry)
+{
+	spin_lock_bh(&iommu_cb_set.s_lock);
+	if (list_empty(&iommu_cb_set.buf_tracker_free_list)) {
+		CAM_WARN(CAM_SMMU, "[SMMU_BT] Not enough mem to track buffer");
+		spin_unlock_bh(&iommu_cb_set.s_lock);
+		return -ENOMEM;
+	}
+
+	*entry = list_first_entry(&iommu_cb_set.buf_tracker_free_list,
+		struct cam_smmu_buffer_tracker, list);
+
+	list_del_init(&(*entry)->list);
+	spin_unlock_bh(&iommu_cb_set.s_lock);
+
+	return 0;
+}
+
+int cam_smmu_add_buf_to_track_list(int ion_fd, unsigned long inode,
+	struct kref **ref_count, struct list_head *buf_tracker, int idx)
+{
+	int rc = 0;
+	struct cam_smmu_buffer_tracker *buf;
+
+	if (iommu_cb_set.is_track_buf_disabled)
+		return rc;
+
+	rc = cam_smmu_util_get_free_map_entry(&buf);
+	if (rc == -ENOMEM) {
+		rc = 0;
+		return rc;
+	}
+
+	kref_get(*ref_count);
+
+	buf->ion_fd = ion_fd;
+	buf->i_ino = inode;
+	buf->ref_count = *ref_count;
+	buf->cb_name = iommu_cb_set.cb_info[idx].name[0];
+
+	CAM_DBG(CAM_SMMU,
+		"[SMMU_BT] ref_cnt increased for fd 0x%x, ino 0x%x: %d, cb: %s",
+		buf->ion_fd, buf->i_ino, kref_read(buf->ref_count), buf->cb_name);
+
+	list_add(&buf->list, buf_tracker);
+
+	return rc;
+}
+
 static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
 	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
-	struct timespec64 **ts_mapping)
+	struct timespec64 **ts_mapping, unsigned long *inode, struct kref **ref_count)
 {
 	struct cam_dma_buff_info *mapping;
 	unsigned long i_ino;
@@ -2953,6 +3053,8 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			*ts_mapping = &mapping->ts;
+			*inode = i_ino;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
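
Together with `cam_smmu_buffer_tracker_putref()` earlier in the patch, this gives consumers a per-request accounting pattern: resolve the IOVA while registering a pool-backed tracker entry (which takes a kref on the mapping), then drop all of a request's references in one call when the request completes. A hedged sketch of that flow using the signatures from this patch — the surrounding `hyp_request` structure and helpers are hypothetical:

    /* Hypothetical consumer-side request object; buf_tracker must be
     * initialized with INIT_LIST_HEAD() before first use. */
    struct hyp_request {
        struct list_head buf_tracker;  /* tracker entries pinned by this request */
    };

    static int hyp_request_map_buf(struct hyp_request *req, int handle, int fd,
        struct dma_buf *dbuf, dma_addr_t *iova, size_t *len)
    {
        struct kref *ref_count = NULL;

        /* Looks up the existing mapping, takes a kref on it, and links a
         * pool entry onto req->buf_tracker (best effort: tracking is
         * silently skipped if the pool is exhausted or disabled). */
        return cam_smmu_get_iova(handle, fd, dbuf, iova, len,
            &req->buf_tracker, &ref_count);
    }

    static void hyp_request_done(struct hyp_request *req)
    {
        /* Drops every reference this request took; entries return to the
         * free pool, and a later non-forced unmap can then succeed. */
        cam_smmu_buffer_tracker_putref(&req->buf_tracker);
    }
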
@@ -2962,7 +3064,7 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,

 static enum cam_smmu_buf_state cam_smmu_user_reuse_fd_in_list(int idx,
 	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
-	struct timespec64 **ts_mapping)
+	struct timespec64 **ts_mapping, struct kref **ref_count)
 {
 	struct cam_dma_buff_info *mapping;
 	unsigned long i_ino;
@@ -2975,7 +3077,8 @@ static enum cam_smmu_buf_state cam_smmu_user_reuse_fd_in_list(int idx,
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			*ts_mapping = &mapping->ts;
-			mapping->ref_count++;
+			mapping->map_count++;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -3001,7 +3104,8 @@ static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
 }

 static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
-	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	struct kref **ref_count)
 {
 	struct cam_sec_buff_info *mapping;
 	unsigned long i_ino;
@@ -3014,7 +3118,8 @@ static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
 		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
-			mapping->ref_count++;
+			mapping->map_count++;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -3023,7 +3128,8 @@
 }

 static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
-	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	unsigned long *inode, struct kref **ref_count)
 {
 	struct cam_sec_buff_info *mapping;
 	unsigned long i_ino;
@@ -3036,6 +3142,8 @@ static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
 		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
+			*inode = i_ino;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -3200,7 +3308,7 @@ static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
 	mapping_info->paddr = iova;
 	mapping_info->len = virt_len;
 	mapping_info->iommu_dir = iommu_dir;
-	mapping_info->ref_count = 1;
+	mapping_info->map_count = 1;
 	mapping_info->phys_len = phys_len;
 	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;

@@ -3432,8 +3540,8 @@ handle_err:
 }

 static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
-	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
-	size_t *len_ptr, struct dma_buf *dmabuf)
+	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, struct dma_buf *dmabuf, struct kref **ref_count)
 {
 	int rc = 0;
 	struct dma_buf_attachment *attach = NULL;
@@ -3493,11 +3601,14 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
 	mapping_info->paddr = *paddr_ptr;
 	mapping_info->len = *len_ptr;
 	mapping_info->dir = dma_dir;
-	mapping_info->ref_count = 1;
+	mapping_info->map_count = 1;
 	mapping_info->buf = dmabuf;
 	mapping_info->attach = attach;
 	mapping_info->table = table;

+	kref_init(&mapping_info->ref_count);
+	*ref_count = &mapping_info->ref_count;
+
 	CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, i_ino=%lu, dev=%pOFfp, paddr=0x%llx, len=%zu",
 		idx, ion_fd, mapping_info->i_ino,
 		iommu_cb_set.cb_info[idx].dev->of_node,
@@ -3517,7 +3628,8 @@ err_out:
 }

 int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
-	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	struct kref **ref_count)
 {
 	int idx, rc;
 	enum dma_data_direction dma_dir;
@@ -3567,7 +3679,7 @@ int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	}

 	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr,
-		len_ptr);
+		len_ptr, ref_count);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		CAM_DBG(CAM_SMMU,
 			"fd:%d already in list idx:%d, handle=%d give same addr back",
@@ -3576,7 +3688,7 @@ int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 		goto get_addr_end;
 	}
 	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
-		paddr_ptr, len_ptr, dmabuf);
+		paddr_ptr, len_ptr, dmabuf, ref_count);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU,
 			"Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d",
@@ -3624,7 +3736,8 @@ static int cam_smmu_secure_unmap_buf_and_remove_from_list(
 	return 0;
 }

-int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf)
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
+	bool force_unmap)
 {
 	int idx, rc;
 	struct cam_sec_buff_info *mapping_info;
@@ -3665,15 +3778,23 @@ int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf)
 		goto put_addr_end;
 	}

-	mapping_info->ref_count--;
-	if (mapping_info->ref_count > 0) {
+	mapping_info->map_count--;
+	if (mapping_info->map_count > 0) {
 		CAM_DBG(CAM_SMMU,
-			"idx: %d fd = %d ref_count: %d",
-			idx, ion_fd, mapping_info->ref_count);
+			"idx: %d fd = %d map_count: %d",
+			idx, ion_fd, mapping_info->map_count);
 		rc = 0;
 		goto put_addr_end;
 	}
-	mapping_info->ref_count = 0;
+	mapping_info->map_count = 0;
+	if (!force_unmap && kref_read(&mapping_info->ref_count) > 1) {
+		CAM_ERR(CAM_SMMU,
+			"[SMMU_BT] Error: can't unmap buffer as it's still active, idx: %d, cb: %s, fd: 0x%x, ino: 0x%x, ref_count: %d",
+			idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, mapping_info->i_ino,
+			kref_read(&mapping_info->ref_count));
+		rc = -EPERM;
+		goto put_addr_end;
+	}

 	/* unmapping one buffer from device */
 	rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx);
@@ -3739,10 +3860,34 @@ bool cam_smmu_supports_shared_region(int handle)
 	return is_shared;
 }

+void cam_smmu_buffer_tracker_buffer_putref(struct cam_smmu_buffer_tracker *entry)
+{
+
+	if (!entry) {
+		CAM_WARN(CAM_ISP, "[SMMU_BT] track buffer entry is NULL");
+		return;
+	}
+
+	if (refcount_dec_and_test(&entry->ref_count->refcount))
+		CAM_ERR(CAM_SMMU,
+			"[SMMU_BT] Unexpected - buffer reference [fd: 0x%x ino: 0x%x cb: %s] zeroed prior to unmap invocation",
+			entry->ion_fd, entry->i_ino, entry->cb_name);
+	else
+		CAM_DBG(CAM_SMMU,
+			"[SMMU_BT] kref_count after put, [fd: 0x%x ino: 0x%x cb: %s], count: %d",
+			entry->ion_fd, entry->i_ino, entry->cb_name, kref_read(entry->ref_count));
+
+
+	list_del_init(&entry->list);
+
+	cam_smmu_util_return_map_entry(entry);
+
+}
+
 int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
 	size_t *len_ptr, enum cam_smmu_region_id region_id,
-	bool is_internal)
+	bool is_internal, struct kref **ref_count)
 {
 	int idx, rc = 0;
 	struct timespec64 *ts = NULL;
@@ -3784,7 +3929,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	}

 	buf_state = cam_smmu_user_reuse_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr,
-		len_ptr, &ts);
+		len_ptr, &ts, ref_count);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		uint64_t ms = 0, hrs = 0, min = 0, sec = 0;

@@ -3801,7 +3946,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,

 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd,
 		dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr,
-		region_id, is_internal, dmabuf);
+		region_id, is_internal, dmabuf, ref_count);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU,
 			"mapping or add list fail cb:%s idx=%d, fd=%d, region=%d, rc=%d",
@@ -3877,11 +4022,13 @@ get_addr_end:
 EXPORT_SYMBOL(cam_smmu_map_kernel_iova);

 int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
-	dma_addr_t *paddr_ptr, size_t *len_ptr)
+	dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker,
+	struct kref **ref_count)
 {
 	int idx, rc = 0;
 	struct timespec64 *ts = NULL;
 	enum cam_smmu_buf_state buf_state;
+	unsigned long i_ino;

 	if (!paddr_ptr || !len_ptr) {
 		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
@@ -3921,7 +4068,7 @@ int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 	}

 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr,
-		len_ptr, &ts);
+		len_ptr, &ts, &i_ino, ref_count);
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
 		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
 		rc = -EINVAL;
@@ -3929,6 +4076,9 @@ int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 		goto get_addr_end;
 	}

+	if (buf_tracker)
+		rc = cam_smmu_add_buf_to_track_list(ion_fd, i_ino, ref_count, buf_tracker, idx);
+
 get_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
@@ -3936,10 +4086,12 @@ get_addr_end:
 EXPORT_SYMBOL(cam_smmu_get_iova);

 int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
-	dma_addr_t *paddr_ptr, size_t *len_ptr)
+	dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker,
+	struct kref **ref_count)
 {
 	int idx, rc = 0;
 	enum cam_smmu_buf_state buf_state;
+	unsigned long i_ino;

 	if (!paddr_ptr || !len_ptr) {
 		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
@@ -3978,7 +4130,8 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 		goto get_addr_end;
 	}

-	buf_state = cam_smmu_validate_secure_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, len_ptr);
+	buf_state = cam_smmu_validate_secure_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, len_ptr,
+		&i_ino, ref_count);

 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
 		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
@@ -3986,6 +4139,9 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 		goto get_addr_end;
 	}

+	if (buf_tracker)
+		rc = cam_smmu_add_buf_to_track_list(ion_fd, i_ino, ref_count, buf_tracker, idx);
+
 get_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
@@ -4014,7 +4170,8 @@ static int cam_smmu_unmap_validate_params(int handle)
 }

 int cam_smmu_unmap_user_iova(int handle,
-	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id)
+	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id,
+	bool force_unmap)
 {
 	int idx, rc;
 	struct cam_dma_buff_info *mapping_info;
@@ -4053,15 +4210,25 @@ int cam_smmu_unmap_user_iova(int handle,
 		goto unmap_end;
 	}

-	mapping_info->ref_count--;
-	if (mapping_info->ref_count > 0) {
+	mapping_info->map_count--;
+	if (mapping_info->map_count > 0) {
 		CAM_DBG(CAM_SMMU,
-			"idx: %d fd = %d ref_count: %d",
-			idx, ion_fd, mapping_info->ref_count);
+			"idx: %d, cb: %s fd = %d , ino: 0x%x, map_count: %d, ref_count: %d",
+			idx, iommu_cb_set.cb_info[idx].name[0], ion_fd,
+			mapping_info->i_ino, mapping_info->map_count,
+			kref_read(&mapping_info->ref_count));
 		rc = 0;
 		goto unmap_end;
 	}
-	mapping_info->ref_count = 0;
+	mapping_info->map_count = 0;
+	if (!force_unmap && kref_read(&mapping_info->ref_count) > 1) {
+		CAM_ERR(CAM_SMMU,
+			"[SMMU_BT] Error: can't unmap buffer as it's still active, idx: %d, cb: %s, fd: 0x%x, ino: 0x%x, ref_count: %d",
+			idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, mapping_info->i_ino,
+			kref_read(&mapping_info->ref_count));
+		rc = -EPERM;
+		goto unmap_end;
+	}

 	/* Unmapping one buffer from device */
 	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
@@ -5344,6 +5511,8 @@ static int cam_smmu_create_debug_fs(void)
 		iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.map_profile_enable);
 	debugfs_create_file("fatal_pf_mask", 0644,
 		iommu_cb_set.debug_cfg.dentry, NULL, &cam_smmu_fatal_pf_mask);
+	debugfs_create_bool("disable_buf_tracking", 0644,
+		iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.disable_buf_tracking);

 end:
 	return rc;
@@ -5351,6 +5520,7 @@ end:

 int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs)
 {
+	int i;
 	/* expect inputs to be valid */
 	if (!csf_ver || !num_cbs) {
 		CAM_ERR(CAM_SMMU, "Invalid params csf: %p num_cbs: %p",
@@ -5360,12 +5530,34 @@ int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs)
 	*num_cbs = iommu_cb_set.cb_num;
 	memcpy(csf_ver, &iommu_cb_set.csf_version, sizeof(*csf_ver));
+
+	iommu_cb_set.is_track_buf_disabled = iommu_cb_set.debug_cfg.disable_buf_tracking;
+
+	if (!iommu_cb_set.is_track_buf_disabled) {
+		buf_tracking_pool = kcalloc(CAM_SMMU_BUF_TRACKING_POOL,
+			sizeof(struct cam_smmu_buffer_tracker), GFP_KERNEL);
+
+		if (!buf_tracking_pool) {
+			CAM_WARN(CAM_SMMU, "[SMMU_BT] Not enough mem for buffer tracker pool");
+			goto end;
+		}
+
+		INIT_LIST_HEAD(&iommu_cb_set.buf_tracker_free_list);
+		for (i = 0; i < CAM_SMMU_BUF_TRACKING_POOL; i++) {
+			INIT_LIST_HEAD(&buf_tracking_pool[i].list);
+			list_add_tail(&buf_tracking_pool[i].list,
+				&iommu_cb_set.buf_tracker_free_list);
+		}
+	}
+
+end:
 	return 0;
 }

 void cam_smmu_driver_deinit(void)
 {
-	/* no-op */
+	INIT_LIST_HEAD(&iommu_cb_set.buf_tracker_free_list);
+	kfree(buf_tracking_pool);
 }

 static int cam_smmu_fw_dev_component_bind(struct device *dev,
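
The init path above pre-allocates all `CAM_SMMU_BUF_TRACKING_POOL` tracker entries up front and threads them onto a free list, so the per-buffer hot path never allocates, and pool exhaustion degrades to untracked buffers rather than failure (the `-ENOMEM` from `cam_smmu_util_get_free_map_entry()` is swallowed). A standalone model of that pool discipline — a sketch, not the driver code; `CAP` stands in for `CAM_SMMU_BUF_TRACKING_POOL` and the driver's spinlock is omitted:

    #include <stdlib.h>

    #define CAP 600  /* mirrors CAM_SMMU_BUF_TRACKING_POOL */

    struct node {
        struct node *next;
    };

    static struct node *pool;      /* backing array: kcalloc() in the patch */
    static struct node *free_head; /* singly linked free list */

    static int pool_init(void)
    {
        pool = calloc(CAP, sizeof(*pool));
        if (!pool)
            return -1;  /* the patch just warns and runs untracked */
        for (int i = 0; i < CAP; i++) {
            pool[i].next = free_head;
            free_head = &pool[i];
        }
        return 0;
    }

    static struct node *pool_get(void)
    {
        struct node *n = free_head;

        if (n)
            free_head = n->next;  /* empty pool is non-fatal, like -ENOMEM above */
        return n;
    }

    static void pool_put(struct node *n)
    {
        n->next = free_head;
        free_head = n;
    }
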
@@ -5459,6 +5651,7 @@ static int cam_smmu_component_bind(struct device *dev,

 	INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
 	mutex_init(&iommu_cb_set.payload_list_lock);
+	spin_lock_init(&iommu_cb_set.s_lock);
 	INIT_LIST_HEAD(&iommu_cb_set.payload_list);
 	cam_smmu_create_debug_fs();
