@@ -119,6 +119,13 @@ struct cam_smmu_monitor {
 	enum cam_smmu_region_id region_id;
 };
 
+struct cam_smmu_debug {
+	struct dentry *dentry;
+	bool cb_dump_enable;
+	bool map_profile_enable;
+	uint32_t fatal_pf_mask;
+};
+
 struct cam_context_bank_info {
 	struct device *dev;
 	struct iommu_domain *domain;
@@ -187,10 +194,7 @@ struct cam_iommu_cb_set {
 	struct work_struct smmu_work;
 	struct mutex payload_list_lock;
 	struct list_head payload_list;
-	u32 non_fatal_fault;
-	struct dentry *dentry;
-	bool cb_dump_enable;
-	bool map_profile_enable;
+	struct cam_smmu_debug debug_cfg;
 	bool force_cache_allocs;
 	bool need_shared_buffer_padding;
 	bool is_expanded_memory;
@@ -622,7 +626,7 @@ static void cam_smmu_dump_cb_info(int idx)
 		cb_info->shared_mapping_size, cb_info->io_mapping_size,
 		shared_free_len, io_free_len);
 
-	if (iommu_cb_set.cb_dump_enable) {
+	if (iommu_cb_set.debug_cfg.cb_dump_enable) {
 		list_for_each_entry_safe(mapping, mapping_temp,
 			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
 			i++;
@@ -907,6 +911,11 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
 	 * lead to further fault irqs being triggered
 	 */
 	if (iommu_cb_set.cb_info[idx].non_fatal_faults_en) {
+		/* Panic if debugfs is set for a context bank */
+		if (iommu_cb_set.debug_cfg.fatal_pf_mask & BIT(idx))
+			CAM_TRIGGER_PANIC("SMMU context fault from soc: %s[cb_idx: %u]",
+				iommu_cb_set.cb_info[idx].name[0], idx);
+
 		CAM_DBG(CAM_SMMU,
 			"PF marked as non-fatal for cb: %s, return success to SMMU",
 			cb_name);
@@ -939,7 +948,7 @@ static int cam_smmu_translate_dir_to_iommu_dir(
 	default:
 		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir);
 		break;
-	};
+	}
 	return IOMMU_INVALID_DIR;
 }
 
@@ -2152,7 +2161,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 		goto err_out;
 	}
 
-	if (iommu_cb_set.map_profile_enable)
+	if (iommu_cb_set.debug_cfg.map_profile_enable)
 		CAM_GET_TIMESTAMP(ts1);
 
 	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
@@ -2235,7 +2244,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 		"iova=%pK, region_id=%d, paddr=0x%llx, len=%zu, dma_map_attrs=%d",
 		iova, region_id, *paddr_ptr, *len_ptr, attach->dma_map_attrs);
 
-	if (iommu_cb_set.map_profile_enable) {
+	if (iommu_cb_set.debug_cfg.map_profile_enable) {
 		CAM_GET_TIMESTAMP(ts2);
 		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
 		trace_cam_log_event("SMMUMapProfile", "size and time in micro",
@@ -2413,7 +2422,7 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
 		mapping_info->region_id, mapping_info->paddr, mapping_info->len,
 		mapping_info->attach->dma_map_attrs);
 
-	if (iommu_cb_set.map_profile_enable)
+	if (iommu_cb_set.debug_cfg.map_profile_enable)
 		CAM_GET_TIMESTAMP(ts1);
 
 	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
@@ -2457,7 +2466,7 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
 	dma_buf_detach(mapping_info->buf, mapping_info->attach);
 	dma_buf_put(mapping_info->buf);
 
-	if (iommu_cb_set.map_profile_enable) {
+	if (iommu_cb_set.debug_cfg.map_profile_enable) {
 		CAM_GET_TIMESTAMP(ts2);
 		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
 		trace_cam_log_event("SMMUUnmapProfile",
@@ -4494,7 +4503,26 @@ static unsigned long cam_smmu_mini_dump_cb(void *dst, unsigned long len)
 	}
 end:
 	return dumped_len;
-};
+}
+
+static int cam_smmu_set_fatal_pf_mask(void *data, u64 val)
+{
+	iommu_cb_set.debug_cfg.fatal_pf_mask = val;
+	CAM_DBG(CAM_SMMU, "Set fatal page fault value: 0x%llx",
+		iommu_cb_set.debug_cfg.fatal_pf_mask);
+	return 0;
+}
+
+static int cam_smmu_get_fatal_pf_mask(void *data, u64 *val)
+{
+	*val = iommu_cb_set.debug_cfg.fatal_pf_mask;
+	CAM_DBG(CAM_SMMU, "Get fatal page fault value: 0x%llx",
+		*val);
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cam_smmu_fatal_pf_mask,
+	cam_smmu_get_fatal_pf_mask, cam_smmu_set_fatal_pf_mask, "%16llu");
 
 static int cam_smmu_create_debug_fs(void)
 {
@@ -4511,12 +4539,15 @@ static int cam_smmu_create_debug_fs(void)
 		goto end;
 	}
 	/* Store parent inode for cleanup in caller */
-	iommu_cb_set.dentry = dbgfileptr;
+	iommu_cb_set.debug_cfg.dentry = dbgfileptr;
 
 	debugfs_create_bool("cb_dump_enable", 0644,
-		iommu_cb_set.dentry, &iommu_cb_set.cb_dump_enable);
+		iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.cb_dump_enable);
 	debugfs_create_bool("map_profile_enable", 0644,
-		iommu_cb_set.dentry, &iommu_cb_set.map_profile_enable);
+		iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.map_profile_enable);
+	debugfs_create_file("fatal_pf_mask", 0644,
+		iommu_cb_set.debug_cfg.dentry, NULL, &cam_smmu_fatal_pf_mask);
+
 end:
 	return rc;
 }
@@ -4640,7 +4671,7 @@ static void cam_smmu_component_unbind(struct device *dev,
 	}
 
 	cam_smmu_release_cb(pdev);
-	iommu_cb_set.dentry = NULL;
+	iommu_cb_set.debug_cfg.dentry = NULL;
 }
 
 const static struct component_ops cam_smmu_component_ops = {