android_kernel_samsung_sm86…/msm/sde_dbg.c
Mahadevan 626d6ef343 disp: msm: re-factor debug bus logic to dump all test points
As per the HW team's guidelines, allow dumping the status of
all the HW blocks at every test point.
This change brings in the following:
1) Featurize the hw block and test point range per target.
2) Reduce the debug bus entries and decrease the static
   memory footprint.
3) Allow analyzers to be integrated into the new format.

Change-Id: Ic3bc2c1b77f5617b0f81a2066b22e50cfd6ff8dd
Signed-off-by: Mahadevan <mahap@codeaurora.org>
2020-12-23 08:55:48 +05:30


// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/list_sort.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "sde_dbg.h"
#include "sde/sde_hw_catalog.h"
#define SDE_DBG_BASE_MAX 10
#define DEFAULT_PANIC 1
#define DEFAULT_REGDUMP SDE_DBG_DUMP_IN_MEM
#define DEFAULT_DBGBUS_SDE SDE_DBG_DUMP_IN_MEM
#define DEFAULT_DBGBUS_VBIFRT SDE_DBG_DUMP_IN_MEM
#define DEFAULT_DBGBUS_DSI SDE_DBG_DUMP_IN_MEM
#define DEFAULT_DBGBUS_LUTDMA SDE_DBG_DUMP_IN_MEM
#define DEFAULT_BASE_REG_CNT DEFAULT_MDSS_HW_BLOCK_SIZE
#define GROUP_BYTES 4
#define ROW_BYTES 16
#define RANGE_NAME_LEN 40
#define REG_BASE_NAME_LEN 80
#define DBGBUS_FLAGS_DSPP BIT(0)
#define DBGBUS_DSPP_STATUS 0x34C
#define DBGBUS_NAME_SDE "sde"
#define DBGBUS_NAME_VBIF_RT "vbif_rt"
#define DBGBUS_NAME_DSI "dsi"
#define DBGBUS_NAME_LUTDMA "reg_dma"
/* offsets from LUTDMA top address for the debug buses */
#define DBGBUS_LUTDMA_0 0x1E8
#define DBGBUS_LUTDMA_1 0x5E8
/* offsets from sde top address for the debug buses */
#define DBGBUS_SSPP0 0x188
#define DBGBUS_AXI_INTF 0x194
#define DBGBUS_SSPP1 0x298
#define DBGBUS_DSPP 0x348
#define DBGBUS_PERIPH 0x418
/* offsets from DSI CTRL base address for the DSI debug buses */
#define DSI_DEBUG_BUS_CTRL 0x0124
#define DSI_DEBUG_BUS 0x0128
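/*
 * Debug bus test point select: BIT(0) enables the bus, bits [3:1] carry the
 * low test point bits and the block id starts at bit 4. For test points
 * above EXT_TEST_GROUP_SEL_EN, TEST_EXT_MASK places the remaining test point
 * bits at bit 24.
 */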
#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
#define TEST_EXT_MASK(id, tp) (((tp >> 3) << 24) | (id << 4) \
| ((tp & 0x7) << 1) | BIT(0))
/* following offsets are with respect to MDP VBIF base for DBG BUS access */
#define MMSS_VBIF_CLKON 0x4
#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
#define MMSS_VBIF_TEST_BUS_OUT 0x230
/* Vbif error info */
#define MMSS_VBIF_PND_ERR 0x190
#define MMSS_VBIF_SRC_ERR 0x194
#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
#define MMSS_VBIF_ERR_INFO 0X1a0
#define MMSS_VBIF_ERR_INFO_1 0x1a4
#define MMSS_VBIF_CLIENT_NUM 14
/* print debug ranges in groups of 4 u32s */
#define REG_DUMP_ALIGN 16
#define RSC_DEBUG_MUX_SEL_SDM845 9
#define DBG_CTRL_STOP_FTRACE BIT(0)
#define DBG_CTRL_PANIC_UNDERRUN BIT(1)
#define DBG_CTRL_RESET_HW_PANIC BIT(2)
#define DBG_CTRL_MAX BIT(3)
#define DUMP_BUF_SIZE (4096 * 512)
#define DUMP_CLMN_COUNT 4
#define DUMP_LINE_SIZE 256
#define DUMP_MAX_LINES_PER_BLK 512
#define EXT_TEST_GROUP_SEL_EN 0x7
#define DSPP_DEBUGBUS_CTRL_EN 0x7001
#define SDE_HW_REV_MAJOR(rev) ((rev) >> 28)
/**
* struct sde_dbg_reg_offset - tracking for start and end of region
* @start: start offset
* @end: end offset
*/
struct sde_dbg_reg_offset {
u32 start;
u32 end;
};
/**
* struct sde_dbg_reg_range - register dumping named sub-range
* @head: head of this node
* @reg_dump: address for the mem dump
* @range_name: name of this range
* @offset: offsets for range to dump
* @xin_id: client xin id
*/
struct sde_dbg_reg_range {
struct list_head head;
u32 *reg_dump;
char range_name[RANGE_NAME_LEN];
struct sde_dbg_reg_offset offset;
uint32_t xin_id;
};
/**
* struct sde_dbg_reg_base - register region base.
* If a region has sub-ranges, only those sub-ranges are dumped;
* otherwise the whole region from base to max_offset is dumped.
* @reg_base_head: head of this node
* @sub_range_list: head to the list with dump ranges
* @name: register base name
* @base: base pointer
* @off: cached offset of region for manual register dumping
* @cnt: cached range of region for manual register dumping
* @max_offset: length of region
* @buf: buffer used for manual register dumping
* @buf_len: buffer length used for manual register dumping
* @reg_dump: address for the mem dump if no ranges used
* @cb: callback for external dump function, null if not defined
* @cb_ptr: private pointer to callback function
*/
struct sde_dbg_reg_base {
struct list_head reg_base_head;
struct list_head sub_range_list;
char name[REG_BASE_NAME_LEN];
void __iomem *base;
size_t off;
size_t cnt;
size_t max_offset;
char *buf;
size_t buf_len;
u32 *reg_dump;
void (*cb)(void *ptr);
void *cb_ptr;
};
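/**
 * struct sde_debug_bus_entry - sde debug bus test point group
 * @wr_addr: debug bus write/control register offset
 * @block_id: first block id in the group
 * @block_id_max_cnt: number of block ids to iterate
 * @test_id: first test point in the group
 * @test_id_max_cnt: number of test points to iterate per block
 * @analyzer: optional callback invoked with each status value read
 */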
struct sde_debug_bus_entry {
u32 wr_addr;
u32 block_id;
u32 block_id_max_cnt;
u32 test_id;
u32 test_id_max_cnt;
void (*analyzer)(void __iomem *mem_base,
struct sde_debug_bus_entry *entry, u32 val,
u32 block_id_cnt, u32 test_id_cnt);
};
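/**
 * struct vbif_debug_bus_entry - vbif debug bus test point group
 * @disable_bus_addr: register offset written with 0 to disable the other bus
 * @block_bus_addr: register offset used to select the block and test point
 * @bit_offset: shift applied to the block enable bit
 * @block_cnt: number of blocks to iterate
 * @test_pnt_start: first test point index
 * @test_pnt_cnt: test point index at which iteration stops
 */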
struct vbif_debug_bus_entry {
u32 disable_bus_addr;
u32 block_bus_addr;
u32 bit_offset;
u32 block_cnt;
u32 test_pnt_start;
u32 test_pnt_cnt;
};
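/**
 * struct dsi_debug_bus_entry - dsi debug bus test point
 * @mux: debug bus mux select, programmed at bit 12 of DSI_DEBUG_BUS_CTRL
 * @sel: test point select, programmed at bit 4 of DSI_DEBUG_BUS_CTRL
 */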
struct dsi_debug_bus_entry {
u32 mux;
u32 sel;
};
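/**
 * struct lutdma_debug_bus_entry - lutdma debug bus test point group
 * @wr_addr: debug bus control register offset from the lutdma base
 * @read_engine: select the read engine status when true
 * @indicies: number of debug bus indices to dump
 */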
struct lutdma_debug_bus_entry {
u32 wr_addr;
bool read_engine;
u32 indicies;
};
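/**
 * struct sde_dbg_dsi_ctrl_list_entry - registered dsi ctrl for debug bus dump
 * @name: name of the dsi ctrl block
 * @base: mapped io base of the dsi ctrl block
 * @list: node in the global sde_dbg_dsi_list
 */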
struct sde_dbg_dsi_ctrl_list_entry {
const char *name;
void __iomem *base;
struct list_head list;
};
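/**
 * struct sde_dbg_debug_bus_common - common fields shared by all debug buses
 * @name: name of the debug bus, matched against the register base name
 * @enable_mask: dump destination mask, in-log and/or in-memory
 * @include_in_deferred_work: whether to include the bus in a deferred dump
 * @flags: bus specific flags, e.g. DBGBUS_FLAGS_DSPP
 * @entries_size: number of entries in the bus table
 * @dumped_content: memory allocated for the in-memory dump
 * @content_idx: read cursor used by the debugfs recovery nodes
 * @content_size: size of the dumped content in u32 units
 */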
struct sde_dbg_debug_bus_common {
char *name;
u32 enable_mask;
bool include_in_deferred_work;
u32 flags;
u32 entries_size;
u32 *dumped_content;
u32 content_idx;
u32 content_size;
};
struct sde_dbg_sde_debug_bus {
struct sde_dbg_debug_bus_common cmn;
struct sde_debug_bus_entry *entries;
u32 top_blk_off;
};
struct sde_dbg_vbif_debug_bus {
struct sde_dbg_debug_bus_common cmn;
struct vbif_debug_bus_entry *entries;
};
struct sde_dbg_dsi_debug_bus {
struct sde_dbg_debug_bus_common cmn;
struct dsi_debug_bus_entry *entries;
};
struct sde_dbg_lutdma_debug_bus {
struct sde_dbg_debug_bus_common cmn;
struct lutdma_debug_bus_entry *entries;
};
/**
* struct sde_dbg_regbuf - wraps buffer and tracking params for register dumps
* @buf: pointer to allocated memory for storing register dumps in hw recovery
* @buf_size: size of the memory allocated
* @len: size of the dump data valid in the buffer
* @rpos: cursor points to the buffer position read by client
* @dump_done: to indicate if dumping to user memory is complete
* @cur_blk: points to the current sde_dbg_reg_base block
*/
struct sde_dbg_regbuf {
char *buf;
int buf_size;
int len;
int rpos;
int dump_done;
struct sde_dbg_reg_base *cur_blk;
};
/**
* struct sde_dbg_base - global sde debug base structure
* @evtlog: event log instance
* @reg_base_list: list of register dumping regions
* @dev: device pointer
* @mutex: mutex to serialize dumps and debugfs access
* @req_dump_blks: list of blocks requested for dumping
* @panic_on_err: whether to kernel panic after triggering dump via debugfs
* @dump_work: work struct for deferring register dump work to separate thread
* @work_panic: panic after dump if internal user passed "panic" special region
* @enable_reg_dump: whether to dump registers into memory, kernel log, or both
* @dbgbus_sde: debug bus structure for the sde
* @dbgbus_vbif_rt: debug bus structure for the realtime vbif
* @dbgbus_dsi: debug bus structure for the dsi
* @dbgbus_lutdma: debug bus structure for the lutdma hw
* @dump_all: dump all entries in register dump
* @dump_secure: dump entries excluding few as it is in secure-session
* @dsi_dbg_bus: dump dsi debug bus register
* @regbuf: buffer data to track the register dumping in hw recovery
* @cur_evt_index: index used for tracking event logs dump in hw recovery
* @debugfs_ctrl: bitmask of debugfs-controlled debug events
* @dump_mode: dump context for the current dump request
*/
static struct sde_dbg_base {
struct sde_dbg_evtlog *evtlog;
struct list_head reg_base_list;
struct device *dev;
struct mutex mutex;
struct sde_dbg_reg_base *req_dump_blks[SDE_DBG_BASE_MAX];
u32 panic_on_err;
struct work_struct dump_work;
bool work_panic;
u32 enable_reg_dump;
struct sde_dbg_sde_debug_bus dbgbus_sde;
struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
struct sde_dbg_dsi_debug_bus dbgbus_dsi;
struct sde_dbg_lutdma_debug_bus dbgbus_lutdma;
bool dump_all;
bool dump_secure;
u32 debugfs_ctrl;
struct sde_dbg_regbuf regbuf;
u32 cur_evt_index;
enum sde_dbg_dump_context dump_mode;
} sde_dbg_base;
static LIST_HEAD(sde_dbg_dsi_list);
static DEFINE_MUTEX(sde_dbg_dsi_mutex);
/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
struct sde_dbg_evtlog *sde_dbg_base_evtlog;
static void _sde_debug_bus_xbar_dump(void __iomem *mem_base,
struct sde_debug_bus_entry *entry, u32 val, u32 block_id_cnt,
u32 test_id_cnt)
{
dev_err(sde_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
entry->wr_addr, entry->block_id + block_id_cnt,
entry->test_id + test_id_cnt, val);
}
static void _sde_debug_bus_lm_dump(void __iomem *mem_base,
struct sde_debug_bus_entry *entry, u32 val, u32 block_id_cnt,
u32 test_id_cnt)
{
if (!(val & 0xFFF000))
return;
dev_err(sde_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
entry->wr_addr, entry->block_id + block_id_cnt,
entry->test_id + test_id_cnt, val);
}
static void _sde_debug_bus_ppb0_dump(void __iomem *mem_base,
struct sde_debug_bus_entry *entry, u32 val, u32 block_id_cnt,
u32 test_id_cnt)
{
if (!(val & BIT(15)))
return;
dev_err(sde_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
entry->wr_addr, entry->block_id + block_id_cnt,
entry->test_id + test_id_cnt, val);
}
static void _sde_debug_bus_ppb1_dump(void __iomem *mem_base,
struct sde_debug_bus_entry *entry, u32 val, u32 block_id_cnt,
u32 test_id_cnt)
{
if (!(val & BIT(15)))
return;
dev_err(sde_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
entry->wr_addr, entry->block_id + block_id_cnt,
entry->test_id + test_id_cnt, val);
}
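/*
 * SDE debug bus table; each row is
 * { wr_addr, block_id, block_id_max_cnt, test_id, test_id_max_cnt, analyzer }
 */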
static struct sde_debug_bus_entry dbg_bus_sde[] = {
{ DBGBUS_SSPP0, 0, 256, 0, 8 },
{ DBGBUS_AXI_INTF, 0, 256, 0, 8 },
{ DBGBUS_SSPP1, 0, 256, 0, 8 },
{ DBGBUS_DSPP, 0, 256, 0, 32 },
{ DBGBUS_PERIPH, 0, 256, 0, 8 },
/* ppb_0 */
{ DBGBUS_DSPP, 31, 1, 0, 1, _sde_debug_bus_ppb0_dump },
{ DBGBUS_DSPP, 33, 1, 0, 1, _sde_debug_bus_ppb0_dump },
{ DBGBUS_DSPP, 35, 1, 0, 1, _sde_debug_bus_ppb0_dump },
{ DBGBUS_DSPP, 42, 1, 0, 1, _sde_debug_bus_ppb0_dump },
{ DBGBUS_DSPP, 47, 1, 0, 1, _sde_debug_bus_ppb0_dump },
{ DBGBUS_DSPP, 49, 1, 0, 1, _sde_debug_bus_ppb0_dump },
/* ppb_1 */
{ DBGBUS_DSPP, 32, 1, 0, 1, _sde_debug_bus_ppb1_dump },
{ DBGBUS_DSPP, 34, 1, 0, 1, _sde_debug_bus_ppb1_dump },
{ DBGBUS_DSPP, 36, 1, 0, 1, _sde_debug_bus_ppb1_dump },
{ DBGBUS_DSPP, 43, 1, 0, 1, _sde_debug_bus_ppb1_dump },
{ DBGBUS_DSPP, 48, 1, 0, 1, _sde_debug_bus_ppb1_dump },
{ DBGBUS_DSPP, 50, 1, 0, 1, _sde_debug_bus_ppb1_dump },
/* crossbar */
{ DBGBUS_DSPP, 0, 1, 0, 1, _sde_debug_bus_xbar_dump },
/* blend */
{ DBGBUS_DSPP, 63, 1, 7, 1, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 70, 1, 7, 1, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 77, 1, 7, 1, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 110, 1, 7, 1, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 96, 1, 7, 1, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 124, 1, 7, 1, _sde_debug_bus_lm_dump }
};
static struct vbif_debug_bus_entry vbif_dbg_bus[] = {
{0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
{0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
{0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
{0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
};
static struct dsi_debug_bus_entry dsi_dbg_bus[] = {
{0, 0x00}, {1, 0x00}, {2, 0x00}, {3, 0x00},
{0, 0x01}, {1, 0x01}, {2, 0x01}, {3, 0x01},
{0, 0x02}, {1, 0x02}, {2, 0x02}, {3, 0x02},
{0, 0x03}, {1, 0x03}, {2, 0x03}, {3, 0x03},
{0, 0x04}, {1, 0x04}, {2, 0x04}, {3, 0x04},
{0, 0x05}, {1, 0x05}, {2, 0x05}, {3, 0x05},
{0, 0x06}, {1, 0x06}, {2, 0x06}, {3, 0x06},
{0, 0x07}, {1, 0x07}, {2, 0x07}, {3, 0x07},
{0, 0x08}, {1, 0x08}, {2, 0x08}, {3, 0x08},
{0, 0x0a}, {1, 0x0a}, {2, 0x0a}, {3, 0x0a},
{0, 0x0b}, {1, 0x0b}, {2, 0x0b}, {3, 0x0b},
{0, 0x0c}, {1, 0x0c}, {2, 0x0c}, {3, 0x0c},
{0, 0x0d}, {1, 0x0d}, {2, 0x0d}, {3, 0x0d},
{0, 0x0e}, {1, 0x0e}, {2, 0x0e}, {3, 0x0e},
{0, 0x0f}, {1, 0x0f}, {2, 0x0f}, {3, 0x0f},
{0, 0x10}, {1, 0x10}, {2, 0x10}, {3, 0x10},
{0, 0x11}, {1, 0x11}, {2, 0x11}, {3, 0x11},
{0, 0x14}, {1, 0x14}, {2, 0x14}, {3, 0x14},
{0, 0x15}, {1, 0x15}, {2, 0x15}, {3, 0x15},
{0, 0x16}, {1, 0x16}, {2, 0x16}, {3, 0x16},
{0, 0x17}, {1, 0x17}, {2, 0x17}, {3, 0x17},
{0, 0x18}, {1, 0x18}, {2, 0x18}, {3, 0x18},
{0, 0x19}, {1, 0x19}, {2, 0x19}, {3, 0x19},
{0, 0x1a}, {1, 0x1a}, {2, 0x1a}, {3, 0x1a},
{0, 0x1b}, {1, 0x1b}, {2, 0x1b}, {3, 0x1b},
{0, 0x1c}, {1, 0x1c}, {2, 0x1c}, {3, 0x1c},
{0, 0x1d}, {1, 0x1d}, {2, 0x1d}, {3, 0x1d},
{0, 0x1e}, {1, 0x1e}, {2, 0x1e}, {3, 0x1e},
{0, 0x1f}, {1, 0x1f}, {2, 0x1f}, {3, 0x1f},
{0, 0x20}, {1, 0x20}, {2, 0x20}, {3, 0x20},
{0, 0x21}, {1, 0x21}, {2, 0x21}, {3, 0x21},
{0, 0x22}, {1, 0x22}, {2, 0x22}, {3, 0x22},
{0, 0x23}, {1, 0x23}, {2, 0x23}, {3, 0x23},
{0, 0x24}, {1, 0x24}, {2, 0x24}, {3, 0x24},
{0, 0x25}, {1, 0x25}, {2, 0x25}, {3, 0x25},
{0, 0x28}, {1, 0x28}, {2, 0x28}, {3, 0x28},
{0, 0x29}, {1, 0x29}, {2, 0x29}, {3, 0x29},
{0, 0x2a}, {1, 0x2a}, {2, 0x2a}, {3, 0x2a},
{0, 0x2b}, {1, 0x2b}, {2, 0x2b}, {3, 0x2b},
{0, 0x2c}, {1, 0x2c}, {2, 0x2c}, {3, 0x2c},
{0, 0x32}, {1, 0x32}, {2, 0x32}, {3, 0x32},
{0, 0x33}, {1, 0x33}, {2, 0x33}, {3, 0x33},
{0, 0x34}, {1, 0x34}, {2, 0x34}, {3, 0x34},
{0, 0x35}, {1, 0x35}, {2, 0x35}, {3, 0x35},
{0, 0x36}, {1, 0x36}, {2, 0x36}, {3, 0x36},
{0, 0x37}, {1, 0x37}, {2, 0x37}, {3, 0x37},
{0, 0x38}, {1, 0x38}, {2, 0x38}, {3, 0x38},
{0, 0x39}, {1, 0x39}, {2, 0x39}, {3, 0x39},
{0, 0x3c}, {0, 0x3d}, {0, 0x3e}, {0, 0x3f},
};
static struct lutdma_debug_bus_entry dbg_bus_lutdma[] = {
{ DBGBUS_LUTDMA_0, false, 1024 },
{ DBGBUS_LUTDMA_0, true, 1024 },
{ DBGBUS_LUTDMA_1, false, 1024 },
{ DBGBUS_LUTDMA_1, true, 1024 },
};
/**
* _sde_power_check - check if a power vote is needed for register access
* @dump_mode: dump context of the current request
* Return: true if a power vote is needed; false otherwise
*/
static inline bool _sde_power_check(enum sde_dbg_dump_context dump_mode)
{
return (dump_mode == SDE_DBG_DUMP_CLK_ENABLED_CTX ||
dump_mode == SDE_DBG_DUMP_IRQ_CTX) ? false : true;
}
/**
* _sde_dump_reg - helper function for dumping a register set's content
* @dump_name: register set name
* @reg_dump_flag: dumping flag controlling in-log/memory dump location
* @base_addr: starting address of io region for calculating offsets to print
* @addr: starting address offset for dumping
* @len_bytes: range of the register set
* @dump_mem: output buffer for memory dump location option
* @from_isr: whether being called from isr context
*/
static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem)
{
u32 in_log, in_mem, len_align, len_padded;
u32 *dump_addr = NULL;
char *end_addr;
int i;
int rc;
if (!len_bytes)
return;
in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n",
dump_name, reg_dump_flag, in_log, in_mem);
if (!in_log && !in_mem)
return;
if (in_log)
dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
dump_name, (unsigned long)(addr - base_addr),
len_bytes);
len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
len_padded = len_align * REG_DUMP_ALIGN;
end_addr = addr + len_bytes;
if (in_mem) {
if (dump_mem && !(*dump_mem))
*dump_mem = devm_kzalloc(sde_dbg_base.dev, len_padded,
GFP_KERNEL);
if (dump_mem && *dump_mem) {
dump_addr = *dump_mem;
dev_info(sde_dbg_base.dev,
"%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
dump_name, dump_addr, len_padded,
(unsigned long)(addr - base_addr));
} else {
in_mem = 0;
pr_err("dump_mem: kzalloc fails!\n");
}
}
if (_sde_power_check(sde_dbg_base.dump_mode)) {
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power %d\n", rc);
return;
}
}
for (i = 0; i < len_align; i++) {
u32 x0, x4, x8, xc;
x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
if (in_log)
dev_info(sde_dbg_base.dev,
"0x%lx : %08x %08x %08x %08x\n",
(unsigned long)(addr - base_addr),
x0, x4, x8, xc);
if (dump_addr) {
dump_addr[i * 4] = x0;
dump_addr[i * 4 + 1] = x4;
dump_addr[i * 4 + 2] = x8;
dump_addr[i * 4 + 3] = xc;
}
addr += REG_DUMP_ALIGN;
}
if (_sde_power_check(sde_dbg_base.dump_mode))
pm_runtime_put_sync(sde_dbg_base.dev);
}
/**
* _sde_dbg_get_dump_range - helper to retrieve dump length for a range node
* @range_node: range node to dump
* @max_offset: max offset of the register base
* @Return: length
*/
static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node,
size_t max_offset)
{
u32 length = 0;
if (range_node->start == 0 && range_node->end == 0) {
length = max_offset;
} else if (range_node->start < max_offset) {
if (range_node->end > max_offset)
length = max_offset - range_node->start;
else if (range_node->start < range_node->end)
length = range_node->end - range_node->start;
}
return length;
}
static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
struct list_head *b)
{
struct sde_dbg_reg_range *ar, *br;
if (!a || !b)
return 0;
ar = container_of(a, struct sde_dbg_reg_range, head);
br = container_of(b, struct sde_dbg_reg_range, head);
return ar->offset.start - br->offset.start;
}
static const char *const exclude_modules[] = {
"vbif_rt",
"vbif_nrt",
"wb_2",
NULL
};
static bool is_block_exclude(char **modules, char *name)
{
char **ptr = modules;
while (*ptr != NULL) {
if (!strcmp(name, *ptr))
return true;
++ptr;
}
return false;
}
/**
* _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base
* @dbg: register blk base structure
* @reg_dump_flag: dump target, memory, kernel log, or both
* @dump_secure: flag to indicate dumping in secure-session
*/
static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
u32 reg_dump_flag, bool dump_secure)
{
char *addr;
size_t len;
struct sde_dbg_reg_range *range_node;
if (!dbg || !(dbg->base || dbg->cb)) {
pr_err("dbg base is null!\n");
return;
}
dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__,
dbg->name);
if (dbg->cb) {
dbg->cb(dbg->cb_ptr);
/* If there is a list to dump the registers by ranges, use the ranges */
} else if (!list_empty(&dbg->sub_range_list)) {
/* sort the list by start address first */
list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp);
list_for_each_entry(range_node, &dbg->sub_range_list, head) {
len = _sde_dbg_get_dump_range(&range_node->offset,
dbg->max_offset);
addr = dbg->base + range_node->offset.start;
if (dump_secure &&
is_block_exclude((char**)exclude_modules,
range_node->range_name))
continue;
pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
range_node->range_name,
addr, range_node->offset.start,
range_node->offset.end);
_sde_dump_reg(range_node->range_name, reg_dump_flag,
dbg->base, addr, len,
&range_node->reg_dump);
}
} else {
/* If there is no list to dump ranges, dump all registers */
dev_info(sde_dbg_base.dev,
"Ranges not found, will dump full registers\n");
dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base,
dbg->max_offset);
addr = dbg->base;
len = dbg->max_offset;
_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
&dbg->reg_dump);
}
}
/**
* _sde_dump_reg_by_blk - dump a named register base region
* @blk_name: register blk name
* @dump_secure: flag to indicate dumping in secure-session
*/
static void _sde_dump_reg_by_blk(const char *blk_name, bool dump_secure)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_reg_base *blk_base;
if (!dbg_base)
return;
list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) {
if (strlen(blk_base->name) &&
!strcmp(blk_base->name, blk_name)) {
_sde_dump_reg_by_ranges(blk_base,
dbg_base->enable_reg_dump, dump_secure);
break;
}
}
}
/**
* _sde_dump_reg_all - dump all register regions
*/
static void _sde_dump_reg_all(bool dump_secure)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_reg_base *blk_base;
if (!dbg_base)
return;
list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) {
if (!strlen(blk_base->name))
continue;
if (dump_secure &&
is_block_exclude((char **)exclude_modules,
blk_base->name))
continue;
_sde_dump_reg_by_blk(blk_base->name, dump_secure);
}
}
/**
* _sde_dump_get_blk_addr - retrieve register block address by name
* @blk_name: register blk name
* @Return: register blk base, or NULL
*/
static struct sde_dbg_reg_base *_sde_dump_get_blk_addr(const char *blk_name)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_reg_base *blk_base;
list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
if (strlen(blk_base->name) && !strcmp(blk_base->name, blk_name))
return blk_base;
return NULL;
}
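/**
 * _sde_dbg_dump_sde_bus_entry - dump all test points of one sde bus entry
 * @head: debug bus entry describing the block id and test point ranges
 * @mem_base: base address of the sde register region
 * @dump_addr: in-memory dump location, or NULL
 * @bus: sde debug bus being dumped
 * @in_log: whether to print the values to the kernel log
 * @in_mem: whether to store the values to dump_addr
 */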
static void _sde_dbg_dump_sde_bus_entry(
struct sde_debug_bus_entry *head, void __iomem *mem_base,
u32 *dump_addr, struct sde_dbg_sde_debug_bus *bus, bool in_log,
bool in_mem)
{
int i, j;
u32 status = 0;
u32 offset;
if (!dump_addr && !in_log && !in_mem)
return;
for (i = 0; i < head->block_id_max_cnt; i++) {
for (j = 0; j < head->test_id_max_cnt; j++) {
if (head->test_id + j > EXT_TEST_GROUP_SEL_EN)
writel_relaxed(TEST_EXT_MASK(
(head->block_id + i),
(head->test_id + j)),
mem_base + head->wr_addr);
else
writel_relaxed(TEST_MASK((head->block_id + i),
(head->test_id + j)),
mem_base + head->wr_addr);
wmb(); /* make sure test bits were written */
if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
offset = DBGBUS_DSPP_STATUS;
/* keep DSPP test point enabled */
if (head->wr_addr != DBGBUS_DSPP)
writel_relaxed(DSPP_DEBUGBUS_CTRL_EN,
mem_base + DBGBUS_DSPP);
} else {
offset = head->wr_addr + 0x4;
}
status = readl_relaxed(mem_base + offset);
if (in_log)
dev_info(sde_dbg_base.dev,
"waddr=0x%x blk=%d tst=%d val=0x%x\n",
head->wr_addr,
head->block_id + i,
head->test_id + j, status);
if (dump_addr && in_mem) {
*dump_addr++ = head->wr_addr;
*dump_addr++ = head->block_id + i;
*dump_addr++ = head->test_id + j;
*dump_addr++ = status;
}
if (head->analyzer)
head->analyzer(mem_base, head, status, i, j);
/* Disable debug bus once we are done */
writel_relaxed(0x0, mem_base + head->wr_addr);
if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
head->wr_addr != DBGBUS_DSPP)
writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
}
}
}
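/**
 * _sde_dbg_dump_sde_dbg_bus - dump the sde debug bus described by bus->entries
 * @bus: sde debug bus to dump
 */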
static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
{
bool in_log, in_mem;
u32 **dump_mem = NULL;
u32 *dump_addr = NULL;
u32 bus_size;
struct sde_debug_bus_entry *head;
struct sde_debug_bus_entry *dbg_bus;
int list_size = 0;
int i;
void __iomem *mem_base = NULL;
struct sde_dbg_reg_base *reg_base;
int rc;
if (!bus || !bus->cmn.entries_size)
return;
list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
reg_base_head)
if (strlen(reg_base->name) &&
!strcmp(reg_base->name, bus->cmn.name))
mem_base = reg_base->base + bus->top_blk_off;
if (!mem_base) {
pr_err("unable to find mem_base for %s\n", bus->cmn.name);
return;
}
dbg_bus = bus->entries;
bus_size = bus->cmn.entries_size;
dump_mem = &bus->cmn.dumped_content;
if (!dump_mem || !dbg_bus || !bus_size)
return;
/* allocate memory for each test id */
for (i = 0; i < bus_size; i++) {
head = dbg_bus + i;
list_size += (head->block_id_max_cnt * head->test_id_max_cnt);
}
list_size *= 16;
in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
if (!in_log && !in_mem)
return;
dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
bus->cmn.name);
if (in_mem) {
if (!(*dump_mem)) {
*dump_mem = devm_kzalloc(sde_dbg_base.dev, list_size,
GFP_KERNEL);
bus->cmn.content_size = list_size / sizeof(u32);
}
if (*dump_mem) {
dump_addr = *dump_mem;
dev_info(sde_dbg_base.dev,
"%s: start_addr:0x%pK len:0x%x\n",
__func__, dump_addr, list_size);
} else {
in_mem = false;
pr_err("dump_mem: allocation fails\n");
}
}
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power %d\n", rc);
return;
}
for (i = 0; i < bus_size; i++) {
head = dbg_bus + i;
_sde_dbg_dump_sde_bus_entry(head, mem_base, dump_addr,
bus, in_log, in_mem);
if (dump_addr)
dump_addr += (head->block_id_max_cnt *
head->test_id_max_cnt * 4);
}
pm_runtime_put_sync(sde_dbg_base.dev);
dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
bus->cmn.name);
}
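/**
 * _sde_dbg_dump_vbif_debug_bus_entry - dump the test points of one vbif entry
 * @head: vbif debug bus entry to dump
 * @mem_base: base address of the vbif register region
 * @dump_addr: in-memory dump location, or NULL
 * @in_log: whether to print the values to the kernel log
 * Return: number of u32 words written to dump_addr
 */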
static int _sde_dbg_dump_vbif_debug_bus_entry(
struct vbif_debug_bus_entry *head, void __iomem *mem_base,
u32 *dump_addr, bool in_log)
{
int i, j, count = 0;
u32 val;
if (!dump_addr && !in_log)
return 0;
for (i = 0; i < head->block_cnt; i++) {
writel_relaxed(1 << (i + head->bit_offset),
mem_base + head->block_bus_addr);
/* make sure that the current bus block is enabled */
wmb();
for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
writel_relaxed(j, mem_base + head->block_bus_addr + 4);
/* make sure that test point is enabled */
wmb();
val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
if (dump_addr) {
*dump_addr++ = head->block_bus_addr;
*dump_addr++ = i;
*dump_addr++ = j;
*dump_addr++ = val;
count += DUMP_CLMN_COUNT;
}
if (in_log)
dev_info(sde_dbg_base.dev,
"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
head->block_bus_addr, i, j, val);
}
}
return count;
}
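/**
 * _sde_dbg_dump_vbif_dbg_bus - dump vbif error info and the vbif debug bus
 * @bus: vbif debug bus to dump
 */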
static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
{
bool in_log, in_mem;
u32 **dump_mem = NULL;
u32 *dump_addr = NULL;
u32 value, d0, d1;
unsigned long reg, reg1, reg2;
struct vbif_debug_bus_entry *head;
int i, list_size = 0;
void __iomem *mem_base = NULL;
struct vbif_debug_bus_entry *dbg_bus;
u32 bus_size;
struct sde_dbg_reg_base *reg_base;
int rc, count;
if (!bus || !bus->cmn.entries_size)
return;
list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
reg_base_head)
if (strlen(reg_base->name) &&
!strcmp(reg_base->name, bus->cmn.name))
mem_base = reg_base->base;
if (!mem_base) {
pr_err("unable to find mem_base for %s\n", bus->cmn.name);
return;
}
dbg_bus = bus->entries;
bus_size = bus->cmn.entries_size;
dump_mem = &bus->cmn.dumped_content;
dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
bus->cmn.name);
if (!dump_mem || !dbg_bus || !bus_size)
return;
/* allocate memory for each test point */
for (i = 0; i < bus_size; i++) {
head = dbg_bus + i;
list_size += (head->block_cnt * (head->test_pnt_cnt -
head->test_pnt_start));
}
/* 4 bytes * 4 entries for each test point */
list_size *= DUMP_CLMN_COUNT * sizeof(u32);
in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
if (!in_log && !in_mem)
return;
if (in_mem) {
if (!(*dump_mem)) {
*dump_mem = devm_kzalloc(sde_dbg_base.dev, list_size,
GFP_KERNEL);
bus->cmn.content_size = list_size / sizeof(u32);
}
if (*dump_mem) {
dump_addr = *dump_mem;
dev_info(sde_dbg_base.dev,
"%s: start_addr:0x%pK len:0x%x\n",
__func__, dump_addr, list_size);
} else {
in_mem = false;
pr_err("dump_mem: allocation fails\n");
}
}
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power %d\n", rc);
return;
}
value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
/* make sure that vbif core is on */
wmb();
/**
* Extract VBIF error info based on XIN halt and error status.
* If the XIN client is not in HALT state, or an error is detected,
* then retrieve the VBIF error info for it.
*/
reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
dev_err(sde_dbg_base.dev,
"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
reg, reg1, reg2);
reg >>= 16;
reg &= ~(reg1 | reg2);
for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
if (!test_bit(0, &reg)) {
writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
/* make sure reg write goes through */
wmb();
d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
dev_err(sde_dbg_base.dev,
"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
i, d0, d1);
}
reg >>= 1;
}
for (i = 0; i < bus_size; i++) {
head = dbg_bus + i;
writel_relaxed(0, mem_base + head->disable_bus_addr);
writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
/* make sure that other bus is off */
wmb();
count = _sde_dbg_dump_vbif_debug_bus_entry(head, mem_base,
dump_addr, in_log);
if (dump_addr && (count > 0))
dump_addr += count;
}
pm_runtime_put_sync(sde_dbg_base.dev);
dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
bus->cmn.name);
}
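/**
 * _sde_dbg_dump_dsi_dbg_bus - dump the dsi debug bus of each registered ctrl
 * @bus: dsi debug bus to dump
 */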
static void _sde_dbg_dump_dsi_dbg_bus(struct sde_dbg_dsi_debug_bus *bus)
{
struct sde_dbg_dsi_ctrl_list_entry *entry;
struct list_head *list;
int list_size = 0;
u32 reg;
bool in_log, in_mem;
u32 **dump_mem = NULL;
u32 *dump_addr = NULL;
u32 *end_addr;
struct dsi_debug_bus_entry *dbg_bus;
u32 bus_size;
int i, rc, dsi_idx = 0;
if (!bus || !bus->cmn.entries_size)
return;
dbg_bus = bus->entries;
bus_size = bus->cmn.entries_size;
dump_mem = &bus->cmn.dumped_content;
if (!dump_mem || !dbg_bus || list_empty(&sde_dbg_dsi_list))
return;
in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
if (!in_log && !in_mem)
return;
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power %d\n", rc);
return;
}
dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
bus->cmn.name);
mutex_lock(&sde_dbg_dsi_mutex);
if (in_mem) {
/* 4 fields of 4 bytes each, per table entry, per dsi ctrl */
list_for_each(list, &sde_dbg_dsi_list)
list_size++;
list_size *= bus_size * sizeof(u32) * DUMP_CLMN_COUNT;
if (!(*dump_mem)) {
*dump_mem = devm_kzalloc(sde_dbg_base.dev, list_size,
GFP_KERNEL);
bus->cmn.content_size = list_size / sizeof(u32);
}
if (*dump_mem) {
dump_addr = *dump_mem;
end_addr = *dump_mem + bus->cmn.content_size;
dev_info(sde_dbg_base.dev,
"%s: start_addr:0x%pK len:0x%x\n",
__func__, dump_addr, list_size);
} else {
in_mem = false;
pr_err("dump_mem: allocation fails\n");
}
}
list_for_each_entry(entry, &sde_dbg_dsi_list, list) {
dev_info(sde_dbg_base.dev, "%s start_addr:0x%pK\n",
entry->name, dump_addr);
for (i = 0; i < bus_size; i++) {
if (!entry->base)
break;
reg = ((dbg_bus[i].mux << 12) |
(dbg_bus[i].sel << 4) | BIT(0));
writel_relaxed(reg, entry->base + DSI_DEBUG_BUS_CTRL);
wmb(); /* make sure debug-bus test point is enabled */
reg = readl_relaxed(entry->base + DSI_DEBUG_BUS);
if (dump_addr && (dump_addr < end_addr)) {
*dump_addr++ = dsi_idx;
*dump_addr++ = dbg_bus[i].mux;
*dump_addr++ = dbg_bus[i].sel;
*dump_addr++ = reg;
}
if (in_log)
dev_info(sde_dbg_base.dev,
"mux:0x%x sel:0x%x status:0x%x\n",
dbg_bus[i].mux, dbg_bus[i].sel, reg);
}
/* Disable debug bus once we are done */
writel_relaxed(0, entry->base + DSI_DEBUG_BUS_CTRL);
dsi_idx++;
}
mutex_unlock(&sde_dbg_dsi_mutex);
pm_runtime_put_sync(sde_dbg_base.dev);
dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
bus->cmn.name);
}
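/**
 * _sde_dbg_dump_lutdma_dbg_bus - dump the lutdma read/write engine debug bus
 * @bus: lutdma debug bus to dump
 */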
static void _sde_dbg_dump_lutdma_dbg_bus(struct sde_dbg_lutdma_debug_bus *bus)
{
void __iomem *mem_base = NULL;
struct sde_dbg_reg_base *reg_base;
struct lutdma_debug_bus_entry *entries;
bool dump_in_log, dump_in_mem;
u32 **dump_mem = NULL;
u32 *dump_addr = NULL;
u32 i, j, entry_count, addr, count, val, engine_bit, dump_mem_size = 0;
int rc;
if (!bus || !bus->cmn.entries_size)
return;
list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
reg_base_head) {
if (strlen(reg_base->name) &&
!strcmp(reg_base->name, bus->cmn.name))
mem_base = reg_base->base;
}
if (!mem_base) {
pr_err("unable to find mem_base for %s\n", bus->cmn.name);
return;
}
entries = bus->entries;
entry_count = bus->cmn.entries_size;
dump_in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
dump_in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
dump_mem = &bus->cmn.dumped_content;
if (!dump_in_log && !dump_in_mem)
return;
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power %d\n", rc);
return;
}
dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
bus->cmn.name);
if (dump_in_mem) {
if (*dump_mem == NULL) {
for (i = 0; i < entry_count; i++)
dump_mem_size += (entries[i].indicies *
sizeof(u32));
/* ensure enough chunks for debugfs dumping */
dump_mem_size += dump_mem_size % (DUMP_CLMN_COUNT * 4);
*dump_mem = devm_kzalloc(sde_dbg_base.dev,
dump_mem_size, GFP_KERNEL);
bus->cmn.content_size = dump_mem_size / sizeof(u32);
}
if (*dump_mem) {
dump_addr = *dump_mem;
dev_info(sde_dbg_base.dev,
"%s: start_addr:0x%pK len:0x%x\n",
__func__, dump_addr, dump_mem_size);
} else {
dump_in_mem = false;
pr_err("dump_mem: allocation fails\n");
}
}
for (i = 0; i < entry_count; i++) {
addr = entries[i].wr_addr;
count = entries[i].indicies;
engine_bit = entries[i].read_engine ? BIT(14) : 0;
for (j = 0 ; j < count; j++) {
val = (BIT(0) | engine_bit | (j << 1)) & 0xFFFF;
writel_relaxed(val, mem_base + addr);
wmb(); /* Ensure dbgbus setup occurs before read */
val = readl_relaxed(mem_base + addr + 0x4);
if (dump_in_log)
dev_info(sde_dbg_base.dev,
"lutdma_waddr=0x%x index=0x%x val=0x%x\n",
addr, j, val);
if (dump_in_mem)
dump_addr[i * count + j] = val;
}
/* Disable debug bus once we are done */
writel_relaxed(0, mem_base + addr);
}
pm_runtime_put_sync(sde_dbg_base.dev);
dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
bus->cmn.name);
}
/**
* _sde_dump_array - dump array of register bases
* @blk_arr: array of register base pointers
* @len: length of blk_arr
* @do_panic: whether to trigger a panic after dumping
* @name: string indicating origin of dump
* @dump_dbgbus_sde: whether to dump the sde debug bus
* @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
* @dump_dbgbus_dsi: whether to dump the dsi debug bus
* @dump_all: dump evtlog + regs
* @dump_secure: flag to indicate dumping in secure-session
*/
static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde,
bool dump_dbgbus_vbif_rt, bool dump_dbgbus_dsi, bool dump_all,
bool dump_secure)
{
int i;
mutex_lock(&sde_dbg_base.mutex);
if (dump_all)
sde_evtlog_dump_all(sde_dbg_base.evtlog);
if (dump_all || !blk_arr || !len) {
_sde_dump_reg_all(dump_secure);
} else {
for (i = 0; i < len; i++) {
if (blk_arr[i] != NULL)
_sde_dump_reg_by_ranges(blk_arr[i],
sde_dbg_base.enable_reg_dump,
dump_secure);
}
}
if (dump_dbgbus_sde)
_sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
if (dump_dbgbus_vbif_rt)
_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
if (dump_all || dump_dbgbus_dsi)
_sde_dbg_dump_dsi_dbg_bus(&sde_dbg_base.dbgbus_dsi);
if (dump_all || dump_dbgbus_sde)
_sde_dbg_dump_lutdma_dbg_bus(&sde_dbg_base.dbgbus_lutdma);
if (do_panic && sde_dbg_base.panic_on_err)
panic(name);
mutex_unlock(&sde_dbg_base.mutex);
}
/**
* _sde_dump_work - deferred dump work function
* @work: work structure
*/
static void _sde_dump_work(struct work_struct *work)
{
_sde_dump_array(sde_dbg_base.req_dump_blks,
ARRAY_SIZE(sde_dbg_base.req_dump_blks),
sde_dbg_base.work_panic, "evtlog_workitem",
sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work,
sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work,
sde_dbg_base.dbgbus_dsi.cmn.include_in_deferred_work,
sde_dbg_base.dump_all, sde_dbg_base.dump_secure);
}
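/**
 * sde_dbg_dump - trigger dumping of all sde_dbg facilities
 * @dump_mode: dump context; irq context dumps are deferred to a work item
 * @name: string indicating origin of dump
 * @...: NULL terminated list of register block names; the special names
 * "all", "dbg_bus", "vbif_dbg_bus", "dsi_dbg_bus", "panic" and "secure"
 * select additional dump behavior
 */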
void sde_dbg_dump(enum sde_dbg_dump_context dump_mode, const char *name, ...)
{
int i, index = 0;
bool do_panic = false;
bool dump_dbgbus_sde = false;
bool dump_dbgbus_vbif_rt = false;
bool dump_dbgbus_dsi = false;
bool dump_all = false;
bool dump_secure = false;
va_list args;
char *blk_name = NULL;
struct sde_dbg_reg_base *blk_base = NULL;
struct sde_dbg_reg_base **blk_arr;
u32 blk_len;
if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_ALWAYS))
return;
if ((dump_mode == SDE_DBG_DUMP_IRQ_CTX) &&
work_pending(&sde_dbg_base.dump_work))
return;
blk_arr = &sde_dbg_base.req_dump_blks[0];
blk_len = ARRAY_SIZE(sde_dbg_base.req_dump_blks);
memset(sde_dbg_base.req_dump_blks, 0,
sizeof(sde_dbg_base.req_dump_blks));
sde_dbg_base.dump_all = false;
sde_dbg_base.dump_mode = dump_mode;
va_start(args, name);
i = 0;
while ((blk_name = va_arg(args, char*))) {
if (i++ >= SDE_EVTLOG_MAX_DATA) {
pr_err("could not parse all dump arguments\n");
break;
}
if (IS_ERR_OR_NULL(blk_name))
break;
blk_base = _sde_dump_get_blk_addr(blk_name);
if (blk_base) {
if (index < blk_len) {
blk_arr[index] = blk_base;
index++;
} else {
pr_err("insufficient space to to dump %s\n",
blk_name);
}
}
if (!strcmp(blk_name, "all"))
dump_all = true;
if (!strcmp(blk_name, "dbg_bus"))
dump_dbgbus_sde = true;
if (!strcmp(blk_name, "vbif_dbg_bus"))
dump_dbgbus_vbif_rt = true;
if (!strcmp(blk_name, "dsi_dbg_bus"))
dump_dbgbus_dsi = true;
if (!strcmp(blk_name, "panic"))
do_panic = true;
if (!strcmp(blk_name, "secure"))
dump_secure = true;
}
va_end(args);
if (dump_mode == SDE_DBG_DUMP_IRQ_CTX) {
/* schedule work to dump later */
sde_dbg_base.work_panic = do_panic;
sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work =
dump_dbgbus_sde;
sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
dump_dbgbus_vbif_rt;
sde_dbg_base.dbgbus_dsi.cmn.include_in_deferred_work =
dump_dbgbus_dsi;
sde_dbg_base.dump_all = dump_all;
schedule_work(&sde_dbg_base.dump_work);
} else {
_sde_dump_array(blk_arr, blk_len, do_panic, name,
dump_dbgbus_sde, dump_dbgbus_vbif_rt,
dump_dbgbus_dsi, dump_all, dump_secure);
}
}
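/**
 * sde_dbg_ctrl - trigger debugfs-controlled debug events
 * @name: event name
 * @...: NULL terminated list of event names; "stop_ftrace", "panic_underrun"
 * and "reset_hw_panic" are handled when enabled in debugfs_ctrl
 */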
void sde_dbg_ctrl(const char *name, ...)
{
int i = 0;
va_list args;
char *blk_name = NULL;
/* no debugfs controlled events are enabled, just return */
if (!sde_dbg_base.debugfs_ctrl)
return;
va_start(args, name);
while ((blk_name = va_arg(args, char*))) {
if (i++ >= SDE_EVTLOG_MAX_DATA) {
pr_err("could not parse all dbg arguments\n");
break;
}
if (IS_ERR_OR_NULL(blk_name))
break;
if (!strcmp(blk_name, "stop_ftrace") &&
sde_dbg_base.debugfs_ctrl &
DBG_CTRL_STOP_FTRACE) {
pr_debug("tracing off\n");
tracing_off();
}
if (!strcmp(blk_name, "panic_underrun") &&
sde_dbg_base.debugfs_ctrl &
DBG_CTRL_PANIC_UNDERRUN) {
pr_err("panic underrun\n");
SDE_DBG_DUMP_WQ("all", "dbg_bus", "vbif_dbg_bus",
"panic");
}
if (!strcmp(blk_name, "reset_hw_panic") &&
sde_dbg_base.debugfs_ctrl &
DBG_CTRL_RESET_HW_PANIC) {
pr_debug("reset hw panic\n");
panic("reset_hw");
}
}
va_end(args);
}
#ifdef CONFIG_DEBUG_FS
/*
* sde_dbg_debugfs_open - debugfs open handler for evtlog dump
* @inode: debugfs inode
* @file: file handle
*/
static int sde_dbg_debugfs_open(struct inode *inode, struct file *file)
{
if (!inode || !file)
return -EINVAL;
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = inode->i_private;
mutex_lock(&sde_dbg_base.mutex);
sde_dbg_base.cur_evt_index = 0;
sde_dbg_base.evtlog->first = sde_dbg_base.evtlog->curr + 1;
sde_dbg_base.evtlog->last =
sde_dbg_base.evtlog->first + SDE_EVTLOG_ENTRY;
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
/*
* sde_dbg_reg_base_open - debugfs open handler for reg base
* @inode: debugfs inode
* @file: file handle
*/
static int sde_dbg_reg_base_open(struct inode *inode, struct file *file)
{
char base_name[64] = {0};
struct sde_dbg_reg_base *reg_base = NULL;
if (!inode || !file)
return -EINVAL;
snprintf(base_name, sizeof(base_name), "%s",
file->f_path.dentry->d_iname);
base_name[strlen(file->f_path.dentry->d_iname) - 4] = '\0';
reg_base = _sde_dump_get_blk_addr(base_name);
if (!reg_base) {
pr_err("error: unable to locate base %s\n",
base_name);
return -EINVAL;
}
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = reg_base;
return 0;
}
/**
* sde_evtlog_dump_read - debugfs read handler for evtlog dump
* @file: file handler
* @buff: user buffer content for debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
size_t count, loff_t *ppos)
{
ssize_t len = 0;
char evtlog_buf[SDE_EVTLOG_BUF_MAX];
if (!buff || !ppos)
return -EINVAL;
mutex_lock(&sde_dbg_base.mutex);
len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog,
evtlog_buf, SDE_EVTLOG_BUF_MAX,
!sde_dbg_base.cur_evt_index, true);
sde_dbg_base.cur_evt_index++;
mutex_unlock(&sde_dbg_base.mutex);
if (len < 0 || len > count) {
pr_err("len is more than user buffer size\n");
return 0;
}
if (copy_to_user(buff, evtlog_buf, len))
return -EFAULT;
*ppos += len;
return len;
}
/**
* sde_evtlog_dump_write - debugfs write handler for evtlog dump
* @file: file handler
* @user_buf: user buffer content from debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_evtlog_dump_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
_sde_dump_array(NULL, 0, sde_dbg_base.panic_on_err, "dump_debugfs",
true, true, true, true, false);
return count;
}
static const struct file_operations sde_evtlog_fops = {
.open = sde_dbg_debugfs_open,
.read = sde_evtlog_dump_read,
.write = sde_evtlog_dump_write,
};
/**
* sde_dbg_ctrl_read - debugfs read handler for debug ctrl read
* @file: file handler
* @buff: user buffer content for debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
size_t count, loff_t *ppos)
{
ssize_t len = 0;
char buf[24] = {'\0'};
if (!buff || !ppos)
return -EINVAL;
if (*ppos)
return 0; /* the end */
len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl);
pr_debug("%s: ctrl:0x%x len:0x%zx\n",
__func__, sde_dbg_base.debugfs_ctrl, len);
if (len < 0 || len >= sizeof(buf))
return 0;
if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
pr_err("error copying the buffer! count:0x%zx\n", count);
return -EFAULT;
}
*ppos += len; /* increase offset */
return len;
}
/**
* sde_dbg_ctrl_write - debugfs write handler for debug ctrl
* @file: file handler
* @user_buf: user buffer content from debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_dbg_ctrl_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
u32 dbg_ctrl = 0;
char buf[24];
if (!file) {
pr_err("DbgDbg: %s: error no file --\n", __func__);
return -EINVAL;
}
if (count >= sizeof(buf))
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
if (kstrtouint(buf, 0, &dbg_ctrl)) {
pr_err("%s: error in the number of bytes\n", __func__);
return -EFAULT;
}
pr_debug("dbg_ctrl_read:0x%x\n", dbg_ctrl);
sde_dbg_base.debugfs_ctrl = dbg_ctrl;
return count;
}
static const struct file_operations sde_dbg_ctrl_fops = {
.open = sde_dbg_debugfs_open,
.read = sde_dbg_ctrl_read,
.write = sde_dbg_ctrl_write,
};
static int sde_recovery_regdump_open(struct inode *inode, struct file *file)
{
if (!inode || !file)
return -EINVAL;
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = inode->i_private;
/* initialize to start position */
sde_dbg_base.regbuf.rpos = 0;
sde_dbg_base.regbuf.cur_blk = NULL;
sde_dbg_base.regbuf.dump_done = false;
return 0;
}
static ssize_t _sde_dbg_dump_reg_rows(u32 reg_start,
void *start, int count, char *buf, int buflen)
{
int i;
int len = 0;
u32 *addr;
u32 reg_offset = 0;
int rows = min(count / DUMP_CLMN_COUNT, DUMP_MAX_LINES_PER_BLK);
if (!start || !buf) {
pr_err("invalid address for dump\n");
return len;
}
if (buflen < PAGE_SIZE) {
pr_err("buffer too small for dump\n");
return len;
}
for (i = 0; i < rows; i++) {
addr = start + (i * DUMP_CLMN_COUNT * sizeof(u32));
reg_offset = reg_start + (i * DUMP_CLMN_COUNT * sizeof(u32));
if (buflen < (len + DUMP_LINE_SIZE))
break;
len += snprintf(buf + len, DUMP_LINE_SIZE,
"0x%.8X | %.8X %.8X %.8X %.8X\n",
reg_offset, addr[0], addr[1], addr[2], addr[3]);
}
return len;
}
static int _sde_dbg_recovery_dump_sub_blk(struct sde_dbg_reg_range *sub_blk,
char *buf, int buflen)
{
int count = 0;
int len = 0;
if (!sub_blk || (buflen < PAGE_SIZE)) {
pr_err("invalid params buflen:%d subblk valid:%d\n",
buflen, sub_blk != NULL);
return len;
}
count = (sub_blk->offset.end - sub_blk->offset.start) / (sizeof(u32));
if (count < DUMP_CLMN_COUNT) {
pr_err("invalid count for register dumps :%d\n", count);
return len;
}
len += snprintf(buf + len, DUMP_LINE_SIZE,
"------------------------------------------\n");
len += snprintf(buf + len, DUMP_LINE_SIZE,
"**** sub block [%s] - size:%d ****\n",
sub_blk->range_name, count);
len += _sde_dbg_dump_reg_rows(sub_blk->offset.start, sub_blk->reg_dump,
count, buf + len, buflen - len);
return len;
}
static int _sde_dbg_recovery_dump_reg_blk(struct sde_dbg_reg_base *blk,
char *buf, int buf_size, int *out_len)
{
int ret = 0;
int len = 0;
struct sde_dbg_reg_range *sub_blk;
if (buf_size < PAGE_SIZE) {
pr_err("buffer too small for dump\n");
return len;
}
if (!blk || !strlen(blk->name)) {
len += snprintf(buf + len, DUMP_LINE_SIZE,
"Found one invalid block - skip dump\n");
*out_len = len;
return len;
}
len += snprintf(buf + len, DUMP_LINE_SIZE,
"******************************************\n");
len += snprintf(buf + len, DUMP_LINE_SIZE,
"==========================================\n");
len += snprintf(buf + len, DUMP_LINE_SIZE,
"*********** DUMP of %s block *************\n",
blk->name);
len += snprintf(buf + len, DUMP_LINE_SIZE,
"count:%ld max-off:0x%lx has_sub_blk:%d\n",
blk->cnt, blk->max_offset,
!list_empty(&blk->sub_range_list));
if (list_empty(&blk->sub_range_list)) {
len += _sde_dbg_dump_reg_rows(0, blk->reg_dump,
blk->max_offset / sizeof(u32), buf + len,
buf_size - len);
} else {
list_for_each_entry(sub_blk, &blk->sub_range_list, head)
len += _sde_dbg_recovery_dump_sub_blk(sub_blk,
buf + len, buf_size - len);
}
*out_len = len;
return ret;
}
static ssize_t sde_recovery_regdump_read(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
ssize_t len = 0;
int usize = 0;
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_regbuf *rbuf = &dbg_base->regbuf;
mutex_lock(&sde_dbg_base.mutex);
if (!rbuf->dump_done && !rbuf->cur_blk) {
if (!rbuf->buf)
rbuf->buf = kzalloc(DUMP_BUF_SIZE, GFP_KERNEL);
if (!rbuf->buf) {
len = -ENOMEM;
goto err;
}
rbuf->rpos = 0;
rbuf->len = 0;
rbuf->buf_size = DUMP_BUF_SIZE;
rbuf->cur_blk = list_first_entry(&dbg_base->reg_base_list,
struct sde_dbg_reg_base, reg_base_head);
if (rbuf->cur_blk)
_sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk,
rbuf->buf,
rbuf->buf_size,
&rbuf->len);
pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ?
rbuf->cur_blk->name : "unknown", rbuf->len);
} else if (rbuf->len == rbuf->rpos && rbuf->cur_blk) {
rbuf->rpos = 0;
rbuf->len = 0;
rbuf->buf_size = DUMP_BUF_SIZE;
if (rbuf->cur_blk == list_last_entry(&dbg_base->reg_base_list,
struct sde_dbg_reg_base, reg_base_head))
rbuf->cur_blk = NULL;
else
rbuf->cur_blk = list_next_entry(rbuf->cur_blk,
reg_base_head);
if (rbuf->cur_blk)
_sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk,
rbuf->buf,
rbuf->buf_size,
&rbuf->len);
pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ?
rbuf->cur_blk->name : "unknown", rbuf->len);
}
if ((rbuf->len - rbuf->rpos) > 0) {
usize = ((rbuf->len - rbuf->rpos) > count) ?
count : rbuf->len - rbuf->rpos;
if (copy_to_user(ubuf, rbuf->buf + rbuf->rpos, usize)) {
len = -EFAULT;
goto err;
}
len = usize;
rbuf->rpos += usize;
*ppos += usize;
}
if (!len && rbuf->buf)
rbuf->dump_done = true;
err:
mutex_unlock(&sde_dbg_base.mutex);
return len;
}
static const struct file_operations sde_recovery_reg_fops = {
.open = sde_recovery_regdump_open,
.read = sde_recovery_regdump_read,
};
static ssize_t sde_recovery_dbgbus_dump_read(struct file *file,
char __user *buff,
size_t count, loff_t *ppos)
{
ssize_t len = 0;
char log_buf[SDE_EVTLOG_BUF_MAX];
u32 *data;
struct sde_dbg_debug_bus_common *cmn = file->private_data;
u32 entry_size = DUMP_CLMN_COUNT;
u32 max_size = min_t(size_t, count, SDE_EVTLOG_BUF_MAX);
memset(log_buf, 0, sizeof(log_buf));
mutex_lock(&sde_dbg_base.mutex);
if (!cmn->dumped_content || !cmn->entries_size)
goto dump_done;
if (cmn->content_idx < cmn->content_size) {
data = &cmn->dumped_content[cmn->content_idx];
len = scnprintf(log_buf, max_size,
"0x%.8X | %.8X %.8X %.8X %.8X\n",
cmn->content_idx * sizeof(*data),
data[0], data[1], data[2], data[3]);
cmn->content_idx += entry_size;
if (copy_to_user(buff, log_buf, len)) {
len = -EFAULT;
goto dump_done;
}
*ppos += len;
}
dump_done:
mutex_unlock(&sde_dbg_base.mutex);
return len;
}
static int sde_recovery_dbgbus_dump_open(struct inode *inode, struct file *file)
{
if (!inode || !file)
return -EINVAL;
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = (void *)&sde_dbg_base.dbgbus_sde.cmn;
mutex_lock(&sde_dbg_base.mutex);
sde_dbg_base.dbgbus_sde.cmn.content_idx = 0;
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
static const struct file_operations sde_recovery_dbgbus_fops = {
.open = sde_recovery_dbgbus_dump_open,
.read = sde_recovery_dbgbus_dump_read,
};
static int sde_recovery_vbif_dbgbus_dump_open(struct inode *inode,
struct file *file)
{
if (!inode || !file)
return -EINVAL;
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = (void *)&sde_dbg_base.dbgbus_vbif_rt.cmn;
mutex_lock(&sde_dbg_base.mutex);
sde_dbg_base.dbgbus_vbif_rt.cmn.content_idx = 0;
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
static const struct file_operations sde_recovery_vbif_dbgbus_fops = {
.open = sde_recovery_vbif_dbgbus_dump_open,
.read = sde_recovery_dbgbus_dump_read,
};
static int sde_recovery_dsi_dbgbus_dump_open(struct inode *inode,
struct file *file)
{
if (!inode || !file)
return -EINVAL;
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = (void *)&sde_dbg_base.dbgbus_dsi.cmn;
mutex_lock(&sde_dbg_base.mutex);
sde_dbg_base.dbgbus_dsi.cmn.content_idx = 0;
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
static const struct file_operations sde_recovery_dsi_dbgbus_fops = {
.open = sde_recovery_dsi_dbgbus_dump_open,
.read = sde_recovery_dbgbus_dump_read,
};
static int sde_recovery_lutdma_dbgbus_dump_open(struct inode *inode,
struct file *file)
{
if (!inode || !file)
return -EINVAL;
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = (void *)&sde_dbg_base.dbgbus_lutdma.cmn;
mutex_lock(&sde_dbg_base.mutex);
sde_dbg_base.dbgbus_lutdma.cmn.content_idx = 0;
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
static const struct file_operations sde_recovery_lutdma_dbgbus_fops = {
.open = sde_recovery_lutdma_dbgbus_dump_open,
.read = sde_recovery_dbgbus_dump_read,
};
/**
* sde_dbg_reg_base_release - release allocated reg dump file private data
* @inode: debugfs inode
* @file: file handle
* @Return: 0 on success
*/
static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
{
struct sde_dbg_reg_base *dbg;
if (!file)
return -EINVAL;
dbg = file->private_data;
if (!dbg)
return -ENODEV;
mutex_lock(&sde_dbg_base.mutex);
if (dbg && dbg->buf) {
kfree(dbg->buf);
dbg->buf_len = 0;
dbg->buf = NULL;
}
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
/**
* sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
* @off: address offset in bytes
* @cnt: memory size in bytes
* Return: true if valid; false otherwise
*/
static bool sde_dbg_reg_base_is_valid_range(
struct sde_dbg_reg_base *base,
u32 off, u32 cnt)
{
struct sde_dbg_reg_range *node;
pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
list_for_each_entry(node, &base->sub_range_list, head) {
pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
node->offset.start, node->offset.end);
if (node->offset.start <= off
&& off <= node->offset.end
&& off + cnt <= node->offset.end) {
pr_debug("valid range requested\n");
return true;
}
}
pr_err("invalid range requested\n");
return false;
}
/**
* sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
* @file: file handler
* @user_buf: user buffer content from debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct sde_dbg_reg_base *dbg;
u32 off = 0;
u32 cnt = DEFAULT_BASE_REG_CNT;
char buf[24];
int rc;
if (!file)
return -EINVAL;
dbg = file->private_data;
if (!dbg)
return -ENODEV;
if (count >= sizeof(buf))
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
if (sscanf(buf, "%5x %x", &off, &cnt) != 2)
return -EFAULT;
if (off > dbg->max_offset)
return -EINVAL;
if (off % sizeof(u32))
return -EINVAL;
if (cnt > (dbg->max_offset - off))
cnt = dbg->max_offset - off;
if (cnt == 0)
return -EINVAL;
if (!list_empty(&dbg->sub_range_list)) {
rc = sde_dbg_reg_base_is_valid_range(dbg, off, cnt);
if (!rc)
return -EINVAL;
}
mutex_lock(&sde_dbg_base.mutex);
dbg->off = off;
dbg->cnt = cnt;
mutex_unlock(&sde_dbg_base.mutex);
pr_debug("offset=%x cnt=%x\n", off, cnt);
return count;
}
/**
* sde_dbg_reg_base_offset_read - read current offset and len of register base
* @file: file handler
* @user_buf: user buffer content from debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_dbg_reg_base_offset_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct sde_dbg_reg_base *dbg;
int len = 0;
char buf[24] = {'\0'};
if (!file)
return -EINVAL;
dbg = file->private_data;
if (!dbg)
return -ENODEV;
if (!ppos)
return -EINVAL;
if (*ppos)
return 0; /* the end */
mutex_lock(&sde_dbg_base.mutex);
if (dbg->off % sizeof(u32)) {
mutex_unlock(&sde_dbg_base.mutex);
return -EFAULT;
}
len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
if (len < 0 || len >= sizeof(buf)) {
mutex_unlock(&sde_dbg_base.mutex);
return 0;
}
if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
mutex_unlock(&sde_dbg_base.mutex);
return -EFAULT;
}
*ppos += len; /* increase offset */
mutex_unlock(&sde_dbg_base.mutex);
return len;
}
#ifdef CONFIG_DYNAMIC_DEBUG
/**
* sde_dbg_reg_base_reg_write - write a given value to reg base hw at an offset
* @file: file handler
* @user_buf: user buffer content from debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct sde_dbg_reg_base *dbg;
size_t off;
u32 data, cnt;
char buf[24];
int rc;
if (!file)
return -EINVAL;
dbg = file->private_data;
if (!dbg)
return -ENODEV;
if (count >= sizeof(buf))
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
cnt = sscanf(buf, "%zx %x", &off, &data);
if (cnt < 2)
return -EFAULT;
if (off % sizeof(u32))
return -EFAULT;
mutex_lock(&sde_dbg_base.mutex);
if (off >= dbg->max_offset) {
mutex_unlock(&sde_dbg_base.mutex);
return -EFAULT;
}
if (!list_empty(&dbg->sub_range_list)) {
rc = sde_dbg_reg_base_is_valid_range(dbg, off, cnt);
if (!rc) {
mutex_unlock(&sde_dbg_base.mutex);
return -EINVAL;
}
}
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
mutex_unlock(&sde_dbg_base.mutex);
pr_err("failed to enable power %d\n", rc);
return rc;
}
writel_relaxed(data, dbg->base + off);
pm_runtime_put_sync(sde_dbg_base.dev);
mutex_unlock(&sde_dbg_base.mutex);
pr_debug("addr=%zx data=%x\n", off, data);
return count;
}
#endif
/**
* sde_dbg_reg_base_reg_read - read len from reg base hw at current offset
* @file: file handler
* @user_buf: user buffer content from debugfs
* @count: size of user buffer
* @ppos: position offset of user buffer
*/
static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
struct sde_dbg_reg_base *dbg;
size_t len;
int rc;
if (!file)
return -EINVAL;
dbg = file->private_data;
if (!dbg) {
pr_err("invalid handle\n");
return -ENODEV;
}
if (!ppos)
return -EINVAL;
mutex_lock(&sde_dbg_base.mutex);
if (!dbg->buf) {
char dump_buf[64];
char *ptr;
int cnt, tot;
dbg->buf_len = sizeof(dump_buf) *
DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
if (!dbg->buf) {
mutex_unlock(&sde_dbg_base.mutex);
return -ENOMEM;
}
if (dbg->off % sizeof(u32)) {
mutex_unlock(&sde_dbg_base.mutex);
return -EFAULT;
}
ptr = dbg->base + dbg->off;
tot = 0;
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
mutex_unlock(&sde_dbg_base.mutex);
pr_err("failed to enable power %d\n", rc);
return rc;
}
for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
ROW_BYTES, GROUP_BYTES, dump_buf,
sizeof(dump_buf), false);
len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
"0x%08x: %s\n",
((int) (unsigned long) ptr) -
((int) (unsigned long) dbg->base),
dump_buf);
ptr += ROW_BYTES;
tot += len;
if (tot >= dbg->buf_len)
break;
}
pm_runtime_put_sync(sde_dbg_base.dev);
dbg->buf_len = tot;
}
if (*ppos >= dbg->buf_len) {
mutex_unlock(&sde_dbg_base.mutex);
return 0; /* done reading */
}
len = min(count, dbg->buf_len - (size_t) *ppos);
if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
mutex_unlock(&sde_dbg_base.mutex);
pr_err("failed to copy to user\n");
return -EFAULT;
}
*ppos += len; /* increase offset */
mutex_unlock(&sde_dbg_base.mutex);
return len;
}
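/*
 * File operations for the per-block "<name>_off" and "<name>_reg" debugfs
 * nodes created in sde_dbg_debugfs_register(): the _off node selects the
 * dump window and the _reg node returns a hex dump of it. A rough usage
 * sketch (paths and values are hypothetical and depend on the debugfs mount
 * point, the drm minor and the registered block name):
 *
 *	echo "100 40" > /sys/kernel/debug/dri/0/debug/sde_off
 *	cat /sys/kernel/debug/dri/0/debug/sde_reg
 */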
static const struct file_operations sde_off_fops = {
.open = sde_dbg_reg_base_open,
.release = sde_dbg_reg_base_release,
.read = sde_dbg_reg_base_offset_read,
.write = sde_dbg_reg_base_offset_write,
};
static const struct file_operations sde_reg_fops = {
.open = sde_dbg_reg_base_open,
.release = sde_dbg_reg_base_release,
.read = sde_dbg_reg_base_reg_read,
#ifdef CONFIG_DYNAMIC_DEBUG
.write = sde_dbg_reg_base_reg_write,
#endif
};
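/**
 * sde_dbg_debugfs_register - create the "debug" directory under the drm
 *	debugfs root and populate it with the dump/control knobs, debug bus
 *	enable masks and per-block <name>_off/<name>_reg nodes
 * @dev: device handle of the msm drm platform device
 * Return: 0 on success, negative errno on failure
 */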
int sde_dbg_debugfs_register(struct device *dev)
{
static struct sde_dbg_base *dbg = &sde_dbg_base;
struct sde_dbg_reg_base *blk_base;
char debug_name[80] = "";
struct dentry *debugfs_root = NULL;
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = NULL;
if (!ddev) {
pr_err("Invalid drm device node\n");
return -EINVAL;
}
priv = ddev->dev_private;
if (!priv) {
pr_err("Invalid msm drm private node\n");
return -EINVAL;
}
debugfs_root = debugfs_create_dir("debug",
ddev->primary->debugfs_root);
if (IS_ERR_OR_NULL(debugfs_root)) {
pr_err("debugfs_root create_dir fail, error %ld\n",
PTR_ERR(debugfs_root));
priv->debug_root = NULL;
return -EINVAL;
}
priv->debug_root = debugfs_root;
debugfs_create_file("dbg_ctrl", 0600, debugfs_root, NULL,
&sde_dbg_ctrl_fops);
debugfs_create_file("dump", 0600, debugfs_root, NULL,
&sde_evtlog_fops);
debugfs_create_u32("enable", 0600, debugfs_root,
&(sde_dbg_base.evtlog->enable));
debugfs_create_u32("panic", 0600, debugfs_root,
&sde_dbg_base.panic_on_err);
debugfs_create_u32("reg_dump", 0600, debugfs_root,
&sde_dbg_base.enable_reg_dump);
debugfs_create_file("recovery_reg", 0400, debugfs_root, NULL,
&sde_recovery_reg_fops);
if (dbg->dbgbus_sde.entries) {
debugfs_create_file("recovery_dbgbus", 0400, debugfs_root, NULL,
&sde_recovery_dbgbus_fops);
snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
dbg->dbgbus_sde.cmn.name);
debugfs_create_u32(debug_name, 0600, debugfs_root,
&dbg->dbgbus_sde.cmn.enable_mask);
}
if (dbg->dbgbus_vbif_rt.entries) {
debugfs_create_file("recovery_vbif_dbgbus", 0400, debugfs_root,
NULL, &sde_recovery_vbif_dbgbus_fops);
snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
dbg->dbgbus_vbif_rt.cmn.name);
debugfs_create_u32(debug_name, 0600, debugfs_root,
&dbg->dbgbus_vbif_rt.cmn.enable_mask);
}
if (dbg->dbgbus_dsi.entries) {
debugfs_create_file("recovery_dsi_dbgbus", 0400, debugfs_root,
NULL, &sde_recovery_dsi_dbgbus_fops);
snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
dbg->dbgbus_dsi.cmn.name);
debugfs_create_u32(debug_name, 0600, debugfs_root,
&dbg->dbgbus_dsi.cmn.enable_mask);
}
if (dbg->dbgbus_lutdma.entries) {
debugfs_create_file("recovery_lutdma_dbgbus", 0400,
debugfs_root, NULL,
&sde_recovery_lutdma_dbgbus_fops);
snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
dbg->dbgbus_lutdma.cmn.name);
debugfs_create_u32(debug_name, 0600, debugfs_root,
&dbg->dbgbus_lutdma.cmn.enable_mask);
}
list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
snprintf(debug_name, sizeof(debug_name), "%s_off",
blk_base->name);
debugfs_create_file(debug_name, 0600, debugfs_root, blk_base,
&sde_off_fops);
snprintf(debug_name, sizeof(debug_name), "%s_reg",
blk_base->name);
debugfs_create_file(debug_name, 0400, debugfs_root, blk_base,
&sde_reg_fops);
}
return 0;
}
#else
int sde_dbg_debugfs_register(struct device *dev)
{
return 0;
}
#endif
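/*
 * Nothing to tear down here: the debugfs nodes created by
 * sde_dbg_debugfs_register() sit under the drm debugfs root and are
 * expected to be cleaned up together with it.
 */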
static void _sde_dbg_debugfs_destroy(void)
{
}
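/**
 * sde_dbg_init_dbg_buses - hook up the debug bus descriptor tables for the
 *	given hardware revision; the LUTDMA debug bus is only enabled on
 *	major revision 7 and above
 * @hwversion: sde hardware revision
 */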
void sde_dbg_init_dbg_buses(u32 hwversion)
{
static struct sde_dbg_base *dbg = &sde_dbg_base;
memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde));
memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
dbg->dbgbus_sde.entries = dbg_bus_sde;
dbg->dbgbus_sde.cmn.entries_size = ARRAY_SIZE(dbg_bus_sde);
dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP;
dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE;
dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus;
dbg->dbgbus_vbif_rt.cmn.entries_size = ARRAY_SIZE(vbif_dbg_bus);
dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
dbg->dbgbus_dsi.entries = dsi_dbg_bus;
dbg->dbgbus_dsi.cmn.entries_size = ARRAY_SIZE(dsi_dbg_bus);
dbg->dbgbus_dsi.cmn.name = DBGBUS_NAME_DSI;
dbg->dbgbus_dsi.cmn.enable_mask = DEFAULT_DBGBUS_DSI;
if (SDE_HW_REV_MAJOR(hwversion) >= 0x7) {
dbg->dbgbus_lutdma.entries = dbg_bus_lutdma;
dbg->dbgbus_lutdma.cmn.name = DBGBUS_NAME_LUTDMA;
dbg->dbgbus_lutdma.cmn.entries_size =
ARRAY_SIZE(dbg_bus_lutdma);
dbg->dbgbus_lutdma.cmn.enable_mask = DEFAULT_DBGBUS_LUTDMA;
dbg->dbgbus_lutdma.cmn.include_in_deferred_work = true;
}
}
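/**
 * sde_dbg_init - initialize the sde debug facility: mutex, register base
 *	list, event log and the deferred dump work item
 * @dev: device handle, also used for power votes while dumping
 * Return: 0 on success, negative errno on failure
 */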
int sde_dbg_init(struct device *dev)
{
if (!dev) {
pr_err("invalid params\n");
return -EINVAL;
}
mutex_init(&sde_dbg_base.mutex);
INIT_LIST_HEAD(&sde_dbg_base.reg_base_list);
sde_dbg_base.dev = dev;
sde_dbg_base.evtlog = sde_evtlog_init();
	if (IS_ERR_OR_NULL(sde_dbg_base.evtlog))
		return sde_dbg_base.evtlog ?
			PTR_ERR(sde_dbg_base.evtlog) : -ENOMEM;
sde_dbg_base_evtlog = sde_dbg_base.evtlog;
INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work);
sde_dbg_base.work_panic = false;
sde_dbg_base.panic_on_err = DEFAULT_PANIC;
sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP;
memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err,
sde_dbg_base.enable_reg_dump);
return 0;
}
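/**
 * sde_dbg_reg_base_destroy - free every registered reg base together with
 *	its list of named sub-ranges
 */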
static void sde_dbg_reg_base_destroy(void)
{
struct sde_dbg_reg_range *range_node, *range_tmp;
struct sde_dbg_reg_base *blk_base, *blk_tmp;
struct sde_dbg_base *dbg_base = &sde_dbg_base;
if (!dbg_base)
return;
list_for_each_entry_safe(blk_base, blk_tmp, &dbg_base->reg_base_list,
reg_base_head) {
list_for_each_entry_safe(range_node, range_tmp,
&blk_base->sub_range_list, head) {
list_del(&range_node->head);
kfree(range_node);
}
list_del(&blk_base->reg_base_head);
kfree(blk_base);
}
}
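/**
 * sde_dbg_dsi_ctrl_destroy - remove and free all DSI controller entries
 *	registered for debug bus dumping
 */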
static void sde_dbg_dsi_ctrl_destroy(void)
{
struct sde_dbg_dsi_ctrl_list_entry *entry, *tmp;
mutex_lock(&sde_dbg_dsi_mutex);
list_for_each_entry_safe(entry, tmp, &sde_dbg_dsi_list, list) {
list_del(&entry->list);
kfree(entry);
}
mutex_unlock(&sde_dbg_dsi_mutex);
}
/**
* sde_dbg_destroy - destroy sde debug facilities
*/
void sde_dbg_destroy(void)
{
kfree(sde_dbg_base.regbuf.buf);
memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
_sde_dbg_debugfs_destroy();
sde_dbg_base_evtlog = NULL;
sde_evtlog_destroy(sde_dbg_base.evtlog);
sde_dbg_base.evtlog = NULL;
sde_dbg_reg_base_destroy();
sde_dbg_dsi_ctrl_destroy();
mutex_destroy(&sde_dbg_base.mutex);
}
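/**
 * sde_dbg_dsi_ctrl_register - add a DSI controller register base to the list
 *	walked by the DSI debug bus dump
 * @base: mapped register base of the DSI controller
 * @name: controller name; the string is referenced, not copied, so it must
 *	outlive the entry
 * Return: 0 on success, -ENOMEM on allocation failure
 */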
int sde_dbg_dsi_ctrl_register(void __iomem *base, const char *name)
{
struct sde_dbg_dsi_ctrl_list_entry *entry;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->name = name;
entry->base = base;
mutex_lock(&sde_dbg_dsi_mutex);
list_add_tail(&entry->list, &sde_dbg_dsi_list);
mutex_unlock(&sde_dbg_dsi_mutex);
pr_debug("registered DSI CTRL %s for debugbus support\n", entry->name);
return 0;
}
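/**
 * sde_dbg_reg_register_base - register a memory-mapped block for register
 *	dumping and for the <name>_off/<name>_reg debugfs nodes
 * @name: unique block name, also used as the debugfs node prefix
 * @base: mapped register base address
 * @max_offset: size of the mapped region in bytes
 * Return: 0 on success, negative errno on failure
 */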
int sde_dbg_reg_register_base(const char *name, void __iomem *base,
size_t max_offset)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_reg_base *reg_base;
if (!name || !strlen(name)) {
pr_err("no debug name provided\n");
return -EINVAL;
}
reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL);
if (!reg_base)
return -ENOMEM;
strlcpy(reg_base->name, name, sizeof(reg_base->name));
reg_base->base = base;
reg_base->max_offset = max_offset;
reg_base->off = 0;
reg_base->cnt = DEFAULT_BASE_REG_CNT;
reg_base->reg_dump = NULL;
	/* initialize the list head so later list_empty() checks are valid */
INIT_LIST_HEAD(&reg_base->sub_range_list);
pr_debug("%s base: %pK max_offset 0x%zX\n", reg_base->name,
reg_base->base, reg_base->max_offset);
list_add(&reg_base->reg_base_head, &dbg_base->reg_base_list);
return 0;
}
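/**
 * sde_dbg_reg_register_cb - register a virtual block whose state is captured
 *	by invoking a callback at dump time instead of reading a memory-mapped
 *	region
 * @name: unique block name
 * @cb: callback to invoke
 * @ptr: opaque pointer passed to the callback
 * Return: 0 on success, negative errno on failure
 */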
int sde_dbg_reg_register_cb(const char *name, void (*cb)(void *), void *ptr)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_reg_base *reg_base;
if (!name || !strlen(name)) {
pr_err("no debug name provided\n");
return -EINVAL;
}
reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL);
if (!reg_base)
return -ENOMEM;
strlcpy(reg_base->name, name, sizeof(reg_base->name));
reg_base->base = NULL;
reg_base->max_offset = 0;
reg_base->off = 0;
reg_base->cnt = DEFAULT_BASE_REG_CNT;
reg_base->reg_dump = NULL;
reg_base->cb = cb;
reg_base->cb_ptr = ptr;
	/* initialize the list head so later list_empty() checks are valid */
INIT_LIST_HEAD(&reg_base->sub_range_list);
pr_debug("%s cb: %pK cb_ptr: %pK\n", reg_base->name,
reg_base->cb, reg_base->cb_ptr);
list_add(&reg_base->reg_base_head, &dbg_base->reg_base_list);
return 0;
}
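/**
 * sde_dbg_reg_unregister_cb - remove a callback-based block previously added
 *	with sde_dbg_reg_register_cb(); the entry is matched by name only
 * @name: block name used at registration
 * @cb: unused
 * @ptr: unused
 */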
void sde_dbg_reg_unregister_cb(const char *name, void (*cb)(void *), void *ptr)
{
struct sde_dbg_base *dbg_base = &sde_dbg_base;
struct sde_dbg_reg_base *reg_base;
if (!dbg_base)
return;
list_for_each_entry(reg_base, &dbg_base->reg_base_list, reg_base_head) {
if (strlen(reg_base->name) &&
!strcmp(reg_base->name, name)) {
pr_debug("%s cb: %pK cb_ptr: %pK\n", reg_base->name,
reg_base->cb, reg_base->cb_ptr);
list_del(&reg_base->reg_base_head);
kfree(reg_base);
break;
}
}
}
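/**
 * sde_dbg_reg_register_dump_range - add a named sub-range to a registered reg
 *	base; when sub-ranges are present, the dump logic captures only these
 *	ranges instead of the whole block
 * @base_name: name of a base registered via sde_dbg_reg_register_base()
 * @range_name: name reported for this range in the dump
 * @offset_start: start offset of the range within the base
 * @offset_end: end offset of the range; must exceed @offset_start by at least
 *	REG_DUMP_ALIGN
 * @xin_id: VBIF client id associated with this range
 */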
void sde_dbg_reg_register_dump_range(const char *base_name,
const char *range_name, u32 offset_start, u32 offset_end,
uint32_t xin_id)
{
struct sde_dbg_reg_base *reg_base;
struct sde_dbg_reg_range *range;
reg_base = _sde_dump_get_blk_addr(base_name);
if (!reg_base) {
pr_err("error: for range %s unable to locate base %s\n",
range_name, base_name);
return;
}
if (!range_name || strlen(range_name) == 0) {
pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n",
__builtin_return_address(0), base_name,
offset_start, offset_end);
return;
}
if (offset_end - offset_start < REG_DUMP_ALIGN ||
offset_start > offset_end) {
pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n",
__builtin_return_address(0), base_name,
range_name, offset_start, offset_end);
return;
}
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range)
return;
strlcpy(range->range_name, range_name, sizeof(range->range_name));
range->offset.start = offset_start;
range->offset.end = offset_end;
range->xin_id = xin_id;
list_add_tail(&range->head, &reg_base->sub_range_list);
pr_debug("base %s, range %s, start 0x%X, end 0x%X\n",
base_name, range->range_name,
range->offset.start, range->offset.end);
}
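/**
 * sde_dbg_set_sde_top_offset - record the offset of the sde TOP block, used
 *	as the base when programming the sde debug bus test points
 * @blk_off: offset of the TOP block within the sde register space
 */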
void sde_dbg_set_sde_top_offset(u32 blk_off)
{
sde_dbg_base.dbgbus_sde.top_blk_off = blk_off;
}