Merge tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "The biggest thing in this is the AMD Navi GPU support; this again
  contains a bunch of large header files. These are the new AMD RX5700
  GPUs that just recently became available.

  New drivers:
   - ST-Ericsson MCDE driver
   - Ingenic JZ47xx SoC

  UAPI change:
   - HDR source metadata property

  Core:
   - HDR infoframes and EDID parsing
   - drm hdmi infoframe unpacking
   - remove prime sg_table caching into dma-buf
   - New gem vram helpers to reduce driver code
   - Lots of drmP.h removal
   - reservation fencing fix
   - documentation updates
   - drm_fb_helper_connector removed
   - mode name command handler rewrite

  fbcon:
   - Remove the fbcon notifiers

  ttm:
   - forward progress fixes

  dma-buf:
   - make mmap call optional
   - debugfs refcount fixes
   - dma-fence free with pending signals fix
   - each dma-buf gets an inode

  Panels:
   - Lots of additional panel bindings

  amdgpu:
   - initial navi10 support
   - avoid hw reset
   - HDR metadata support
   - new thermal sensors for vega asics
   - RAS fixes
   - use HMM rather than MMU notifier
   - xgmi topology via kfd
   - SR-IOV fixes
   - driver reload fixes
   - DC use a core bpc attribute
   - Aux fixes for DC
   - Bandwidth calc updates for DC
   - Clock handling refactor
   - kfd VEGAM support

  vmwgfx:
   - Coherent memory support changes

  i915:
   - HDR Support
   - HDMI i2c link
   - Icelake multi-segmented gamma support
   - GuC firmware update
   - Mule Creek Canyon PCH support for EHL
   - EHL platform updates
   - move i915.alpha_support to i915.force_probe
   - runtime PM refactoring
   - VBT parsing refactoring
   - DSI fixes
   - struct mutex dependency reduction
   - GEM code reorg

  mali-dp:
   - Komeda driver features

  msm:
   - dsi vs EPROBE_DEFER fixes
   - msm8998 snapdragon 835 support
   - a540 gpu support
   - mdp5 and dpu interconnect support

  exynos:
   - drmP.h removal

  tegra:
   - misc fixes

  tda998x:
   - audio support improvements
   - pixel repeated mode support
   - quantisation range handling corrections
   - HDMI vendor info fix

  armada:
   - interlace support fix
   - overlay/video plane register handling refactor
   - add gamma support

  rockchip:
   - RK3328 support

  panfrost:
   - expose perf counters via hidden ioctls

  vkms:
   - enumerate CRC sources list

  ast:
   - rework BO handling

  mgag200:
   - rework BO handling

  dw-hdmi:
   - suspend/resume support

  rcar-du:
   - R8A774A1 SoC support
   - LVDS dual-link mode support
   - Additional formats
   - Misc fixes

  omapdrm:
   - DSI command mode display support

  stm:
   - fb modifier support
   - runtime PM support

  sun4i:
   - use vmap ops

  vc4:
   - binner bo binding rework

  v3d:
   - compute shader support
   - resync/sync fixes
   - job management refactoring

  lima:
   - NULL pointer in irq handler fix
   - scheduler default timeout

  virtio:
   - fence seqno support
   - trace events

  bochs:
   - misc fixes

  tc358767:
   - IRQ/HPD handling

  sii902x:
   - HDMI audio support

  atmel-hlcdc:
   - misc fixes

  meson:
   - zpos support"

* tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm: (1815 commits)
  Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
  Revert "mm: adjust apply_to_pfn_range interface for dropped token."
  mm: adjust apply_to_pfn_range interface for dropped token.
  drm/amdgpu/navi10: add uclk activity sensor
  drm/amdgpu: properly guard the generic discovery code
  drm/amdgpu: add missing documentation on new module parameters
  drm/amdgpu: don't invalidate caches in RELEASE_MEM, only do the writeback
  drm/amd/display: avoid 64-bit division
  drm/amdgpu/psp11: simplify the ucode register logic
  drm/amdgpu: properly guard DC support in navi code
  drm/amd/powerplay: vega20: fix uninitialized variable use
  drm/amd/display: dcn20: include linux/delay.h
  amdgpu: make pmu support optional
  drm/amd/powerplay: Zero initialize current_rpm in vega20_get_fan_speed_percent
  drm/amd/powerplay: Zero initialize freq in smu_v11_0_get_current_clk_freq
  drm/amd/powerplay: Use memset to initialize metrics structs
  drm/amdgpu/mes10.1: Fix header guard
  drm/amd/powerplay: add temperature sensor support for navi10
  drm/amdgpu: fix scheduler timeout calc
  drm/amdgpu: Prepare for hmm_range_register API change (v2)
  ...
@@ -384,19 +384,17 @@ static const unsigned int a3xx_registers[] = {
	0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
	0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
	0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
	0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
	0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
	0x303c, 0x303c, 0x305e, 0x305f,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
	0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
	0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
	0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
	0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
	0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
	0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
	0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
	0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
	0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
	0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
	~0 /* sentinel */
};

@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git

The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
- /home/ubuntu/envytools/envytools/rnndb/./adreno.xml ( 501 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a2xx.xml ( 79608 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_common.xml ( 14239 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_pm4.xml ( 43155 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a5xx.xml ( 147291 bytes, from 2019-05-29 14:51:41)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx.xml ( 148461 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2019-05-29 01:28:15)

Copyright (C) 2013-2018 by the following authors:
Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)

@@ -2148,6 +2148,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)

#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01

#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04

#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05

#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06

@@ -149,7 +149,6 @@ DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
	struct drm_device *dev;
	struct dentry *ent;
	int ret;

	if (!minor)
@@ -166,11 +165,8 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
		return ret;
	}

	ent = debugfs_create_file("reset", S_IWUGO,
		minor->debugfs_root,
		dev, &reset_fops);
	if (!ent)
		return -ENOMEM;
	debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
		&reset_fops);

	return 0;
}

@@ -309,12 +309,18 @@ static const struct {

void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
		gpu_write(gpu, a5xx_hwcg[i].offset,
			state ? a5xx_hwcg[i].value : 0);

	if (adreno_is_a540(adreno_gpu)) {
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
	}

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
@@ -498,6 +504,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)

	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

	if (adreno_is_a540(adreno_gpu))
		gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

@@ -558,7 +567,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
	if (adreno_is_a540(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

@@ -583,6 +595,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
	/* Set the highest bank bit */
	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
	if (adreno_is_a540(adreno_gpu))
		gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
@@ -633,6 +647,30 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Put the GPU into 64 bit by default */
	gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/*
	 * VPC corner case with local memory load kill leads to corrupt
	 * internal state. Normal Disable does not work for all a5x chips.
	 * So do the following setting to disable it.
	 */
	if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
		gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
		gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
	}

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

@@ -23,6 +23,18 @@
#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE

/* AGC_LM_CONFIG (A540+) */
#define AGC_LM_CONFIG (136/4)
#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)

#define AGC_LEVEL_CONFIG (140/4)

static struct {
	uint32_t reg;
	uint32_t value;
@@ -107,7 +119,7 @@ static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
}

/* Setup thermal limit management */
static void a5xx_lm_setup(struct msm_gpu *gpu)
static void a530_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -156,6 +168,45 @@ static void a5xx_lm_setup(struct msm_gpu *gpu)
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
#define LM_DCVS_LIMIT 1
#define LEVEL_CONFIG ~(0x303)

static void a540_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	u32 config;

	/* The battery current limiter isn't enabled for A540 */
	config = AGC_LM_CONFIG_BCL_DISABLED;
	config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;

	/* For now disable GPMU side throttling */
	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;

	/* Until we get clock scaling 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	/* Fixed at 6000 for now */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));

	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/* Enable SP/TP cpower collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
@@ -197,7 +248,8 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
		return -EINVAL;
	}

	gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
@@ -211,12 +263,26 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	if (!adreno_is_a530(adreno_gpu)) {
		u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);

		if (val)
			DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
				gpu->name, val);
	}

	return 0;
}

/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	/* This init sequence only applies to A530 */
	if (!adreno_is_a530(adreno_gpu))
		return;

	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
@@ -228,10 +294,14 @@ static void a5xx_lm_enable(struct msm_gpu *gpu)

int a5xx_power_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	/* Set up the limits management */
	a5xx_lm_setup(gpu);
	if (adreno_is_a530(adreno_gpu))
		a530_lm_setup(gpu);
	else
		a540_lm_setup(gpu);

	/* Set up SP/TP power collpase */
	a5xx_pc_init(gpu);

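Aside (not part of the patch): the AGC payload indices used in a540_lm_setup() above are dword offsets, so the size written last covers the whole message. A quick worked check of that arithmetic, using only values visible in the hunk:

	/* AGC_LM_CONFIG    = 136 / 4 = dword index 34                */
	/* AGC_LEVEL_CONFIG = 140 / 4 = dword index 35                */
	/* PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1) = 36 * sizeof(u32)      */
	/*                                    = 36 * 4 = 144 bytes    */
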
@@ -74,7 +74,7 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
@@ -90,7 +90,7 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
@@ -504,8 +504,10 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
	wmb();

err:
	devm_iounmap(gmu->dev, pdcptr);
	devm_iounmap(gmu->dev, seqptr);
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}

/*
@@ -695,7 +697,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;
@@ -765,7 +767,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
@@ -1195,7 +1197,7 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
@@ -1211,10 +1213,10 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			name, ret);
		return ret;
	}

@@ -1227,27 +1229,35 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
	if (!gmu->initialized)
		return;

	a6xx_gmu_stop(a6xx_gpu);

	pm_runtime_disable(gmu->dev);
	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	a6xx_gmu_irq_disable(gmu);
	iounmap(gmu->mmio);
	gmu->mmio = NULL;

	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
@@ -1268,34 +1278,34 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;
		goto err_put_device;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;
		goto err_put_device;

	/* Allocate memory for for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;
		goto err_memory;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;
		goto err_memory;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
@@ -1309,8 +1319,15 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;
err:

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);
err_memory:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
@@ -1318,6 +1335,11 @@ err:

		iommu_domain_free(gmu->domain);
	}
	ret = -ENODEV;

	return -ENODEV;
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}

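Aside (not part of the patch): the a6xx_gmu.c hunks above replace devm-managed resources (devm_ioremap(), devm_request_irq()) with plain ioremap()/request_irq(), guard the entry points with a gmu->initialized flag, and unwind partial setup through cascading error labels so each failure path frees exactly what was acquired before it. Below is a minimal, self-contained sketch of that shape; every name in it (demo_gmu, demo_gmu_init, the malloc() stand-ins) is hypothetical and only illustrates the pattern, it is not the driver's API.

#include <stdbool.h>
#include <stdlib.h>

struct demo_gmu {
	void *memory;		/* stands in for the HFI/debug allocations */
	void *mmio;		/* stands in for the ioremap()'d register window */
	bool initialized;
};

static int demo_gmu_init(struct demo_gmu *gmu)
{
	gmu->memory = malloc(64);	/* a6xx_gmu_memory_alloc() in the driver */
	if (!gmu->memory)
		goto err_put_device;

	gmu->mmio = malloc(64);		/* ioremap() in the driver */
	if (!gmu->mmio)
		goto err_memory;

	gmu->initialized = true;
	return 0;

err_memory:
	free(gmu->memory);		/* undo only what was already acquired */
err_put_device:
	return -1;			/* the driver also drops its device reference here */
}

static void demo_gmu_remove(struct demo_gmu *gmu)
{
	if (!gmu->initialized)		/* mirrors the new gmu->initialized guard */
		return;

	free(gmu->mmio);		/* iounmap() in the driver */
	free(gmu->memory);
	gmu->initialized = false;
}

int main(void)
{
	struct demo_gmu gmu = { 0 };

	if (demo_gmu_init(&gmu) == 0)
		demo_gmu_remove(&gmu);
	return 0;
}
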
@@ -75,6 +75,7 @@ struct a6xx_gmu {

	struct a6xx_hfi_queue queues[2];

	bool initialized;
	bool hung;
};

@@ -391,6 +391,20 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
		REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Turn on 64 bit addressing for all blocks */
	gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/* enable hardware clockgating */
	a6xx_set_hwcg(gpu, true);

@@ -854,7 +868,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
	/* FIXME: How do we gracefully handle this? */
	BUG_ON(!node);

	ret = a6xx_gmu_probe(a6xx_gpu, node);
	ret = a6xx_gmu_init(a6xx_gpu, node);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);

@@ -53,7 +53,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);

void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);

@@ -133,6 +133,24 @@ static const struct adreno_info gpulist[] = {
			ADRENO_QUIRK_FAULT_DETECT_MASK,
		.init = a5xx_gpu_init,
		.zapfw = "a530_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 4, 0, 2),
		.revn = 540,
		.name = "A540",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a540_zap.mdt",
	}, {
		.rev = ADRENO_REV(6, 3, 0, ANY_ID),
		.revn = 630,
@@ -340,7 +358,7 @@ static void adreno_unbind(struct device *dev, struct device *master,
{
	struct msm_gpu *gpu = dev_get_drvdata(dev);

	gpu->funcs->pm_suspend(gpu);
	pm_runtime_force_suspend(dev);
	gpu->funcs->destroy(gpu);

	set_gpu_pdev(dev_get_drvdata(master), NULL);

@@ -56,7 +56,6 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
		return ret;

	mem_phys = r.start;
	mem_size = resource_size(&r);

	/* Request the MDT file for the firmware */
	fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
@@ -72,6 +71,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
		goto out;
	}

	if (mem_size > resource_size(&r)) {
		DRM_DEV_ERROR(dev,
			"memory region is too small to load the MDT\n");
		ret = -E2BIG;
		goto out;
	}

	/* Allocate memory for the firmware image */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
	if (!mem_region) {

@@ -50,6 +50,7 @@ enum {

enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
	ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
	ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
};

struct adreno_rev {
@@ -210,6 +211,11 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
	return gpu->revn == 530;
}

static inline int adreno_is_a540(struct adreno_gpu *gpu)
{
	return gpu->revn == 540;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);