Merge tag 'drm-next-5.5-2019-10-09' of git://people.freedesktop.org/~agd5f/linux into drm-next

drm-next-5.5-2019-10-09:

amdgpu:
- Additional RAS enablement for vega20
- RAS page retirement and bad page storage in EEPROM
- No GPU reset with unrecoverable RAS errors
- Reserve vram for page tables rather than trying to evict
- Fix issues with GPU reset and xgmi hives
- DC i2c over aux fixes
- Direct submission for clears, PTE/PDE updates
- Improvements to help support recoverable GPU page faults
- Silence harmless SAD block messages
- Clean up code for creating a bo at a fixed location
- Initial DC HDCP support
- Lots of documentation fixes
- GPU reset for renoir
- Add IH clockgating support for soc15 asics
- Powerplay improvements
- DC MST cleanups
- Add support for MSI-X
- Misc cleanups and bug fixes

amdkfd:
- Query KFD device info by asic type rather than pci ids
- Add navi14 support
- Add renoir support
- Add navi12 support
- gfx10 trap handler improvements
- pasid cleanups
- Check against device cgroup

ttm:
- Return -EBUSY instead of pipelining a move when no_wait_gpu is set (see the diff and usage sketch below)

radeon:
- Silence harmless SAD block messages

device_cgroup:
- Export devcgroup_check_permission for use by amdkfd (usage sketched below)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191010041713.3412-1-alexander.deucher@amd.com
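
For context on the device_cgroup item: the export lets amdkfd check the
calling process's device cgroup before granting access to a render node.
A minimal sketch of such a check (the helper name and the major-number
constant are illustrative, not code from this pull):

	#include <linux/device_cgroup.h>
	#include <drm/drm_device.h>
	#include <drm/drm_file.h>

	#define EXAMPLE_DRM_MAJOR 226	/* char-dev major of DRM nodes; illustrative */

	/* Return 0 if the current task's device cgroup grants read/write
	 * access to this device's render node, -EPERM otherwise.
	 * DEVCG_DEV_CHAR and DEVCG_ACC_* come from <linux/device_cgroup.h>.
	 */
	static int example_check_render_access(struct drm_device *ddev)
	{
		return devcgroup_check_permission(DEVCG_DEV_CHAR,
						  EXAMPLE_DRM_MAJOR,
						  ddev->render->index,
						  DEVCG_ACC_READ |
						  DEVCG_ACC_WRITE);
	}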
Committed by Dave Airlie on 2019-10-26 05:56:57 +10:00. 253 changed files with 11563 additions and 4290 deletions; the ttm portion of the diff (drivers/gpu/drm/ttm/ttm_bo.c) follows.

--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -926,7 +926,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
-				 struct ttm_mem_reg *mem)
+				 struct ttm_mem_reg *mem,
+				 bool no_wait_gpu)
 {
 	struct dma_fence *fence;
 	int ret;
@@ -935,19 +936,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
-	if (fence) {
-		dma_resv_add_shared_fence(bo->base.resv, fence);
-
-		ret = dma_resv_reserve_shared(bo->base.resv, 1);
-		if (unlikely(ret)) {
-			dma_fence_put(fence);
-			return ret;
-		}
-
-		dma_fence_put(bo->moving);
-		bo->moving = fence;
-	}
-
+	if (!fence)
+		return 0;
+
+	if (no_wait_gpu)
+		return -EBUSY;
+
+	dma_resv_add_shared_fence(bo->base.resv, fence);
+
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	if (unlikely(ret)) {
+		dma_fence_put(fence);
+		return ret;
+	}
+
+	dma_fence_put(bo->moving);
+	bo->moving = fence;
 	return 0;
 }
 
@@ -978,7 +982,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		return ret;
 	} while (1);
 
-	return ttm_bo_add_move_fence(bo, man, mem);
+	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1120,14 +1124,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			goto error;
 
-		if (mem->mm_node) {
-			ret = ttm_bo_add_move_fence(bo, man, mem);
-			if (unlikely(ret)) {
-				(*man->func->put_node)(man, mem);
-				goto error;
-			}
-			return 0;
-		}
+		if (!mem->mm_node)
+			continue;
+
+		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+		if (unlikely(ret)) {
+			(*man->func->put_node)(man, mem);
+			if (ret == -EBUSY)
+				continue;
+
+			goto error;
+		}
+		return 0;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
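
The no_wait_gpu flag above comes from the caller's struct ttm_operation_ctx.
A minimal caller-side sketch (illustrative, not part of this patch) of
probing placements without waiting on a pending move fence, via the
standard ttm_bo_validate() entry point:

	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_placement.h>

	static int example_validate_nowait(struct ttm_buffer_object *bo,
					   struct ttm_placement *placement)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = true,	/* fail fast instead of pipelining */
		};
		int ret;

		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret) {
			/* With no_wait_gpu set, a placement whose manager still
			 * has a move fence pending is no longer pipelined behind
			 * that move: ttm_bo_add_move_fence() returns -EBUSY and
			 * ttm_bo_mem_space() falls through to the next placement.
			 * Retry with no_wait_gpu = false to allow waiting.
			 */
			return ret;
		}
		return 0;
	}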