Merge branch 'drm-next-4.11' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is the main feature pull for radeon and amdgpu for 4.11. Highlights:
- Power and clockgating improvements
- Preliminary SR-IOV support
- ttm buffer priority support
- ttm eviction fixes
- Removal of the ttm lru callbacks
- Remove SI DPM quirks due to MC firmware issues
- Handle VFCT with multiple vbioses
- Powerplay improvements
- Lots of driver cleanups

* 'drm-next-4.11' of git://people.freedesktop.org/~agd5f/linux: (120 commits)
  drm/amdgpu: fix amdgpu_bo_va_mapping flags
  drm/amdgpu: access stolen VRAM directly on CZ (v2)
  drm/amdgpu: access stolen VRAM directly on KV/KB (v2)
  drm/amdgpu: fix kernel panic when dpm disabled on Kv.
  drm/amdgpu: fix dpm bug on Kv.
  drm/amd/powerplay: fix regresstion issue can't set manual dpm mode.
  drm/amdgpu: handle vfct with multiple vbios images
  drm/radeon: handle vfct with multiple vbios images
  drm/amdgpu: move misc si headers into amdgpu
  drm/amdgpu: remove unused header si_reg.h
  drm/radeon: drop pitcairn dpm quirks
  drm/amdgpu: drop pitcairn dpm quirks
  drm: radeon: radeon_ttm: Handle return NULL error from ioremap_nocache
  drm/amd/amdgpu/amdgpu_ttm: Handle return NULL error from ioremap_nocache
  drm/amdgpu: add new virtual display ID
  drm/amd/amdgpu: remove the uncessary parameter for ib scheduler
  drm/amdgpu: Bring bo creation in line with radeon driver (v2)
  drm/amd/powerplay: fix misspelling in header guard
  drm/ttm: revert "add optional LRU removal callback v2"
  drm/ttm: revert "implement LRU add callbacks v2"
  ...
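Note on the "ttm buffer priority support" highlight, as reflected in the TTM hunks below: each memory-type manager (and the global swap list) now keeps an array of TTM_MAX_BO_PRIORITY LRU lists instead of a single list, and eviction/swapout walk those lists from index 0 upward, taking the first reservable buffer they find. The following is a minimal userspace sketch of that bucket-per-priority pattern only; it is not kernel code. The toy_* names, the singly linked buckets, and the constant value 4 are invented for illustration, standing in for the kernel's list_head based man->lru[] / glob->swap_lru[] arrays.

/* toy_lru.c -- illustrative sketch only; build with: cc -Wall toy_lru.c */
#include <stdio.h>

#define TOY_MAX_BO_PRIORITY 4	/* stand-in for TTM_MAX_BO_PRIORITY */

struct toy_bo {
	int id;
	unsigned int priority;
	struct toy_bo *next;	/* simple singly linked LRU bucket */
};

struct toy_manager {
	/* one LRU list per priority, like man->lru[] after this pull */
	struct toy_bo *lru[TOY_MAX_BO_PRIORITY];
};

/* analogue of ttm_bo_add_to_lru(): append to the tail of the bucket
 * selected by bo->priority */
static void toy_add_to_lru(struct toy_manager *man, struct toy_bo *bo)
{
	struct toy_bo **tail = &man->lru[bo->priority];

	while (*tail)
		tail = &(*tail)->next;
	bo->next = NULL;
	*tail = bo;
}

/* analogue of the new eviction walk: scan bucket 0 first, then 1, ...
 * and take the first buffer found */
static struct toy_bo *toy_evict_first(struct toy_manager *man)
{
	unsigned int i;

	for (i = 0; i < TOY_MAX_BO_PRIORITY; ++i) {
		struct toy_bo *bo = man->lru[i];

		if (bo) {
			man->lru[i] = bo->next;	/* "del_from_lru" */
			return bo;
		}
	}
	return NULL;
}

int main(void)
{
	struct toy_manager man = { { NULL } };
	struct toy_bo bos[] = {
		{ .id = 1, .priority = 2 },
		{ .id = 2, .priority = 0 },
		{ .id = 3, .priority = 0 },
	};
	struct toy_bo *victim;
	unsigned int i;

	for (i = 0; i < sizeof(bos) / sizeof(bos[0]); ++i)
		toy_add_to_lru(&man, &bos[i]);

	/* prints bo 2, bo 3 (bucket 0 in LRU order), then bo 1 (bucket 2) */
	while ((victim = toy_evict_first(&man)) != NULL)
		printf("evict bo %d (priority %u)\n", victim->id, victim->priority);

	return 0;
}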
@@ -163,6 +163,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
@@ -170,11 +171,13 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 
 	BUG_ON(!list_empty(&bo->lru));
 
-	list_add(&bo->lru, bdev->driver->lru_tail(bo));
+	man = &bdev->man[bo->mem.mem_type];
+	list_add_tail(&bo->lru, &man->lru[bo->priority]);
 	kref_get(&bo->list_kref);
 
 	if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-		list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
+		list_add_tail(&bo->swap,
+			      &bo->glob->swap_lru[bo->priority]);
 		kref_get(&bo->list_kref);
 	}
 }
@@ -183,12 +186,8 @@ EXPORT_SYMBOL(ttm_bo_add_to_lru);
 
 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	int put_count = 0;
 
-	if (bdev->driver->lru_removal)
-		bdev->driver->lru_removal(bo);
-
 	if (!list_empty(&bo->swap)) {
 		list_del_init(&bo->swap);
 		++put_count;
@@ -198,6 +197,11 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 		++put_count;
 	}
 
+	/*
+	 * TODO: Add a driver hook to delete from
+	 * driver-specific LRU's here.
+	 */
+
 	return put_count;
 }
 
@@ -226,32 +230,16 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	int put_count = 0;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
-	if (bdev->driver->lru_removal)
-		bdev->driver->lru_removal(bo);
-
 	put_count = ttm_bo_del_from_lru(bo);
 	ttm_bo_list_ref_sub(bo, put_count, true);
 	ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
-struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
-{
-	return bo->bdev->man[bo->mem.mem_type].lru.prev;
-}
-EXPORT_SYMBOL(ttm_bo_default_lru_tail);
-
-struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
-{
-	return bo->glob->swap_lru.prev;
-}
-EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
-
 /*
  * Call bo->mutex locked.
  */
@@ -342,7 +330,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 		if (bdev->driver->move_notify)
-			bdev->driver->move_notify(bo, mem);
+			bdev->driver->move_notify(bo, evict, mem);
 		bo->mem = *mem;
 		mem->mm_node = NULL;
 		goto moved;
@@ -350,7 +338,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	}
 
 	if (bdev->driver->move_notify)
-		bdev->driver->move_notify(bo, mem);
+		bdev->driver->move_notify(bo, evict, mem);
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
@@ -366,7 +354,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 			struct ttm_mem_reg tmp_mem = *mem;
 			*mem = bo->mem;
 			bo->mem = tmp_mem;
-			bdev->driver->move_notify(bo, mem);
+			bdev->driver->move_notify(bo, false, mem);
 			bo->mem = *mem;
 			*mem = tmp_mem;
 		}
@@ -414,7 +402,7 @@ out_err:
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
 	if (bo->bdev->driver->move_notify)
-		bo->bdev->driver->move_notify(bo, NULL);
+		bo->bdev->driver->move_notify(bo, false, NULL);
 
 	ttm_tt_destroy(bo->ttm);
 	bo->ttm = NULL;
@@ -741,20 +729,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY, put_count;
+	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &man->lru, lru) {
-		ret = __ttm_bo_reserve(bo, false, true, NULL);
-		if (ret)
-			continue;
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(bo, &man->lru[i], lru) {
+			ret = __ttm_bo_reserve(bo, false, true, NULL);
+			if (ret)
+				continue;
 
-		if (place && !bdev->driver->eviction_valuable(bo, place)) {
-			__ttm_bo_unreserve(bo);
-			ret = -EBUSY;
-			continue;
+			if (place && !bdev->driver->eviction_valuable(bo,
+								      place)) {
+				__ttm_bo_unreserve(bo);
+				ret = -EBUSY;
+				continue;
+			}
+
+			break;
 		}
 
-		break;
+		if (!ret)
+			break;
 	}
 
 	if (ret) {
@@ -1197,6 +1192,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
+	bo->priority = 0;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1291,29 +1287,27 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   unsigned mem_type, bool allow_errors)
+				   unsigned mem_type)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
 	struct dma_fence *fence;
 	int ret;
+	unsigned i;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-	while (!list_empty(&man->lru)) {
-		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
-		if (ret) {
-			if (allow_errors) {
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		while (!list_empty(&man->lru[i])) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+			if (ret)
 				return ret;
-			} else {
-				pr_err("Cleanup eviction failed\n");
-			}
+			spin_lock(&glob->lru_lock);
 		}
-		spin_lock(&glob->lru_lock);
 	}
 	spin_unlock(&glob->lru_lock);
 
@@ -1324,13 +1318,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	if (fence) {
 		ret = dma_fence_wait(fence, false);
 		dma_fence_put(fence);
-		if (ret) {
-			if (allow_errors) {
-				return ret;
-			} else {
-				pr_err("Cleanup eviction failed\n");
-			}
-		}
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -1359,7 +1348,11 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, mem_type, false);
+		ret = ttm_bo_force_list_clean(bdev, mem_type);
+		if (ret) {
+			pr_err("Cleanup eviction failed\n");
+			return ret;
+		}
 
 		ret = (*man->func->takedown)(man);
 	}
@@ -1382,7 +1375,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
@@ -1391,6 +1384,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
@@ -1416,7 +1410,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	man->use_type = true;
 	man->size = p_size;
 
-	INIT_LIST_HEAD(&man->lru);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&man->lru[i]);
 	man->move = NULL;
 
 	return 0;
@@ -1448,6 +1443,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		container_of(ref, struct ttm_bo_global_ref, ref);
 	struct ttm_bo_global *glob = ref->object;
 	int ret;
+	unsigned i;
 
 	mutex_init(&glob->device_list_mutex);
 	spin_lock_init(&glob->lru_lock);
@@ -1459,7 +1455,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		goto out_no_drp;
 	}
 
-	INIT_LIST_HEAD(&glob->swap_lru);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&glob->swap_lru[i]);
 	INIT_LIST_HEAD(&glob->device_list);
 
 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
@@ -1518,8 +1515,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 	if (list_empty(&bdev->ddestroy))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
-	if (list_empty(&bdev->man[0].lru))
-		TTM_DEBUG("Swap list was clean\n");
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		if (list_empty(&bdev->man[0].lru[0]))
+			TTM_DEBUG("Swap list %d was clean\n", i);
 	spin_unlock(&glob->lru_lock);
 
 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1670,10 +1668,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY;
 	int put_count;
+	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = __ttm_bo_reserve(bo, false, true, NULL);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
+			ret = __ttm_bo_reserve(bo, false, true, NULL);
+			if (!ret)
+				break;
+		}
 		if (!ret)
 			break;
 	}