Merge remote-tracking branch 'airlied/drm-next' into topic/drm-misc
Backmerge drm-next to be able to apply Chris' connector_unregister_all
cleanup (need latest i915 and sun4i state for that). Also there's a
trivial conflict in ttm_bo.c that git rerere fails to remember.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,6 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
+	fence_put(bo->moving);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
@@ -358,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		ret = bdev->driver->move(bo, evict, interruptible,
 					 no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+					 no_wait_gpu, mem);
 
 	if (ret) {
 		if (bdev->driver->move_notify) {
@@ -394,8 +396,7 @@ moved:
 
 out_err:
 	new_man = &bdev->man[bo->mem.mem_type];
-	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
-		ttm_tt_unbind(bo->ttm);
+	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
@@ -416,11 +417,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	if (bo->bdev->driver->move_notify)
 		bo->bdev->driver->move_notify(bo, NULL);
 
-	if (bo->ttm) {
-		ttm_tt_unbind(bo->ttm);
-		ttm_tt_destroy(bo->ttm);
-		bo->ttm = NULL;
-	}
+	ttm_tt_destroy(bo->ttm);
+	bo->ttm = NULL;
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	ww_mutex_unlock (&bo->resv->lock);
@@ -686,15 +684,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS) {
-			pr_err("Failed to expire sync object before buffer eviction\n");
-		}
-		goto out;
-	}
-
 	lockdep_assert_held(&bo->resv->lock.base);
 
 	evict_mem = bo->mem;
@@ -718,7 +707,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
 				     no_wait_gpu);
-	if (ret) {
+	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
 			pr_err("Buffer eviction failed\n");
 		ttm_bo_mem_put(bo, &evict_mem);
@@ -797,6 +786,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
+/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+				 struct ttm_mem_type_manager *man,
+				 struct ttm_mem_reg *mem)
+{
+	struct fence *fence;
+	int ret;
+
+	spin_lock(&man->move_lock);
+	fence = fence_get(man->move);
+	spin_unlock(&man->move_lock);
+
+	if (fence) {
+		reservation_object_add_shared_fence(bo->resv, fence);
+
+		ret = reservation_object_reserve_shared(bo->resv);
+		if (unlikely(ret))
+			return ret;
+
+		fence_put(bo->moving);
+		bo->moving = fence;
+	}
+
+	return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
@@ -823,10 +840,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	if (mem->mm_node == NULL)
-		return -ENOMEM;
 	mem->mem_type = mem_type;
-	return 0;
+	return ttm_bo_add_move_fence(bo, man, mem);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -896,6 +911,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool has_erestartsys = false;
 	int i, ret;
 
+	ret = reservation_object_reserve_shared(bo->resv);
+	if (unlikely(ret))
+		return ret;
+
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
 		const struct ttm_place *place = &placement->placement[i];
@@ -929,9 +948,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret))
 			return ret;
 
-		if (mem->mm_node)
+		if (mem->mm_node) {
+			ret = ttm_bo_add_move_fence(bo, man, mem);
+			if (unlikely(ret)) {
+				(*man->func->put_node)(man, mem);
+				return ret;
+			}
 			break;
+		}
 	}
 
 	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -998,20 +1023,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
-	/*
-	 * Don't wait for the BO on initial allocation. This is important when
-	 * the BO has an imported reservation object.
-	 */
-	if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
-		/*
-		 * FIXME: It's possible to pipeline buffer moves.
-		 * Have the driver move function wait for idle when necessary,
-		 * instead of doing it here.
-		 */
-		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-		if (ret)
-			return ret;
-	}
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
@@ -1163,7 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.io_reserved_vm = false;
 	bo->mem.bus.io_reserved_count = 0;
-	bo->priv_flags = 0;
+	bo->moving = NULL;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
@@ -1275,6 +1286,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
+	struct fence *fence;
 	int ret;
 
 	/*
@@ -1295,6 +1307,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 		spin_lock(&glob->lru_lock);
 	}
 	spin_unlock(&glob->lru_lock);
+
+	spin_lock(&man->move_lock);
+	fence = fence_get(man->move);
+	spin_unlock(&man->move_lock);
+
+	if (fence) {
+		ret = fence_wait(fence, false);
+		fence_put(fence);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				pr_err("Cleanup eviction failed\n");
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -1314,6 +1343,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 			mem_type);
 		return ret;
 	}
+	fence_put(man->move);
 
 	man->use_type = false;
 	man->has_type = false;
@@ -1359,6 +1389,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	man->io_reserve_fastpath = true;
 	man->use_io_reserve_lru = false;
 	mutex_init(&man->io_reserve_mutex);
+	spin_lock_init(&man->move_lock);
 	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1377,6 +1408,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	man->size = p_size;
 
 	INIT_LIST_HEAD(&man->lru);
+	man->move = NULL;
 
 	return 0;
 }
@@ -1570,47 +1602,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool interruptible, bool no_wait)
 {
-	struct reservation_object_list *fobj;
-	struct reservation_object *resv;
-	struct fence *excl;
-	long timeout = 15 * HZ;
-	int i;
-
-	resv = bo->resv;
-	fobj = reservation_object_get_list(resv);
-	excl = reservation_object_get_excl(resv);
-	if (excl) {
-		if (!fence_is_signaled(excl)) {
-			if (no_wait)
-				return -EBUSY;
-
-			timeout = fence_wait_timeout(excl,
-						     interruptible, timeout);
-		}
-	}
-
-	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
-		struct fence *fence;
-		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(resv));
-
-		if (!fence_is_signaled(fence)) {
-			if (no_wait)
-				return -EBUSY;
-
-			timeout = fence_wait_timeout(fence,
-						     interruptible, timeout);
-		}
-	}
+	long timeout = no_wait ? 0 : 15 * HZ;
 
+	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+						      interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
 
 	if (timeout == 0)
 		return -EBUSY;
 
-	reservation_object_add_excl_fence(resv, NULL);
-	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+	reservation_object_add_excl_fence(bo->resv, NULL);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
@@ -1680,14 +1682,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	/**
-	 * Wait for GPU, then move to system cached.
+	 * Move to system cached
 	 */
 
-	ret = ttm_bo_wait(bo, false, false);
-
-	if (unlikely(ret != 0))
-		goto out;
-
 	if ((bo->mem.placement & swap_placement) != swap_placement) {
 		struct ttm_mem_reg evict_mem;
 
@@ -1702,6 +1699,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		goto out;
 	}
 
+	/**
+	 * Make sure BO is idle.
+	 */
+
+	ret = ttm_bo_wait(bo, false, false);
+	if (unlikely(ret != 0))
+		goto out;
+
 	ttm_bo_unmap_virtual(bo);
 
 	/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
-		ttm_tt_unbind(ttm);
 		ttm_bo_free_old_node(bo);
 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 				TTM_PL_MASK_MEM);
@@ -321,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait_gpu,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -337,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	unsigned long add = 0;
 	int dir;
 
+	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+	if (ret)
+		return ret;
+
 	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
 	if (ret)
 		return ret;
@@ -401,8 +405,7 @@ out2:
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
-	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
-		ttm_tt_unbind(ttm);
+	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 		ttm_tt_destroy(ttm);
 		bo->ttm = NULL;
 	}
@@ -462,6 +465,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	fbo->moving = NULL;
 	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -634,7 +638,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct fence *fence,
 			      bool evict,
-			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -649,9 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-	    (bo->ttm != NULL)) {
-		ttm_tt_unbind(bo->ttm);
+	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
@@ -665,7 +666,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 * operation has completed.
 		 */
 
-		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+		fence_put(bo->moving);
+		bo->moving = fence_get(fence);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
@@ -694,3 +696,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+			 struct fence *fence, bool evict,
+			 struct ttm_mem_reg *new_mem)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+	int ret;
+
+	reservation_object_add_excl_fence(bo->resv, fence);
+
+	if (!evict) {
+		struct ttm_buffer_object *ghost_obj;
+
+		/**
+		 * This should help pipeline ordinary buffer moves.
+		 *
+		 * Hang old buffer memory on a new buffer object,
+		 * and leave it to be released when the GPU
+		 * operation has completed.
+		 */
+
+		fence_put(bo->moving);
+		bo->moving = fence_get(fence);
+
+		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+		if (ret)
+			return ret;
+
+		reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+		/**
+		 * If we're not moving to fixed memory, the TTM object
+		 * needs to stay alive. Otherwise hang it on the ghost
+		 * bo to be unbound and destroyed.
+		 */
+
+		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+			ghost_obj->ttm = NULL;
+		else
+			bo->ttm = NULL;
+
+		ttm_bo_unreserve(ghost_obj);
+		ttm_bo_unref(&ghost_obj);
+
+	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+		/**
+		 * BO doesn't have a TTM we need to bind/unbind. Just remember
+		 * this eviction and free up the allocation.
+		 */
+
+		spin_lock(&from->move_lock);
+		if (!from->move || fence_is_later(fence, from->move)) {
+			fence_put(from->move);
+			from->move = fence_get(fence);
+		}
+		spin_unlock(&from->move_lock);
+
+		ttm_bo_free_old_node(bo);
+
+		fence_put(bo->moving);
+		bo->moving = fence_get(fence);
+
+	} else {
+		/**
+		 * Last resort, wait for the move to be completed.
+		 *
+		 * Should never happen in practice.
+		 */
+
+		ret = ttm_bo_wait(bo, false, false);
+		if (ret)
+			return ret;
+
+		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+			ttm_tt_destroy(bo->ttm);
+			bo->ttm = NULL;
+		}
+		ttm_bo_free_old_node(bo);
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 
-	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+	if (likely(!bo->moving))
 		goto out_unlock;
 
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	ret = ttm_bo_wait(bo, false, true);
-	if (likely(ret == 0))
-		goto out_unlock;
+	if (fence_is_signaled(bo->moving))
+		goto out_clear;
 
 	/*
 	 * If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		up_read(&vma->vm_mm->mmap_sem);
-		(void) ttm_bo_wait(bo, true, false);
+		(void) fence_wait(bo->moving, true);
 		goto out_unlock;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	ret = ttm_bo_wait(bo, true, false);
-	if (unlikely(ret != 0))
+	ret = fence_wait(bo->moving, true);
+	if (unlikely(ret != 0)) {
 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
+		goto out_unlock;
+	}
+
+out_clear:
+	fence_put(bo->moving);
+	bo->moving = NULL;
 
 out_unlock:
 	return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -166,11 +166,15 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-	if (unlikely(ttm == NULL))
+	int ret;
+
+	if (ttm == NULL)
 		return;
 
-	ttm_tt_unbind(ttm);
+	if (ttm->state == tt_bound) {
+		ret = ttm->func->unbind(ttm);
+		BUG_ON(ret);
+		ttm->state = tt_unbound;
+	}
 
 	if (ttm->state == tt_unbound)
@@ -251,17 +255,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
 
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
-	int ret;
-
-	if (ttm->state == tt_bound) {
-		ret = ttm->func->unbind(ttm);
-		BUG_ON(ret);
-		ttm->state = tt_unbound;
-	}
-}
-
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	int ret = 0;