Merge tag 'drm-misc-next-2019-10-31' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.5:

UAPI Changes:
- dma-buf: Introduce and revert dma-buf heap (Andrew/John/Sean)

Cross-subsystem Changes:
- None

Core Changes:
- dma-buf: add dynamic mapping to allow exporters to choose dma_resv lock state on mmap/munmap (Christian)
- vram: add prepare/cleanup fb helpers to vram helpers (Thomas)
- ttm: always keep bo's on the lru + ttm cleanups (Christian)
- sched: allow a free_job routine to sleep (Steven)
- fb_helper: remove unused drm_fb_helper_defio_init() (Thomas)

Driver Changes:
- bochs/hibmc/vboxvideo: Use new vram helpers for prepare/cleanup fb (Thomas)
- amdgpu: Implement dma-buf import/export without drm helpers (Christian)
- panfrost: Simplify devfreq integration in driver (Steven)

Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Steven Price <steven.price@arm.com>
Cc: Andrew F. Davis <afd@ti.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20191031193015.GA243509@art_vandelay
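The TTM hunks below repeatedly swap per-device bdev->glob accesses for the single ttm_bo_glob/ttm_mem_glob instances and collapse ttm_bo_del_from_lru()/ttm_bo_add_to_lru() pairs into ttm_bo_move_to_lru_tail(). As a rough, hedged illustration of that "always keep BOs on the LRU" idea, here is a minimal userspace C sketch (not TTM or kernel code; every identifier in it is invented for this example): objects are linked onto one global LRU when created, stay on it for their whole lifetime, and callers only ever bump them to the tail under a single global lock.

/*
 * Hedged sketch only -- a userspace analogue of the pattern in the diff,
 * not TTM code. All identifiers here are invented for illustration.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_node *node, struct list_node *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

static void list_move_tail(struct list_node *node, struct list_node *head)
{
	/* Unlink, then relink at the tail; the node never leaves the list. */
	node->prev->next = node->next;
	node->next->prev = node->prev;
	list_add_tail(node, head);
}

/* One global LRU plus one lock, standing in for ttm_bo_glob's lists/lru_lock. */
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_node lru;

struct bo {
	struct list_node lru;	/* linked for the object's whole lifetime */
	const char *name;
};

static void bo_create(struct bo *bo, const char *name)
{
	bo->name = name;
	pthread_mutex_lock(&lru_lock);
	list_add_tail(&bo->lru, &lru);	/* added once, never removed until destroy */
	pthread_mutex_unlock(&lru_lock);
}

static void bo_mark_used(struct bo *bo)
{
	/* Instead of del/add pairs, just bump the object to the LRU tail. */
	pthread_mutex_lock(&lru_lock);
	list_move_tail(&bo->lru, &lru);
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct bo a, b;
	struct list_node *it;

	list_init(&lru);
	bo_create(&a, "a");
	bo_create(&b, "b");
	bo_mark_used(&a);	/* "a" becomes most recently used */

	for (it = lru.next; it != &lru; it = it->next) {
		struct bo *bo = (struct bo *)((char *)it - offsetof(struct bo, lru));
		printf("%s\n", bo->name);	/* prints "b" then "a" */
	}
	return 0;
}

The design point mirrored from the diff is that with a single global instance there is exactly one LRU lock and one set of lists, so callers during validation or backoff only need a move-to-tail operation rather than juggling per-device delete/add pairs.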
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
EXPORT_SYMBOL(ttm_bo_glob);

static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;

BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->bdev->glob->bo_count);
atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
ttm_mem_global_free(&ttm_mem_glob, acc_size);
}

static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -188,23 +187,17 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
ttm_bo_add_mem_to_lru(bo, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -224,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_global *glob = bo->bdev->glob;

spin_lock(&glob->lru_lock);
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -248,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);

ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);

if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -311,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);

lru = &pos->first->bdev->glob->swap_lru[i];
lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -475,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
int ret;

ret = ttm_bo_individualize_resv(bo);
@@ -485,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}

spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
if (bo->base.resv != &bo->base._resv)
dma_resv_unlock(&bo->base._resv);

@@ -512,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
ttm_bo_add_to_lru(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
}

dma_resv_unlock(bo->base.resv);
@@ -523,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);

schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -546,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
struct ttm_bo_global *glob = bo->bdev->glob;
struct dma_resv *resv;
int ret;

@@ -565,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,

if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);

lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
@@ -576,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;

spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@@ -586,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@@ -595,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}

@@ -603,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);

spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);

if (unlock_resv)
@@ -618,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;

@@ -842,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;

spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@@ -880,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
kref_get(&busy_bo->list_kref);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -896,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}

ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);

ret = ttm_bo_evict(bo, ctx);
if (locked) {
if (locked)
ttm_bo_unreserve(bo);
} else {
spin_lock(&glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&glob->lru_lock);
}

kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -1072,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;

if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, mem);
spin_unlock(&bo->bdev->glob->lru_lock);
}
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, mem);
spin_unlock(&ttm_bo_glob.lru_lock);

return 0;
}
@@ -1168,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,

error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
spin_lock(&bo->bdev->glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
}

return ret;
@@ -1294,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;

ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1323,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,

kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1357,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
atomic_inc(&bo->bdev->glob->bo_count);
atomic_inc(&ttm_bo_glob.bo_count);

/*
* For ttm_bo_type_device buffers, allocate
@@ -1387,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}

if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bdev->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bdev->glob->lru_lock);
}
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);

return ret;
}
@@ -1489,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@@ -1658,8 +1627,6 @@ static int ttm_bo_global_init(void)
goto out;

spin_lock_init(&glob->lru_lock);
glob->mem_glob = &ttm_mem_glob;
glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1683,10 +1650,10 @@ out:

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
struct ttm_bo_global *glob = bdev->glob;

while (i--) {
man = &bdev->man[i];
@@ -1755,7 +1722,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1835,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
int ret = 0;

/*
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/

ret = ttm_bo_reserve(bo, true, no_wait, NULL);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_wait(bo, true, no_wait);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@@ -1959,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};

while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
;
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
@@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)

mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -153,7 +151,6 @@ retry:
}
return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/

atomic_inc(&bo->bdev->glob->bo_count);
atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
atomic_set(&fbo->base.cpu_writers, 0);

kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(fbo->base.base.resv);
ret = dma_resv_trylock(fbo->base.base.resv);
if (bo->base.resv == &bo->base._resv)
fbo->base.base.resv = &fbo->base.base._resv;

dma_resv_init(&fbo->base.base._resv);
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);

*new_obj = &fbo->base;
@@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;

dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

/**
* If we're not moving to fixed memory, the TTM object
@@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;

ttm_bo_unreserve(ghost_obj);
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}

@@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;

dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

/**
* If we're not moving to fixed memory, the TTM object
@@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;

ttm_bo_unreserve(ghost_obj);
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);

} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;

ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;

ttm_bo_unreserve(ghost);
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);

return 0;
@@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}

if (bo->moving != moving) {
spin_lock(&bdev->glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bdev->glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@@ -480,6 +480,13 @@ EXPORT_SYMBOL(ttm_bo_mmap);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
ttm_bo_get(bo);

/*
* FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
* removed. Add it back here until the rest of TTM works without it.
*/
vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);

ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
@@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
}
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;

list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
ttm_bo_del_from_lru(bo);
}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;

if (list_empty(list))
return;

entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->bdev->glob;

spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;

if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);

if (ticket)
ww_acquire_fini(ticket);
@@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups, bool del_lru)
struct list_head *dups)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;

if (list_empty(list))
return 0;

entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->bdev->glob;

if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);

@@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;

ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
dma_resv_unlock(bo->base.resv);

ret = -EBUSY;

} else if (ret == -EALREADY && dups) {
if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}

if (del_lru) {
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
}
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;

if (list_empty(list))
return;

bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
glob = bo->bdev->glob;

spin_lock(&glob->lru_lock);

spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
struct ttm_buffer_object *bo = entry->bo;

if (entry->num_shared)
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,

while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
ret = ttm_bo_swapout(glob->bo_glob, ctx);
ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;

if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ put_pages:

int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;
@@ -886,8 +886,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -991,8 +991,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;