Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the new
   simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
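For context on the kmap_atomic() conflict noted above: 2.6.37 switched to a stack-based kmap_atomic() that drops the explicit KM_* slot argument, which is what forced the i915_gem.c/intel_overlay.c fixups. A minimal sketch of the two calling conventions; the wrapper functions and their page/dst parameters are illustrative, not taken from this merge:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* pre-2.6.37: the caller names an explicit KM_* slot */
    static void copy_from_page_old(struct page *page, void *dst, size_t len)
    {
        void *src = kmap_atomic(page, KM_USER0);

        memcpy(dst, src, len);
        kunmap_atomic(src, KM_USER0);
    }

    /* 2.6.37 stack-based interface: slots are managed implicitly */
    static void copy_from_page_new(struct page *page, void *dst, size_t len)
    {
        void *src = kmap_atomic(page);

        memcpy(dst, src, len);
        kunmap_atomic(src);
    }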
drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
    ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
    ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
    ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
    ttm_bo_manager.o

obj-$(CONFIG_DRM_TTM) += ttm.o
drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
    struct ttm_agp_backend *agp_be =
        container_of(backend, struct ttm_agp_backend, backend);
    struct drm_mm_node *node = bo_mem->mm_node;
    struct agp_memory *mem = agp_be->mem;
    int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
    int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
    mem->is_flushed = 1;
    mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

    ret = agp_bind_memory(mem, bo_mem->mm_node->start);
    ret = agp_bind_memory(mem, node->start);
    if (ret)
        printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");

drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
        man->available_caching);
    printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
        man->default_caching);
    if (mem_type != TTM_PL_SYSTEM) {
        spin_lock(&bdev->glob->lru_lock);
        drm_mm_debug_table(&man->manager, TTM_PFX);
        spin_unlock(&bdev->glob->lru_lock);
    }
    if (mem_type != TTM_PL_SYSTEM)
        (*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -169,18 +166,13 @@ static void ttm_bo_release_list(struct kref *list_kref)

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{

    if (interruptible) {
        int ret = 0;

        ret = wait_event_interruptible(bo->event_queue,
        return wait_event_interruptible(bo->event_queue,
                    atomic_read(&bo->reserved) == 0);
        if (unlikely(ret != 0))
            return ret;
    } else {
        wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        return 0;
    }
    return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

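ttm_bo_wait_unreserved() is the sleeping half of TTM's reservation protocol, and the simplified version above just propagates wait_event_interruptible()'s return value directly. As a reminder of the caller side, a minimal sketch of the usual reserve loop; this is a simplified illustration, not the real ttm_bo_reserve_locked() (which also handles LRU sequence numbers):

    /* sketch: spin until we win the 0 -> 1 transition on bo->reserved */
    static int reserve_sketch(struct ttm_buffer_object *bo, bool interruptible)
    {
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
            ret = ttm_bo_wait_unreserved(bo, interruptible);
            if (unlikely(ret != 0))
                return ret;  /* typically -ERESTARTSYS */
        }
        return 0;
    }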
@@ -421,7 +413,7 @@ moved:

    if (bo->mem.mm_node) {
        spin_lock(&bo->lock);
        bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
        bo->offset = (bo->mem.start << PAGE_SHIFT) +
            bdev->man[bo->mem.mem_type].gpu_offset;
        bo->cur_placement = bo->mem.placement;
        spin_unlock(&bo->lock);
@@ -442,135 +434,144 @@ out_err:
}

/**
 * Call bo::reserved and with the lru lock held.
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks.
 * Will release the bo::reserved lock and the
 * lru lock on exit.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
    struct ttm_bo_global *glob = bo->glob;

    if (bo->ttm) {

        /**
         * Release the lru_lock, since we don't want to have
         * an atomic requirement on ttm_tt[unbind|destroy].
         */

        spin_unlock(&glob->lru_lock);
        ttm_tt_unbind(bo->ttm);
        ttm_tt_destroy(bo->ttm);
        bo->ttm = NULL;
        spin_lock(&glob->lru_lock);
    }

    if (bo->mem.mm_node) {
        drm_mm_put_block(bo->mem.mm_node);
        bo->mem.mm_node = NULL;
    }
    ttm_bo_mem_put(bo, &bo->mem);

    atomic_set(&bo->reserved, 0);
    wake_up_all(&bo->event_queue);
    spin_unlock(&glob->lru_lock);
}


/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_global *glob = bo->glob;
    struct ttm_bo_driver *driver = bdev->driver;
    struct ttm_bo_driver *driver;
    void *sync_obj;
    void *sync_obj_arg;
    int put_count;
    int ret;

    spin_lock(&bo->lock);
retry:
    (void) ttm_bo_wait(bo, false, false, !remove_all);

    (void) ttm_bo_wait(bo, false, false, true);
    if (!bo->sync_obj) {
        int put_count;

        spin_unlock(&bo->lock);

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);

        /**
         * Someone else has the object reserved. Bail and retry.
         * Lock inversion between bo::reserve and bo::lock here,
         * but that's OK, since we're only trylocking.
         */

        if (unlikely(ret == -EBUSY)) {
            spin_unlock(&glob->lru_lock);
            spin_lock(&bo->lock);
            goto requeue;
        }
        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

        /**
         * We can re-check for sync object without taking
         * the bo::lock since setting the sync object requires
         * also bo::reserved. A busy object at this point may
         * be caused by another thread starting an accelerated
         * eviction.
         */

        if (unlikely(bo->sync_obj)) {
            atomic_set(&bo->reserved, 0);
            wake_up_all(&bo->event_queue);
            spin_unlock(&glob->lru_lock);
            spin_lock(&bo->lock);
            if (remove_all)
                goto retry;
            else
                goto requeue;
        }
        if (unlikely(ret == -EBUSY))
            goto queue;

        spin_unlock(&bo->lock);
        put_count = ttm_bo_del_from_lru(bo);

        if (!list_empty(&bo->ddestroy)) {
            list_del_init(&bo->ddestroy);
            ++put_count;
        }

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        while (put_count--)
            kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return 0;
    }
requeue:
    spin_lock(&glob->lru_lock);
    if (list_empty(&bo->ddestroy)) {
        void *sync_obj = bo->sync_obj;
        void *sync_obj_arg = bo->sync_obj_arg;

        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bo->lock);

        if (sync_obj)
            driver->sync_obj_flush(sync_obj, sync_obj_arg);
        schedule_delayed_work(&bdev->wq,
                ((HZ / 100) < 1) ? 1 : HZ / 100);
        ret = 0;

        return;
    } else {
        spin_lock(&glob->lru_lock);
    }
queue:
    sync_obj = bo->sync_obj;
    sync_obj_arg = bo->sync_obj_arg;
    driver = bdev->driver;

    kref_get(&bo->list_kref);
    list_add_tail(&bo->ddestroy, &bdev->ddestroy);
    spin_unlock(&glob->lru_lock);
    spin_unlock(&bo->lock);

    if (sync_obj)
        driver->sync_obj_flush(sync_obj, sync_obj_arg);
    schedule_delayed_work(&bdev->wq,
            ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * @interruptible     Any sleeps should occur interruptibly.
 * @no_wait_reserve   Never wait for reserve. Return -EBUSY instead.
 * @no_wait_gpu       Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait_reserve,
                   bool no_wait_gpu)
{
    struct ttm_bo_global *glob = bo->glob;
    int put_count;
    int ret = 0;

retry:
    spin_lock(&bo->lock);
    ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
    spin_unlock(&bo->lock);

    if (unlikely(ret != 0))
        return ret;

    spin_lock(&glob->lru_lock);
    ret = ttm_bo_reserve_locked(bo, interruptible,
                    no_wait_reserve, false, 0);

    if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bo->lock);
        ret = -EBUSY;
        return ret;
    }

    return ret;
    /**
     * We can re-check for sync object without taking
     * the bo::lock since setting the sync object requires
     * also bo::reserved. A busy object at this point may
     * be caused by another thread recently starting an accelerated
     * eviction.
     */

    if (unlikely(bo->sync_obj)) {
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
        goto retry;
    }

    put_count = ttm_bo_del_from_lru(bo);
    list_del_init(&bo->ddestroy);
    ++put_count;

    spin_unlock(&glob->lru_lock);
    ttm_bo_cleanup_memtype_use(bo);

    while (put_count--)
        kref_put(&bo->list_kref, ttm_bo_ref_bug);

    return 0;
}

/**
@@ -602,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
        }

        spin_unlock(&glob->lru_lock);
        ret = ttm_bo_cleanup_refs(entry, remove_all);
        ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
                      !remove_all);
        kref_put(&entry->list_kref, ttm_bo_release_list);
        entry = nentry;

@@ -645,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
        bo->vm_node = NULL;
    }
    write_unlock(&bdev->vm_lock);
    ttm_bo_cleanup_refs(bo, false);
    ttm_bo_cleanup_refs_or_queue(bo);
    kref_put(&bo->list_kref, ttm_bo_release_list);
    write_lock(&bdev->vm_lock);
}
@@ -680,7 +682,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
            bool no_wait_reserve, bool no_wait_gpu)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_global *glob = bo->glob;
    struct ttm_mem_reg evict_mem;
    struct ttm_placement placement;
    int ret = 0;
@@ -726,12 +727,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
    if (ret) {
        if (ret != -ERESTARTSYS)
            printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
        spin_lock(&glob->lru_lock);
        if (evict_mem.mm_node) {
            drm_mm_put_block(evict_mem.mm_node);
            evict_mem.mm_node = NULL;
        }
        spin_unlock(&glob->lru_lock);
        ttm_bo_mem_put(bo, &evict_mem);
        goto out;
    }
    bo->evicted = true;
@@ -759,6 +755,18 @@ retry:
    bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
    kref_get(&bo->list_kref);

    if (!list_empty(&bo->ddestroy)) {
        spin_unlock(&glob->lru_lock);
        ret = ttm_bo_cleanup_refs(bo, interruptible,
                      no_wait_reserve, no_wait_gpu);
        kref_put(&bo->list_kref, ttm_bo_release_list);

        if (likely(ret == 0 || ret == -ERESTARTSYS))
            return ret;

        goto retry;
    }

    ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

    if (unlikely(ret == -EBUSY)) {
@@ -792,41 +800,14 @@ retry:
    return ret;
}

static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
                struct ttm_mem_type_manager *man,
                struct ttm_placement *placement,
                struct ttm_mem_reg *mem,
                struct drm_mm_node **node)
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
    struct ttm_bo_global *glob = bo->glob;
    unsigned long lpfn;
    int ret;
    struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

    lpfn = placement->lpfn;
    if (!lpfn)
        lpfn = man->size;
    *node = NULL;
    do {
        ret = drm_mm_pre_get(&man->manager);
        if (unlikely(ret))
            return ret;

        spin_lock(&glob->lru_lock);
        *node = drm_mm_search_free_in_range(&man->manager,
                mem->num_pages, mem->page_alignment,
                placement->fpfn, lpfn, 1);
        if (unlikely(*node == NULL)) {
            spin_unlock(&glob->lru_lock);
            return 0;
        }
        *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
                    mem->page_alignment,
                    placement->fpfn,
                    lpfn);
        spin_unlock(&glob->lru_lock);
    } while (*node == NULL);
    return 0;
    if (mem->mm_node)
        (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
@@ -843,14 +824,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_global *glob = bdev->glob;
    struct ttm_mem_type_manager *man = &bdev->man[mem_type];
    struct drm_mm_node *node;
    int ret;

    do {
        ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
        ret = (*man->func->get_node)(man, bo, placement, mem);
        if (unlikely(ret != 0))
            return ret;
        if (node)
        if (mem->mm_node)
            break;
        spin_lock(&glob->lru_lock);
        if (list_empty(&man->lru)) {
@@ -863,9 +843,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
        if (unlikely(ret != 0))
            return ret;
    } while (1);
    if (node == NULL)
    if (mem->mm_node == NULL)
        return -ENOMEM;
    mem->mm_node = node;
    mem->mem_type = mem_type;
    return 0;
}
@@ -939,7 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
    bool type_found = false;
    bool type_ok = false;
    bool has_erestartsys = false;
    struct drm_mm_node *node = NULL;
    int i, ret;

    mem->mm_node = NULL;
@@ -973,17 +951,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,

        if (man->has_type && man->use_type) {
            type_found = true;
            ret = ttm_bo_man_get_node(bo, man, placement, mem,
                        &node);
            ret = (*man->func->get_node)(man, bo, placement, mem);
            if (unlikely(ret))
                return ret;
        }
        if (node)
        if (mem->mm_node)
            break;
    }

    if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
        mem->mm_node = node;
    if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
        mem->mem_type = mem_type;
        mem->placement = cur_flags;
        return 0;
@@ -1053,7 +1029,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
            bool interruptible, bool no_wait_reserve,
            bool no_wait_gpu)
{
    struct ttm_bo_global *glob = bo->glob;
    int ret = 0;
    struct ttm_mem_reg mem;

@@ -1081,11 +1056,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
        goto out_unlock;
    ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
    if (ret && mem.mm_node) {
        spin_lock(&glob->lru_lock);
        drm_mm_put_block(mem.mm_node);
        spin_unlock(&glob->lru_lock);
    }
    if (ret && mem.mm_node)
        ttm_bo_mem_put(bo, &mem);
    return ret;
}

@@ -1093,11 +1065,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
            struct ttm_mem_reg *mem)
{
    int i;
    struct drm_mm_node *node = mem->mm_node;

    if (node && placement->lpfn != 0 &&
        (node->start < placement->fpfn ||
         node->start + node->size > placement->lpfn))
    if (mem->mm_node && placement->lpfn != 0 &&
        (mem->start < placement->fpfn ||
         mem->start + mem->num_pages > placement->lpfn))
        return -1;

    for (i = 0; i < placement->num_placement; i++) {
@@ -1341,7 +1312,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
    struct ttm_bo_global *glob = bdev->glob;
    struct ttm_mem_type_manager *man;
    int ret = -EINVAL;

@@ -1364,13 +1334,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
    if (mem_type > 0) {
        ttm_bo_force_list_clean(bdev, mem_type, false);

        spin_lock(&glob->lru_lock);
        if (drm_mm_clean(&man->manager))
            drm_mm_takedown(&man->manager);
        else
            ret = -EBUSY;

        spin_unlock(&glob->lru_lock);
        ret = (*man->func->takedown)(man);
    }

    return ret;
@@ -1421,6 +1385,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
    ret = bdev->driver->init_mem_type(bdev, type, man);
    if (ret)
        return ret;
    man->bdev = bdev;

    ret = 0;
    if (type != TTM_PL_SYSTEM) {
@@ -1430,7 +1395,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                type);
            return ret;
        }
        ret = drm_mm_init(&man->manager, 0, p_size);

        ret = (*man->func->init)(man, p_size);
        if (ret)
            return ret;
    }
@@ -1824,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                struct ttm_buffer_object, swap);
    kref_get(&bo->list_kref);

    if (!list_empty(&bo->ddestroy)) {
        spin_unlock(&glob->lru_lock);
        (void) ttm_bo_cleanup_refs(bo, false, false, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        continue;
    }

    /**
     * Reserve buffer. Since we unlock while sleeping, we need
     * to re-check that nobody removed us from the swap-list while
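The net effect of the ttm_bo.c changes above is that core TTM no longer manipulates drm_mm nodes directly: space is acquired and released through the per-memory-type manager's get_node()/put_node() hooks, via ttm_bo_mem_space() and the new ttm_bo_mem_put(). A minimal consumer-side sketch, assuming the 2.6.37-era ttm_bo_mem_space() signature with the split no-wait flags; do_the_actual_move() is a hypothetical placeholder for the driver's bind/copy step, not a real TTM function:

    #include "ttm/ttm_bo_driver.h"

    /* hypothetical placeholder for binding/copying into the new placement */
    static int do_the_actual_move(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *new_mem)
    {
        return 0;
    }

    static int move_buffer_sketch(struct ttm_buffer_object *bo,
                                  struct ttm_placement *placement,
                                  bool interruptible)
    {
        struct ttm_mem_reg tmp_mem;
        int ret;

        tmp_mem = bo->mem;
        tmp_mem.mm_node = NULL;

        /* space comes from man->func->get_node() behind this call */
        ret = ttm_bo_mem_space(bo, placement, &tmp_mem,
                               interruptible, false, false);
        if (ret)
            return ret;

        ret = do_the_actual_move(bo, &tmp_mem);
        if (ret)
            ttm_bo_mem_put(bo, &tmp_mem);  /* undone via man->func->put_node() */
        return ret;
    }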
drivers/gpu/drm/ttm/ttm_bo_manager.c (new file, 148 lines)
@@ -0,0 +1,148 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                   struct ttm_buffer_object *bo,
                   struct ttm_placement *placement,
                   struct ttm_mem_reg *mem)
{
    struct ttm_bo_global *glob = man->bdev->glob;
    struct drm_mm *mm = man->priv;
    struct drm_mm_node *node = NULL;
    unsigned long lpfn;
    int ret;

    lpfn = placement->lpfn;
    if (!lpfn)
        lpfn = man->size;
    do {
        ret = drm_mm_pre_get(mm);
        if (unlikely(ret))
            return ret;

        spin_lock(&glob->lru_lock);
        node = drm_mm_search_free_in_range(mm,
                mem->num_pages, mem->page_alignment,
                placement->fpfn, lpfn, 1);
        if (unlikely(node == NULL)) {
            spin_unlock(&glob->lru_lock);
            return 0;
        }
        node = drm_mm_get_block_atomic_range(node, mem->num_pages,
                        mem->page_alignment,
                        placement->fpfn,
                        lpfn);
        spin_unlock(&glob->lru_lock);
    } while (node == NULL);

    mem->mm_node = node;
    mem->start = node->start;
    return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
                struct ttm_mem_reg *mem)
{
    struct ttm_bo_global *glob = man->bdev->glob;

    if (mem->mm_node) {
        spin_lock(&glob->lru_lock);
        drm_mm_put_block(mem->mm_node);
        spin_unlock(&glob->lru_lock);
        mem->mm_node = NULL;
    }
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
               unsigned long p_size)
{
    struct drm_mm *mm;
    int ret;

    mm = kzalloc(sizeof(*mm), GFP_KERNEL);
    if (!mm)
        return -ENOMEM;

    ret = drm_mm_init(mm, 0, p_size);
    if (ret) {
        kfree(mm);
        return ret;
    }

    man->priv = mm;
    return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
    struct ttm_bo_global *glob = man->bdev->glob;
    struct drm_mm *mm = man->priv;
    int ret = 0;

    spin_lock(&glob->lru_lock);
    if (drm_mm_clean(mm)) {
        drm_mm_takedown(mm);
        kfree(mm);
        man->priv = NULL;
    } else
        ret = -EBUSY;
    spin_unlock(&glob->lru_lock);
    return ret;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
                 const char *prefix)
{
    struct ttm_bo_global *glob = man->bdev->glob;
    struct drm_mm *mm = man->priv;

    spin_lock(&glob->lru_lock);
    drm_mm_debug_table(mm, prefix);
    spin_unlock(&glob->lru_lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
    ttm_bo_man_init,
    ttm_bo_man_takedown,
    ttm_bo_man_get_node,
    ttm_bo_man_put_node,
    ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
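ttm_bo_manager_func above is the default drm_mm-backed range manager; a driver opts into it per memory type from its init_mem_type() hook. A minimal sketch of that driver side; the memory types, caching flags, and the hook name sketch_init_mem_type() are illustrative assumptions, not part of this commit:

    #include "ttm/ttm_bo_driver.h"
    #include "ttm/ttm_placement.h"

    static int sketch_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                    struct ttm_mem_type_manager *man)
    {
        switch (type) {
        case TTM_PL_SYSTEM:
            /* system memory needs no space manager */
            man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
            man->available_caching = TTM_PL_MASK_CACHING;
            man->default_caching = TTM_PL_FLAG_CACHED;
            break;
        case TTM_PL_VRAM:
            /* plug in TTM's default range manager for a linear aperture */
            man->func = &ttm_bo_manager_func;
            man->flags = TTM_MEMTYPE_FLAG_FIXED |
                         TTM_MEMTYPE_FLAG_MAPPABLE;
            man->available_caching = TTM_PL_FLAG_UNCACHED |
                                     TTM_PL_FLAG_WC;
            man->default_caching = TTM_PL_FLAG_WC;
            break;
        default:
            return -EINVAL;
        }
        return 0;
    }

The manager's init() then runs when the driver calls ttm_bo_init_mm() for that type, which is the (*man->func->init)(man, p_size) call visible in the ttm_bo.c hunk above.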

drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
    struct ttm_mem_reg *old_mem = &bo->mem;

    if (old_mem->mm_node) {
        spin_lock(&bo->glob->lru_lock);
        drm_mm_put_block(old_mem->mm_node);
        spin_unlock(&bo->glob->lru_lock);
    }
    old_mem->mm_node = NULL;
    ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        dir = 1;

    if ((old_mem->mem_type == new_mem->mem_type) &&
        (new_mem->mm_node->start <
         old_mem->mm_node->start + old_mem->mm_node->size)) {
        (new_mem->start < old_mem->start + old_mem->size)) {
        dir = -1;
        add = new_mem->num_pages - 1;
    }