Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits)
  drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile
  drm/radeon: fix power supply kconfig interaction.
  drm/radeon/kms: record object that have been list reserved
  drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU.
  drm/radeon/kms: don't default display priority to high on rs4xx
  drm/edid: fix typo in 1600x1200@75 mode
  drm/nouveau: fix i2c-related init table handlers
  drm/nouveau: support init table i2c device identifier 0x81
  drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
  drm/nouveau: display error message for any failed init table opcode
  drm/nouveau: fix init table handlers to return proper error codes
  drm/nv50: support fractional feedback divider on newer chips
  drm/nv50: fix monitor detection on certain chipsets
  drm/nv50: store full dcb i2c entry from vbios
  drm/nv50: fix suspend/resume with DP outputs
  drm/nv50: output calculated crtc pll when debugging on
  drm/nouveau: dump pll limits entries when debugging is on
  drm/nouveau: bios parser fixes for eDP boards
  drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
  drm/nv40: remove some completed ctxprog TODOs
  ...
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
 	printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
 	printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
-	printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
-	printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
 	printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
 	printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
 		man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
 	if (ret)
 		goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+	return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+	if (resched)
+		schedule_delayed_work(&bdev->wq,
+				      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved = false;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-			       no_wait);
+			       no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-					  no_wait);
+					  no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-					interruptible, no_wait);
+					interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret == 0 && mem->mm_node) {
 		mem->placement = cur_flags;
 		mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved = false;
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
 
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved = false;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
 
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-		      struct ttm_mem_reg *mem,
-		      unsigned long *bus_base,
-		      unsigned long *bus_offset, unsigned long *bus_size)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	*bus_size = 0;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
-
-	if (ttm_mem_reg_is_pci(bdev, mem)) {
-		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
-		*bus_size = mem->num_pages << PAGE_SHIFT;
-		*bus_base = man->io_offset;
-	}
-
-	return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 	if (!bdev->dev_mapping)
 		return;
-
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false);
+					     false, false, false);
 		if (unlikely(ret != 0))
 			goto out;
 	}
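Note: the TTM hunks above split the old single no_wait flag into no_wait_reserve and no_wait_gpu across ttm_bo_validate(), ttm_bo_mem_space(), ttm_bo_move_buffer() and the eviction paths, so any driver calling these functions gains an extra boolean argument. A minimal sketch of an updated call site follows, assuming a hypothetical driver helper mydrv_bo_pin() (not part of this commit) and the usual TTM headers for this kernel version:

static int mydrv_bo_pin(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible)
{
	/*
	 * Old API: ttm_bo_validate(bo, placement, interruptible, no_wait);
	 * New API: the wait for the buffer reservation and the wait on the
	 * GPU (fences) are controlled independently.  Passing false for both
	 * keeps the blocking behaviour of the old no_wait == false case.
	 */
	return ttm_bo_validate(bo, placement, interruptible,
			       false /* no_wait_reserve */,
			       false /* no_wait_gpu */);
}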