Merge branch 'drm-radeon-kms' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-radeon-kms' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (35 commits)
  drm/radeon: set fb aperture sizes for framebuffer handoff.
  drm/ttm: fix highuser vs dma32 confusion.
  drm/radeon: Fix size used for benchmarking BO copies.
  drm/radeon: Add radeon.test parameter for running BO GPU copy tests.
  drm/radeon/kms: allow interruptible waits for objects.
  drm/ttm: powerpc: Fix Highmem cache flushing.
  x86: Export kmap_atomic_prot() needed for TTM.
  drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.
  drm/ttm: Fix an oops and sync object leak.
  drm/radeon/kms: vram sizing on certain r100 chips needs workaround.
  drm/radeon: Pay more attention to object placement requested by userspace.
  drm/radeon: Fall back to evicting BOs with memcpy if necessary.
  drm/radeon: Don't unreserve twice on failure to validate.
  drm/radeon/kms: fix bandwidth computation on avivo hardware
  drm/radeon/kms: add initial colortiling support.
  drm/radeon/kms: fix hotspot handling on pre-avivo chips
  drm/radeon/kms: enable frac fb divs on rs600/rs690/rs740
  drm/radeon/kms: add PLL flag to prefer frequencies <= the target freq
  drm/radeon/kms: block RN50 from using 3D engine.
  drm/radeon/kms: fix VRAM sizing like DDX does it.
  ...
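Of the TTM changes excerpted below, the copy-path fix depends on kmap_atomic_prot(), which modules could not use before this series; the "x86: Export kmap_atomic_prot() needed for TTM" commit simply exports the existing symbol. A sketch of that one-line change (the x86 file itself is not part of the excerpt below):

	/* arch/x86/mm/highmem_32.c -- the function body is unchanged,
	 * only the export is new: */
	void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
	{
		/* existing body */
	}
	EXPORT_SYMBOL(kmap_atomic_prot);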
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -43,7 +43,6 @@
 #define TTM_BO_HASH_ORDER 13
 
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
-static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 
 static inline uint32_t ttm_bo_type_flags(unsigned type)
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 	TTM_ASSERT_LOCKED(&bo->mutex);
 	bo->ttm = NULL;
 
+	if (bdev->need_dma32)
+		page_flags |= TTM_PAGE_FLAG_DMA32;
+
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	}
 
+	if (bdev->driver->move_notify)
+		bdev->driver->move_notify(bo, mem);
+
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
@@ -655,31 +660,52 @@ retry_pre_get:
 	return 0;
 }
 
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+				      uint32_t cur_placement,
+				      uint32_t proposed_placement)
+{
+	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+	/**
+	 * Keep current caching if possible.
+	 */
+
+	if ((cur_placement & caching) != 0)
+		result |= (cur_placement & caching);
+	else if ((man->default_caching & caching) != 0)
+		result |= man->default_caching;
+	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+		result |= TTM_PL_FLAG_CACHED;
+	else if ((TTM_PL_FLAG_WC & caching) != 0)
+		result |= TTM_PL_FLAG_WC;
+	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+		result |= TTM_PL_FLAG_UNCACHED;
+
+	return result;
+}
+
+
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 bool disallow_fixed,
 				 uint32_t mem_type,
-				 uint32_t mask, uint32_t *res_mask)
+				 uint32_t proposed_placement,
+				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
 		return false;
 
-	if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
 		return false;
 
-	if ((mask & man->available_caching) == 0)
+	if ((proposed_placement & man->available_caching) == 0)
 		return false;
-	if (mask & man->default_caching)
-		cur_flags |= man->default_caching;
-	else if (mask & TTM_PL_FLAG_CACHED)
-		cur_flags |= TTM_PL_FLAG_CACHED;
-	else if (mask & TTM_PL_FLAG_WC)
-		cur_flags |= TTM_PL_FLAG_WC;
-	else
-		cur_flags |= TTM_PL_FLAG_UNCACHED;
 
-	*res_mask = cur_flags;
+	cur_flags |= (proposed_placement & man->available_caching);
+
+	*masked_placement = cur_flags;
 	return true;
 }
 
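ttm_bo_select_caching() prefers, in order: the buffer's current caching, the manager's default caching, then CACHED, WC, UNCACHED. A worked example with illustrative values (not part of the patch):

	/* BO currently write-combined; proposed placement allows CACHED or WC: */
	uint32_t cur  = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
	uint32_t prop = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC;
	/* caching = prop & TTM_PL_MASK_CACHING  ->  CACHED | WC           */
	/* cur & caching                         ->  WC (non-zero)         */
	/* result = VRAM | WC: the existing caching is kept, so no costly  */
	/* cache-attribute transition is needed.                           */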
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!type_ok)
 			continue;
 
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 						 proposed_placement, &cur_flags))
 			continue;
 
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+
 		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
 					     interruptible, no_wait);
 
@@ -1305,7 +1337,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_mem_global *mem_glob,
-		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
+		       struct ttm_bo_driver *driver, uint64_t file_page_offset,
+		       bool need_dma32)
 {
 	int ret = -EINVAL;
 
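Every ttm_bo_device_init() caller must now state whether its device is limited to 32-bit DMA addresses. A sketch of the driver-side change (names such as my_drv and my_bo_driver are illustrative):

	ret = ttm_bo_device_init(&my_drv->bdev, mem_glob, &my_bo_driver,
				 DRM_FILE_PAGE_OFFSET,
				 my_drv->need_dma32);

The flag flows from here into bdev->need_dma32, from there into TTM_PAGE_FLAG_DMA32 in ttm_bo_add_ttm() above, and finally into __GFP_DMA32 in ttm_tt_alloc_page() below.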
@@ -1342,6 +1375,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	INIT_LIST_HEAD(&bdev->swap_lru);
 	bdev->dev_mapping = NULL;
+	bdev->need_dma32 = need_dma32;
 	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
 	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
 	if (unlikely(ret != 0)) {
@@ -1419,6 +1453,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
 }
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 {
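With the static forward declaration dropped (first hunk) and the symbol exported here, a driver can now invalidate a buffer's CPU mappings itself, presumably for cases like the radeon colortiling work in this merge, where a change of tiling state must force user mappings to be refaulted (sketch; rbo is an illustrative driver object):

	ttm_bo_unmap_virtual(&rbo->tbo);	/* kill CPU mappings; next fault remaps */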
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *d = ttm_tt_get_page(ttm, page);
 	void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = kmap(d);
+
+#ifdef CONFIG_X86
+	dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		dst = vmap(&d, 1, 0, prot);
+	else
+		dst = kmap(d);
+#endif
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
-	kunmap(d);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(dst, KM_USER0);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		vunmap(dst);
+	else
+		kunmap(d);
+#endif
+
 	return 0;
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *s = ttm_tt_get_page(ttm, page);
 	void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = kmap(s);
+
+#ifdef CONFIG_X86
+	src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		src = vmap(&s, 1, 0, prot);
+	else
+		src = kmap(s);
+#endif
 	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
-	kunmap(s);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(src, KM_USER0);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		vunmap(src);
+	else
+		kunmap(s);
+#endif
+
 	return 0;
 }
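The point of both hunks: when a TTM page is mapped to userspace with non-standard caching (write-combined or uncached), the kernel's transient copy mapping must use the same attributes, otherwise the CPU can create a cached alias of the same physical page. On x86, kmap_atomic_prot() maps with explicit protection bits; elsewhere the code falls back to a single-page vmap() whenever the protection is not plain PAGE_KERNEL. A hypothetical helper factoring the repeated mapping pattern (not in the patch):

	static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
	{
	#ifdef CONFIG_X86
		/* atomic mapping with caller-supplied caching attributes */
		return kmap_atomic_prot(p, KM_USER0, prot);
	#else
		/* non-standard attributes: build a one-page vmap instead */
		if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
			return vmap(&p, 1, 0, prot);
		return kmap(p);
	#endif
	}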
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < new_mem->num_pages; ++i) {
 		page = i * dir + add;
-		if (old_iomap == NULL)
-			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-		else if (new_iomap == NULL)
-			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-		else
+		if (old_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+						   prot);
+		} else if (new_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+						   prot);
+		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
 		if (ret)
 			goto out1;
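In both branches the protection is derived from the placement flags of the TTM (system-memory) side of the copy, since that is the side mapped through kmap; the IO side is already mapped through ioremap. An illustrative call (values not from the patch):

	/* a write-combined system placement yields a WC kernel mapping: */
	pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_TT | TTM_PL_FLAG_WC, PAGE_KERNEL);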
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
 		spin_unlock(&bo->lock);
-		driver->sync_obj_unref(&bo->sync_obj);
-
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
 			return ret;
 
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
 		spin_unlock(&bo->lock);
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
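These two hunks are the visible half of the "Fix an oops and sync object leak" change: the old code unreferenced bo->sync_obj itself, i.e. the fence just attached for this move, which could later be dereferenced after release (the oops), while the previously attached fence was never dropped (the leak). The fix detaches the old fence into tmp_obj earlier in the function, under bo->lock, outside this excerpt; roughly:

	/* earlier in ttm_bo_move_accel_cleanup(), under bo->lock (sketch): */
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;		/* detach the old fence */
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);	/* attach the new one */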
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_NOPAGE;
 	}
 
+	if (bdev->driver->fault_reserve_notify)
+		bdev->driver->fault_reserve_notify(bo);
+
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
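The new optional hook gives the driver a chance to act on the buffer right after it is reserved in the fault path and before any page is inserted, for example to move it somewhere CPU-accessible. A driver opts in through its ttm_bo_driver table (sketch; the callback name is illustrative):

	static struct ttm_bo_driver my_bo_driver = {
		/* ... */
		.fault_reserve_notify = &my_fault_reserve_notify,
	};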
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
 	unsigned long i;
 
 	for (i = 0; i < num_pages; ++i) {
-		if (pages[i]) {
-			unsigned long start = (unsigned long)page_address(pages[i]);
-			flush_dcache_range(start, start + PAGE_SIZE);
-		}
+		struct page *page = pages[i];
+		void *page_virtual;
+
+		if (unlikely(page == NULL))
+			continue;
+
+		page_virtual = kmap_atomic(page, KM_USER0);
+		flush_dcache_range((unsigned long) page_virtual,
+				   (unsigned long) page_virtual + PAGE_SIZE);
+		kunmap_atomic(page_virtual, KM_USER0);
 	}
 #else
 	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
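The old code broke on highmem pages: page_address() only returns a usable address for pages with a permanent kernel mapping, so with highmem enabled it could hand flush_dcache_range() a NULL-based range. The rewrite creates a temporary mapping first. The failure mode in two lines (illustration only, not from the patch):

	struct page *p = alloc_page(GFP_HIGHUSER);	/* may land in highmem */
	void *va = page_address(p);			/* NULL if it did */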
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 
 static struct page *ttm_tt_alloc_page(unsigned page_flags)
 {
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+	gfp_t gfp_flags = GFP_USER;
 
-	return alloc_page(GFP_HIGHUSER);
+	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	if (page_flags & TTM_PAGE_FLAG_DMA32)
+		gfp_flags |= __GFP_DMA32;
+	else
+		gfp_flags |= __GFP_HIGHMEM;
+
+	return alloc_page(gfp_flags);
 }
 
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
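The rewrite keeps the old behaviour for the non-DMA32 cases and stops mixing __GFP_DMA32 with __GFP_HIGHMEM: a dma32 page must stay below 4GB where the device can address it. The resulting masks (illustration):

	/* ZERO_ALLOC | DMA32 -> GFP_USER | __GFP_ZERO | __GFP_DMA32        */
	/* ZERO_ALLOC         -> GFP_USER | __GFP_HIGHMEM | __GFP_ZERO     */
	/*                       (== GFP_HIGHUSER | __GFP_ZERO, as before) */
	/* DMA32              -> GFP_USER | __GFP_DMA32                    */
	/* neither            -> GFP_USER | __GFP_HIGHMEM (== GFP_HIGHUSER)*/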