Merge Linus master to drm-next
linux-next conflict reported needed resolution.

Conflicts:
        drivers/gpu/drm/drm_crtc.c
        drivers/gpu/drm/drm_edid.c
        drivers/gpu/drm/i915/intel_sdvo.c
        drivers/gpu/drm/radeon/radeon_ttm.c
        drivers/gpu/drm/ttm/ttm_bo.c
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -43,7 +43,6 @@
 #define TTM_BO_HASH_ORDER 13

 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
-static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);

@@ -259,6 +258,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

+       if (bdev->need_dma32)
+               page_flags |= TTM_PAGE_FLAG_DMA32;
+
        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
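The dma32 plumbing above is completed by later hunks in this merge. A minimal sketch of the flag flow, using only names that appear in this diff (surrounding code elided):

        /* 1. The driver opts in at init time (see ttm_bo_device_init below). */
        bdev->need_dma32 = need_dma32;

        /* 2. ttm_bo_add_ttm() turns the device flag into a per-TTM page flag. */
        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        /* 3. ttm_tt_alloc_page() (see the ttm_tt.c hunk below) maps the page
         *    flag onto a GFP zone modifier. */
        if (page_flags & TTM_PAGE_FLAG_DMA32)
                gfp_flags |= __GFP_DMA32;
        else
                gfp_flags |= __GFP_HIGHMEM;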
@@ -339,6 +341,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
        }

+       if (bdev->driver->move_notify)
+               bdev->driver->move_notify(bo, mem);
+
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
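move_notify is an optional ttm_bo_driver hook, called with the new placement before the data is moved. A minimal sketch of a driver wiring it up; all foo_* names are hypothetical, only the hook signature comes from the call above:

        /* Called whenever a BO changes placement, so the driver can update
         * private state (e.g. GART bindings) that depends on where the BO
         * currently lives. */
        static void foo_bo_move_notify(struct ttm_buffer_object *bo,
                                       struct ttm_mem_reg *new_mem)
        {
                /* tear down driver-private mappings of the old placement */
        }

        static struct ttm_bo_driver foo_bo_driver = {
                /* ... other hooks ... */
                .move_notify = &foo_bo_move_notify,
        };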
@@ -694,31 +699,52 @@ retry_pre_get:
        return 0;
 }

+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+                                     uint32_t cur_placement,
+                                     uint32_t proposed_placement)
+{
+       uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+       uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+       /**
+        * Keep current caching if possible.
+        */
+
+       if ((cur_placement & caching) != 0)
+               result |= (cur_placement & caching);
+       else if ((man->default_caching & caching) != 0)
+               result |= man->default_caching;
+       else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+               result |= TTM_PL_FLAG_CACHED;
+       else if ((TTM_PL_FLAG_WC & caching) != 0)
+               result |= TTM_PL_FLAG_WC;
+       else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+               result |= TTM_PL_FLAG_UNCACHED;
+
+       return result;
+}
+
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
-                                uint32_t mask, uint32_t *res_mask)
+                                uint32_t proposed_placement,
+                                uint32_t *masked_placement)
 {
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

-       if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+       if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

-       if ((mask & man->available_caching) == 0)
+       if ((proposed_placement & man->available_caching) == 0)
                return false;
-       if (mask & man->default_caching)
-               cur_flags |= man->default_caching;
-       else if (mask & TTM_PL_FLAG_CACHED)
-               cur_flags |= TTM_PL_FLAG_CACHED;
-       else if (mask & TTM_PL_FLAG_WC)
-               cur_flags |= TTM_PL_FLAG_WC;
-       else
-               cur_flags |= TTM_PL_FLAG_UNCACHED;

-       *res_mask = cur_flags;
+       cur_flags |= (proposed_placement & man->available_caching);
+
+       *masked_placement = cur_flags;
        return true;
 }
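ttm_bo_select_caching() prefers, in order: a caching mode the buffer already uses, the manager's default, then CACHED, WC, and UNCACHED. A worked example under assumed flag values (man taken to be a VRAM manager, both placements hypothetical):

        uint32_t cur      = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
        uint32_t proposed = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
                            TTM_PL_FLAG_WC;

        uint32_t flags = ttm_bo_select_caching(man, cur, proposed);
        /* caching = CACHED | WC; cur & caching keeps WC, so
         * flags == TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC and no cache
         * attribute transition (hence no flush) is required. */

Keeping the current caching mode whenever the proposal allows it is what lets moves skip cache-attribute transitions entirely.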
@@ -763,6 +789,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (!type_ok)
                        continue;

+               cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+                                                 cur_flags);
+
                if (mem_type == TTM_PL_SYSTEM)
                        break;
@@ -819,6 +848,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                                proposed_placement, &cur_flags))
                        continue;

+               cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+                                                 cur_flags);
+
                ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                             interruptible, no_wait);
@@ -1194,13 +1226,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
        struct ttm_bo_global *glob = bdev->glob;
-       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
+       man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
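The reordering is more than cosmetic: the old code computed &bdev->man[mem_type] before validating mem_type, so an illegal type indexed past the man[] array, and forming an out-of-range pointer is already undefined behaviour in C even if it is never dereferenced. The fixed pattern, in miniature:

        if (mem_type >= TTM_NUM_MEM_TYPES)      /* validate first ...  */
                return -EINVAL;
        man = &bdev->man[mem_type];             /* ... then index      */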
@@ -1417,7 +1450,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
-                      uint64_t file_page_offset)
+                      uint64_t file_page_offset,
+                      bool need_dma32)
 {
        int ret = -EINVAL;
@@ -1446,6 +1480,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
+       bdev->need_dma32 = need_dma32;

        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
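Every caller of ttm_bo_device_init() gains one argument. A hypothetical driver-side call site (all foo_* names assumed, not from this diff):

        /* Pass true when the device cannot DMA above 4GB, so TTM
         * allocates backing pages with __GFP_DMA32. */
        ret = ttm_bo_device_init(&foo->bdev, foo->bo_glob,
                                 &foo_bo_driver, FOO_FILE_PAGE_OFFSET,
                                 foo->need_dma32);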
@@ -1511,6 +1546,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)

        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
 }
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);

 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 {
@@ -1632,6 +1668,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
+               } else {
+                       spin_unlock(&bo->lock);
+                       driver->sync_obj_unref(&sync_obj);
+                       spin_lock(&bo->lock);
                }
        }
        return 0;
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }

 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-                               unsigned long page)
+                               unsigned long page,
+                               pgprot_t prot)
 {
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-       dst = kmap(d);
+
+#ifdef CONFIG_X86
+       dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               dst = vmap(&d, 1, 0, prot);
+       else
+               dst = kmap(d);
+#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
-       kunmap(d);
+
+#ifdef CONFIG_X86
+       kunmap_atomic(dst, KM_USER0);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               vunmap(dst);
+       else
+               kunmap(d);
+#endif
+
        return 0;
 }

 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-                               unsigned long page)
+                               unsigned long page,
+                               pgprot_t prot)
 {
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-       src = kmap(s);
+#ifdef CONFIG_X86
+       src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               src = vmap(&s, 1, 0, prot);
+       else
+               src = kmap(s);
+#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
-       kunmap(s);
+
+#ifdef CONFIG_X86
+       kunmap_atomic(src, KM_USER0);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               vunmap(src);
+       else
+               kunmap(s);
+#endif
+
        return 0;
 }
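Both copy helpers now repeat the same map-with-caching-attributes pattern. Factored into a standalone helper it reads as below; the name ttm_kmap_prot is hypothetical, the calls are exactly the ones in the hunks above:

        static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
        {
        #ifdef CONFIG_X86
                /* x86 can install a temporary atomic mapping with
                 * arbitrary page protection bits. */
                return kmap_atomic_prot(p, KM_USER0, prot);
        #else
                /* Elsewhere, fall back to vmap() for non-default
                 * attributes; it can fail and return NULL, hence the
                 * -ENOMEM checks in the callers. */
                if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                        return vmap(&p, 1, 0, prot);
                return kmap(p);
        #endif
        }

Mapping with the same attributes as the buffer's placement avoids holding a cached kernel view of a page that is simultaneously mapped WC or uncached, which is what the plain kmap() in the old code did.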
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
-               if (old_iomap == NULL)
-                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-               else if (new_iomap == NULL)
-                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-               else
+               if (old_iomap == NULL) {
+                       pgprot_t prot = ttm_io_prot(old_mem->placement,
+                                                   PAGE_KERNEL);
+                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+                                                  prot);
+               } else if (new_iomap == NULL) {
+                       pgprot_t prot = ttm_io_prot(new_mem->placement,
+                                                   PAGE_KERNEL);
+                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+                                                  prot);
+               } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bo->lock);
-               driver->sync_obj_unref(&bo->sync_obj);
-
+               if (tmp_obj)
+                       driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bo->lock);
+               if (tmp_obj)
+                       driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_NOPAGE;
        }

+       if (bdev->driver->fault_reserve_notify)
+               bdev->driver->fault_reserve_notify(bo);
+
        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
@@ -327,7 +330,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
-       if (unlikely(kmap_offset) >= bo->num_pages) {
+       if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }
@@ -401,7 +404,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
-       if (unlikely(kmap_offset) >= bo->num_pages)
+       if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
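The two ttm_bo_vm.c hunks above fix a real bug, not style: unlikely() evaluates to 0 or 1, so the old form compared that boolean against the page count instead of bounds-checking the offset:

        /* kernel definition (include/linux/compiler.h): */
        #define unlikely(x)     __builtin_expect(!!(x), 0)

        /* old:  unlikely(kmap_offset) >= bo->num_pages
         *   ==  (!!(kmap_offset))     >= bo->num_pages
         *   ==  (0 or 1)              >= bo->num_pages
         * which is false for any buffer larger than one page, so the
         * -EFBIG bounds check almost never fired. */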
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
        unsigned long i;

        for (i = 0; i < num_pages; ++i) {
-               if (pages[i]) {
-                       unsigned long start = (unsigned long)page_address(pages[i]);
-                       flush_dcache_range(start, start + PAGE_SIZE);
-               }
+               struct page *page = pages[i];
+               void *page_virtual;
+
+               if (unlikely(page == NULL))
+                       continue;
+
+               page_virtual = kmap_atomic(page, KM_USER0);
+               flush_dcache_range((unsigned long) page_virtual,
+                                  (unsigned long) page_virtual + PAGE_SIZE);
+               kunmap_atomic(page_virtual, KM_USER0);
        }
 #else
        if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)

 static struct page *ttm_tt_alloc_page(unsigned page_flags)
 {
-       if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-               return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+       gfp_t gfp_flags = GFP_USER;

-       return alloc_page(GFP_HIGHUSER);
+       if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+               gfp_flags |= __GFP_ZERO;
+
+       if (page_flags & TTM_PAGE_FLAG_DMA32)
+               gfp_flags |= __GFP_DMA32;
+       else
+               gfp_flags |= __GFP_HIGHMEM;
+
+       return alloc_page(gfp_flags);
 }

 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
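The rewrite makes the allocation flags composable instead of hard-coding two cases. A sketch of the resulting masks (note GFP_HIGHUSER is GFP_USER | __GFP_HIGHMEM):

        /* default            -> GFP_USER | __GFP_HIGHMEM (== GFP_HIGHUSER) */
        /* ZERO_ALLOC         -> GFP_HIGHUSER | __GFP_ZERO                  */
        /* DMA32              -> GFP_USER | __GFP_DMA32                     */
        /* ZERO_ALLOC | DMA32 -> GFP_USER | __GFP_DMA32 | __GFP_ZERO        */
        struct page *p = ttm_tt_alloc_page(TTM_PAGE_FLAG_ZERO_ALLOC |
                                           TTM_PAGE_FLAG_DMA32);

__GFP_DMA32 and __GFP_HIGHMEM select mutually exclusive zones, which is why the code branches rather than OR-ing both in.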