Merge drm/drm-next into drm-misc-next
drm-next has been forwarded to 5.0-rc1, and we need it to apply the damage helper for dirtyfb series from Noralf Trønnes.

Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
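The conflict resolution below adapts the radeon driver to interface changes picked up with 5.0-rc1: users of struct ttm_validate_buffer move from the removed boolean shared flag to the new num_shared count, the mmu_notifier invalidate_range_start callback now takes a single struct mmu_notifier_range argument instead of separate parameters, and a GPU address calculation gains a widening cast. Short sketches of the changed interfaces follow the relevant hunks.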
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
-		p->relocs[i].tv.shared = !r->write_domain;
+		p->relocs[i].tv.num_shared = !r->write_domain;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
 				      priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 
 		resv = reloc->robj->tbo.resv;
 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-				     reloc->tv.shared);
+				     reloc->tv.num_shared);
 		if (r)
 			return r;
 	}
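These two hunks track the TTM change that replaced the boolean shared flag in struct ttm_validate_buffer with a count of shared fence slots to reserve. A simplified sketch of the structure after that change (details omitted; include/drm/ttm/ttm_execbuf_util.h is authoritative):

	/* Simplified sketch, not the full definition. */
	struct ttm_validate_buffer {
		struct list_head	head;
		struct ttm_buffer_object *bo;
		unsigned int		num_shared;	/* was: bool shared; */
	};

Roughly speaking, a non-zero num_shared asks ttm_eu_reserve_buffers() to pre-reserve that many shared fence slots on the buffer's reservation object, so the former booleans become counts. Note that !r->write_domain still evaluates to 0 or 1, which is why the first hunk works unchanged as a count.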
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	INIT_LIST_HEAD(&list);
 
 	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
+	tv.num_shared = 1;
 	list_add(&tv.head, &list);
 
 	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn,
  * unmap them by move them into system domain again.
  */
 static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
-					     unsigned long start,
-					     unsigned long end,
-					     bool blockable)
+					const struct mmu_notifier_range *range)
 {
 	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
 	struct ttm_operation_ctx ctx = { false, false };
 	struct interval_tree_node *it;
+	unsigned long end;
 	int ret = 0;
 
 	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
+	end = range->end - 1;
 
 	/* TODO we should be able to split locking for interval tree and
 	 * the tear down.
 	 */
-	if (blockable)
+	if (range->blockable)
 		mutex_lock(&rmn->lock);
 	else if (!mutex_trylock(&rmn->lock))
 		return -EAGAIN;
 
-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&rmn->objects, range->start, end);
 	while (it) {
 		struct radeon_mn_node *node;
 		struct radeon_bo *bo;
 		long r;
 
-		if (!blockable) {
+		if (!range->blockable) {
 			ret = -EAGAIN;
 			goto out_unlock;
 		}
 
 		node = container_of(it, struct radeon_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
+		it = interval_tree_iter_next(it, range->start, end);
 
 		list_for_each_entry(bo, &node->bos, mn_list) {
 
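The signature change follows the mm rework that bundles all invalidate_range_start/end parameters into one descriptor. As of 5.0-rc1 the structure looks roughly like the following sketch (include/linux/mmu_notifier.h is authoritative, and later kernels extend it):

	/* Sketch of the 5.0-era structure; later kernels add fields. */
	struct mmu_notifier_range {
		struct mm_struct *mm;
		unsigned long start;
		unsigned long end;	/* exclusive; hence the "end - 1" above */
		bool blockable;
	};

Because range->end is exclusive while the driver's interval tree is inclusive, the callback keeps a local end = range->end - 1 for its lookups.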
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 	list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
-	list[0].tv.shared = true;
+	list[0].tv.num_shared = 1;
 	list[0].tiling_flags = 0;
 	list_add(&list[0].tv.head, head);
 
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 		list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
-		list[idx].tv.shared = true;
+		list[idx].tv.num_shared = 1;
 		list[idx].tiling_flags = 0;
 		list_add(&list[idx++].tv.head, head);
 	}
@@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
 
 	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
+		addr = (u64)mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
 			bo_va->flags |= RADEON_VM_PAGE_VALID;
 		}
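The added (u64) cast in radeon_vm_bo_update() matters on 32-bit builds, where mem->start (a page offset held in an unsigned long) would otherwise be shifted in 32-bit arithmetic and truncated before the assignment to the wider addr. A minimal illustration, assuming a PAGE_SHIFT of 12:

	unsigned long start = 0x200000;		/* page offset of an 8 GiB address */
	u64 bad  = start << PAGE_SHIFT;		/* 32-bit shift wraps to 0 */
	u64 good = (u64)start << PAGE_SHIFT;	/* widened first: 0x200000000 */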