lib/interval_tree: fast overlap detection
Allow interval trees to quickly check for overlaps to avoid unnecessary
tree lookups in interval_tree_iter_first().  As of this patch, all
interval tree flavors will require using a 'rb_root_cached' such that we
can have the leftmost node easily available.  While most users will make
use of this feature, those with special functions (in addition to the
generic insert, delete, search calls) will avoid using the cached option
as they can do funky things with insertions -- for example,
vma_interval_tree_insert_after().

[jglisse@redhat.com: fix deadlock from typo vm_lock_anon_vma()]
  Link: http://lkml.kernel.org/r/20170808225719.20723-1-jglisse@redhat.com
Link: http://lkml.kernel.org/r/20170719014603.19029-12-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Doug Ledford <dledford@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds

parent 09663c86e2
commit f808c13fd3
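The overlap fast path this series enables boils down to two O(1) bound
checks before any tree walk: the query [start, last] can only intersect
the tree when start <= max(last in tree) and min(start in tree) <= last.
The augmented root already carries max(last) in __subtree_last, and
rb_root_cached keeps the leftmost node (smallest start) at hand.  Below
is a minimal sketch of that idea; the helper name interval_may_overlap()
is illustrative and not part of the patch, whose real check sits inside
the generated interval_tree_iter_first().

#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/types.h>

/* Sketch only: bail out on non-overlapping queries without walking. */
static bool interval_may_overlap(struct rb_root_cached *root,
				 unsigned long start, unsigned long last)
{
	struct interval_tree_node *top, *leftmost;

	if (!root->rb_root.rb_node)
		return false;		/* empty tree */

	top = rb_entry(root->rb_root.rb_node, struct interval_tree_node, rb);
	if (top->__subtree_last < start)
		return false;		/* every interval ends before 'start' */

	leftmost = rb_entry(root->rb_leftmost, struct interval_tree_node, rb);
	if (leftmost->start > last)
		return false;		/* every interval starts after 'last' */

	return true;			/* a tree walk may find an overlap */
}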
@@ -924,7 +924,7 @@ struct radeon_vm_id {
 struct radeon_vm {
 	struct mutex		mutex;

-	struct rb_root		va;
+	struct rb_root_cached	va;

 	/* protecting invalidated and freed */
 	spinlock_t		status_lock;
@@ -50,7 +50,7 @@ struct radeon_mn {

 	/* objects protected by lock */
 	struct mutex		lock;
-	struct rb_root		objects;
+	struct rb_root_cached	objects;
 };

 struct radeon_mn_node {
@@ -75,8 +75,8 @@ static void radeon_mn_destroy(struct work_struct *work)
 	mutex_lock(&rdev->mn_lock);
 	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
-					     it.rb) {
+	rbtree_postorder_for_each_entry_safe(node, next_node,
+					     &rmn->objects.rb_root, it.rb) {

 		interval_tree_remove(&node->it, &rmn->objects);
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
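The radeon_mn_destroy() hunk above shows the one wrinkle of the
conversion: the interval tree API now takes the rb_root_cached itself,
while raw rbtree helpers such as rbtree_postorder_for_each_entry_safe()
still expect a plain rb_root, hence the &rmn->objects.rb_root argument.
A hedged sketch of the same teardown pattern with hypothetical names
(demo_node, demo_teardown):

#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_node {			/* stand-in for radeon_mn_node */
	struct interval_tree_node it;
	void *payload;
};

static void demo_teardown(struct rb_root_cached *tree)
{
	struct demo_node *node, *next;

	/* raw rbtree walker: pass the embedded rb_root */
	rbtree_postorder_for_each_entry_safe(node, next, &tree->rb_root, it.rb) {
		/* interval tree API: pass the cached root */
		interval_tree_remove(&node->it, tree);
		kfree(node);
	}
}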
@@ -205,7 +205,7 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
 	rmn->mm = mm;
 	rmn->mn.ops = &radeon_mn_ops;
 	mutex_init(&rmn->lock);
-	rmn->objects = RB_ROOT;
+	rmn->objects = RB_ROOT_CACHED;

 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
@@ -1185,7 +1185,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 		vm->ids[i].last_id_use = NULL;
 	}
 	mutex_init(&vm->mutex);
-	vm->va = RB_ROOT;
+	vm->va = RB_ROOT_CACHED;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->freed);
@@ -1232,10 +1232,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	struct radeon_bo_va *bo_va, *tmp;
 	int i, r;

-	if (!RB_EMPTY_ROOT(&vm->va)) {
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(rdev->dev, "still active bo inside vm\n");
 	}
-	rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
+					     &vm->va.rb_root, it.rb) {
 		interval_tree_remove(&bo_va->it, &vm->va);
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
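Taken together, the driver-side conversion pattern in these hunks is:
declare the tree as rb_root_cached, initialize it with RB_ROOT_CACHED,
pass the cached root to the interval tree calls, and reach through the
.rb_root member for raw rbtree macros.  A short illustrative sketch
(demo_va, demo_track, demo_is_mapped and demo_is_empty are hypothetical
names, not from the patch):

#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/types.h>

static struct rb_root_cached demo_va = RB_ROOT_CACHED;

static void demo_track(struct interval_tree_node *node,
		       unsigned long start, unsigned long last)
{
	node->start = start;
	node->last  = last;
	interval_tree_insert(node, &demo_va);	/* cached root */
}

static bool demo_is_mapped(unsigned long start, unsigned long last)
{
	/*
	 * With rb_root_cached, iter_first() can reject non-overlapping
	 * queries from the root and the cached leftmost node alone,
	 * without descending the tree.
	 */
	return interval_tree_iter_first(&demo_va, start, last) != NULL;
}

static bool demo_is_empty(void)
{
	/* raw rbtree macros still take the embedded rb_root */
	return RB_EMPTY_ROOT(&demo_va.rb_root);
}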