drm/ttm: convert to unified vma offset manager
Use the new vma-manager infrastructure. This doesn't change any
implementation details as the vma-offset-manager is nearly copied 1-to-1
from TTM.

The vm_lock is moved into the offset manager so we can drop it from TTM.
During lookup, we use the vma locking helpers to take a reference to the
found object. In all other scenarios, locking stays the same as before.
We always guarantee that drm_vma_offset_remove() is called only during
destruction. Hence, helpers like drm_vma_node_offset_addr() are always
safe as long as the node has a valid offset.

This also drops the addr_space_offset member as it is a copy of vm_start
in vma_node objects. Use the accessor functions instead.

v4:
- remove vm_lock
- use drm_vma_offset_lock_lookup() to protect lookup (instead of vm_lock)

Cc: Dave Airlie <airlied@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Martin Peres <martin.peres@labri.fr>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@gmail.com>
Committed by: Dave Airlie
Parent: 0de23977cf
Commit: 72525b3f33
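
For reference, a minimal sketch of the accessor usage the message refers to. The wrapper helper below is hypothetical; only drm_vma_node_offset_addr() and the vma_node member come from this patch:

#include <drm/drm_vma_manager.h>
#include <drm/ttm/ttm_bo_api.h>

/* Sketch: the removed bo->addr_space_offset cached exactly this value;
 * after this patch it is derived from the vma_node's page offset. */
static inline u64 ttm_bo_mmap_offset(struct ttm_buffer_object *bo)
{
	return drm_vma_node_offset_addr(&bo->vma_node);
}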
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -615,13 +615,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	write_lock(&bdev->vm_lock);
-	if (likely(bo->vm_node != NULL)) {
-		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-		drm_mm_put_block(bo->vm_node);
-		bo->vm_node = NULL;
-	}
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -1129,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->resv = &bo->ttm_resv;
 	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
+	drm_vma_node_reset(&bo->vma_node);
 
 	ret = ttm_bo_check_placement(bo, placement);
 
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 	TTM_DEBUG("Swap list was clean\n");
 	spin_unlock(&glob->lru_lock);
 
-	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-	write_lock(&bdev->vm_lock);
-	drm_mm_takedown(&bdev->addr_space_mm);
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
 	return ret;
 }
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 {
 	int ret = -EINVAL;
 
-	rwlock_init(&bdev->vm_lock);
 	bdev->driver = driver;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
-	bdev->addr_space_rb = RB_ROOT;
-	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+				    0x10000000);
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
@@ -1498,12 +1488,17 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	loff_t offset = (loff_t) bo->addr_space_offset;
-	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+	loff_t offset, holelen;
 
 	if (!bdev->dev_mapping)
 		return;
-	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+
+	if (drm_vma_node_has_offset(&bo->vma_node)) {
+		offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node);
+		holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+		unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	}
 	ttm_mem_io_free_vm(bo);
 }
 
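
The drm_vma_node_has_offset() guard above makes the unmap path safe for BOs whose offset was never allocated. A sketch of what the helper amounts to (an assumption based on the drm_vma_manager helpers introduced by the same series, not code from this patch):

/* Sketch: the node has a valid offset iff its embedded drm_mm node
 * is currently allocated in the manager's address space. */
static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
{
	return drm_mm_node_allocated(&node->vm_node);
}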
@@ -1520,31 +1515,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-	struct rb_node *parent = NULL;
-	struct ttm_buffer_object *cur_bo;
-	unsigned long offset = bo->vm_node->start;
-	unsigned long cur_offset;
-
-	while (*cur) {
-		parent = *cur;
-		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-		cur_offset = cur_bo->vm_node->start;
-		if (offset < cur_offset)
-			cur = &parent->rb_left;
-		else if (offset > cur_offset)
-			cur = &parent->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&bo->vm_rb, parent, cur);
-	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
 /**
  * ttm_bo_setup_vm:
  *
@@ -1559,38 +1529,9 @@ static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	int ret;
-
-retry_pre_get:
-	ret = drm_mm_pre_get(&bdev->addr_space_mm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	write_lock(&bdev->vm_lock);
-	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-					 bo->mem.num_pages, 0, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-					      bo->mem.num_pages, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		write_unlock(&bdev->vm_lock);
-		goto retry_pre_get;
-	}
-
-	ttm_bo_vm_insert_rb(bo);
-	write_unlock(&bdev->vm_lock);
-	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-	return 0;
-out_unlock:
-	write_unlock(&bdev->vm_lock);
-	return ret;
+
+	return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+				  bo->mem.num_pages);
 }
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
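
Taken together, the ttm_bo.c hunks above replace the per-device drm_mm + rbtree + rwlock triple with one manager and a simple lifecycle. A condensed sketch of the pairing, using only calls that appear in the hunks (the 0x10000000 range size is TTM's existing choice, carried over from the old drm_mm_init):

/* device init: one offset manager per device */
drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, 0x10000000);

/* per-BO setup: allocate a mappable offset */
ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, bo->mem.num_pages);

/* per-BO destruction: drop the offset exactly once */
drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);

/* device teardown */
drm_vma_offset_manager_destroy(&bdev->vma_manager);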
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	fbo->vm_node = NULL;
+	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
 	spin_lock(&bdev->fence_lock);
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
@@ -40,37 +41,6 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-						     unsigned long page_start,
-						     unsigned long num_pages)
-{
-	struct rb_node *cur = bdev->addr_space_rb.rb_node;
-	unsigned long cur_offset;
-	struct ttm_buffer_object *bo;
-	struct ttm_buffer_object *best_bo = NULL;
-
-	while (likely(cur != NULL)) {
-		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-		cur_offset = bo->vm_node->start;
-		if (page_start >= cur_offset) {
-			cur = cur->rb_right;
-			best_bo = bo;
-			if (page_start == cur_offset)
-				break;
-		} else
-			cur = cur->rb_left;
-	}
-
-	if (unlikely(best_bo == NULL))
-		return NULL;
-
-	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-		     (page_start + num_pages)))
-		return NULL;
-
-	return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-	    bo->vm_node->start - vma->vm_pgoff;
+		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 	page_last = vma_pages(vma) +
-	    bo->vm_node->start - vma->vm_pgoff;
+		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.close = ttm_bo_vm_close
 };
 
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+						  unsigned long offset,
+						  unsigned long pages)
+{
+	struct drm_vma_offset_node *node;
+	struct ttm_buffer_object *bo = NULL;
+
+	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+	if (likely(node)) {
+		bo = container_of(node, struct ttm_buffer_object, vma_node);
+		if (!kref_get_unless_zero(&bo->kref))
+			bo = NULL;
+	}
+
+	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+	if (!bo)
+		pr_err("Could not find buffer object to map\n");
+
+	return bo;
+}
+
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		struct ttm_bo_device *bdev)
 {
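
The new ttm_bo_vm_lookup() is one instance of the generic lookup-under-lock pattern for objects embedding a drm_vma_offset_node. A self-contained sketch (struct my_obj and my_obj_lookup() are hypothetical; the drm_vma_* helpers and kref_get_unless_zero() are the real kernel APIs used above):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <drm/drm_vma_manager.h>

struct my_obj {
	struct kref kref;
	struct drm_vma_offset_node vma_node;
};

static struct my_obj *my_obj_lookup(struct drm_vma_offset_manager *mgr,
				    unsigned long pgoff, unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct my_obj *obj = NULL;

	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, pages);
	if (node) {
		obj = container_of(node, struct my_obj, vma_node);
		/* The lookup lock only keeps the node in the tree; the object
		 * may already be dying, so take a reference only if the
		 * refcount has not yet dropped to zero. */
		if (!kref_get_unless_zero(&obj->kref))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(mgr);

	return obj;
}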
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	struct ttm_buffer_object *bo;
 	int ret;
 
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-				 vma_pages(vma));
-	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-		bo = NULL;
-	read_unlock(&bdev->vm_lock);
-
-	if (unlikely(bo == NULL)) {
-		pr_err("Could not find buffer object to map\n");
+	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+	if (unlikely(!bo))
 		return -EINVAL;
-	}
 
 	driver = bo->bdev->driver;
 	if (unlikely(!driver->verify_access)) {
@@ -324,12 +310,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	bool no_wait = false;
 	bool dummy;
 
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-	if (likely(bo != NULL))
-		ttm_bo_reference(bo);
-	read_unlock(&bdev->vm_lock);
-
+	bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
 	if (unlikely(bo == NULL))
 		return -EFAULT;
 
@@ -343,7 +324,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	if (unlikely(ret != 0))
 		goto out_unref;
 
-	kmap_offset = dev_offset - bo->vm_node->start;
+	kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
 	if (unlikely(kmap_offset >= bo->num_pages)) {
 		ret = -EFBIG;
 		goto out_unref;