- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
- */
- #include <linux/file.h>
- #include <linux/interval_tree.h>
- #include <linux/seq_file.h>
- #include <linux/sync_file.h>
- #include <linux/slab.h>
- #include "kgsl_device.h"
- #include "kgsl_mmu.h"
- #include "kgsl_reclaim.h"
- #include "kgsl_sharedmem.h"
- #include "kgsl_trace.h"
- struct kgsl_memdesc_bind_range {
- struct kgsl_mem_entry *entry;
- struct interval_tree_node range;
- };
- static struct kgsl_memdesc_bind_range *bind_to_range(struct interval_tree_node *node)
- {
- return container_of(node, struct kgsl_memdesc_bind_range, range);
- }
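- /*
- * Allocate a bind range covering [start, last] and take a reference on
- * the child entry so it cannot go away while the range exists.
- */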
- static struct kgsl_memdesc_bind_range *bind_range_create(u64 start, u64 last,
- struct kgsl_mem_entry *entry)
- {
- struct kgsl_memdesc_bind_range *range =
- kzalloc(sizeof(*range), GFP_KERNEL);
- if (!range)
- return ERR_PTR(-ENOMEM);
- range->range.start = start;
- range->range.last = last;
- range->entry = kgsl_mem_entry_get(entry);
- if (!range->entry) {
- kfree(range);
- return ERR_PTR(-EINVAL);
- }
- atomic_inc(&entry->vbo_count);
- return range;
- }
- static void bind_range_destroy(struct kgsl_memdesc_bind_range *range)
- {
- struct kgsl_mem_entry *entry = range->entry;
- atomic_dec(&entry->vbo_count);
- kgsl_mem_entry_put(entry);
- kfree(range);
- }
- static u64 bind_range_len(struct kgsl_memdesc_bind_range *range)
- {
- return (range->range.last - range->range.start) + 1;
- }
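- /*
- * Print every child range currently bound to the VBO entry to the given
- * seq_file: target id, child id and the byte range inside the target.
- */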
- void kgsl_memdesc_print_vbo_ranges(struct kgsl_mem_entry *entry,
- struct seq_file *s)
- {
- struct interval_tree_node *next;
- struct kgsl_memdesc *memdesc = &entry->memdesc;
- if (!(memdesc->flags & KGSL_MEMFLAGS_VBO))
- return;
- /*
- * We can be called from an atomic context, so try to take the mutex and
- * skip this entry if it cannot be acquired
- */
- if (!mutex_trylock(&memdesc->ranges_lock))
- return;
- next = interval_tree_iter_first(&memdesc->ranges, 0, ~0UL);
- while (next) {
- struct kgsl_memdesc_bind_range *range = bind_to_range(next);
- seq_printf(s, "%5d %5d 0x%16.16lx-0x%16.16lx\n",
- entry->id, range->entry->id, range->range.start,
- range->range.last);
- next = interval_tree_iter_next(next, 0, ~0UL);
- }
- mutex_unlock(&memdesc->ranges_lock);
- }
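- /*
- * Unbind child ranges overlapping [start, last] in the target VBO. If
- * entry is non-NULL, only ranges backed by that child are removed.
- * Unless the VBO was created with KGSL_MEMFLAGS_VBO_NO_MAP_ZERO, the
- * removed ranges are mapped back to the zero page.
- */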
- static void kgsl_memdesc_remove_range(struct kgsl_mem_entry *target,
- u64 start, u64 last, struct kgsl_mem_entry *entry)
- {
- struct interval_tree_node *node, *next;
- struct kgsl_memdesc_bind_range *range;
- struct kgsl_memdesc *memdesc = &target->memdesc;
- mutex_lock(&memdesc->ranges_lock);
- next = interval_tree_iter_first(&memdesc->ranges, start, last);
- while (next) {
- node = next;
- range = bind_to_range(node);
- next = interval_tree_iter_next(node, start, last);
- /*
- * If entry is NULL, treat this as a special request and unbind
- * the entire range between start and last
- */
- if (!entry || range->entry->id == entry->id) {
- if (kgsl_mmu_unmap_range(memdesc->pagetable,
- memdesc, range->range.start, bind_range_len(range)))
- continue;
- interval_tree_remove(node, &memdesc->ranges);
- trace_kgsl_mem_remove_bind_range(target,
- range->range.start, range->entry,
- bind_range_len(range));
- if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
- kgsl_mmu_map_zero_page_to_range(memdesc->pagetable,
- memdesc, range->range.start, bind_range_len(range));
- bind_range_destroy(range);
- }
- }
- mutex_unlock(&memdesc->ranges_lock);
- }
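- /*
- * Bind the child entry into the target VBO at [start, last], mapping the
- * child from the given offset. Existing ranges that overlap the new one
- * are trimmed, split or destroyed before the child is mapped.
- */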
- static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
- u64 start, u64 last, struct kgsl_mem_entry *entry, u64 offset)
- {
- struct interval_tree_node *node, *next;
- struct kgsl_memdesc *memdesc = &target->memdesc;
- struct kgsl_memdesc_bind_range *range =
- bind_range_create(start, last, entry);
- int ret = 0;
- if (IS_ERR(range))
- return PTR_ERR(range);
- mutex_lock(&memdesc->ranges_lock);
- /*
- * If the VBO maps the zero page, then we can unmap the requested range
- * in one call. Otherwise we have to figure out what ranges to unmap
- * while walking the interval tree.
- */
- if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)) {
- ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc, start,
- last - start + 1);
- if (ret)
- goto error;
- }
- next = interval_tree_iter_first(&memdesc->ranges, start, last);
- while (next) {
- struct kgsl_memdesc_bind_range *cur;
- node = next;
- cur = bind_to_range(node);
- next = interval_tree_iter_next(node, start, last);
- trace_kgsl_mem_remove_bind_range(target, cur->range.start,
- cur->entry, bind_range_len(cur));
- interval_tree_remove(node, &memdesc->ranges);
- if (start <= cur->range.start) {
- if (last >= cur->range.last) {
- /* Unmap the entire cur range */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
- ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
- cur->range.start,
- cur->range.last - cur->range.start + 1);
- if (ret) {
- interval_tree_insert(node, &memdesc->ranges);
- goto error;
- }
- }
- bind_range_destroy(cur);
- continue;
- }
- /* Unmap the range overlapping cur */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
- ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
- cur->range.start,
- last - cur->range.start + 1);
- if (ret) {
- interval_tree_insert(node, &memdesc->ranges);
- goto error;
- }
- }
- /* Adjust the start of the mapping */
- cur->range.start = last + 1;
- /* And put it back into the tree */
- interval_tree_insert(node, &memdesc->ranges);
- trace_kgsl_mem_add_bind_range(target,
- cur->range.start, cur->entry, bind_range_len(cur));
- } else {
- if (last < cur->range.last) {
- struct kgsl_memdesc_bind_range *temp;
- /*
- * The range is split into two so make a new
- * entry for the far side
- */
- temp = bind_range_create(last + 1, cur->range.last,
- cur->entry);
- /* FIXME: an allocation failure here cannot be unwound, hence the BUG_ON */
- BUG_ON(IS_ERR(temp));
- interval_tree_insert(&temp->range,
- &memdesc->ranges);
- trace_kgsl_mem_add_bind_range(target,
- temp->range.start,
- temp->entry, bind_range_len(temp));
- }
- /* Unmap the range overlapping cur */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
- ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
- start,
- min_t(u64, cur->range.last, last) - start + 1);
- if (ret) {
- interval_tree_insert(node, &memdesc->ranges);
- goto error;
- }
- }
- cur->range.last = start - 1;
- interval_tree_insert(node, &memdesc->ranges);
- trace_kgsl_mem_add_bind_range(target, cur->range.start,
- cur->entry, bind_range_len(cur));
- }
- }
- ret = kgsl_mmu_map_child(memdesc->pagetable, memdesc, start,
- &entry->memdesc, offset, last - start + 1);
- if (ret)
- goto error;
- /* Add the new range */
- interval_tree_insert(&range->range, &memdesc->ranges);
- trace_kgsl_mem_add_bind_range(target, range->range.start,
- range->entry, bind_range_len(range));
- mutex_unlock(&memdesc->ranges_lock);
- return ret;
- error:
- bind_range_destroy(range);
- mutex_unlock(&memdesc->ranges_lock);
- return ret;
- }
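- /*
- * memdesc op called when a VBO gives up its GPU address: unmap all
- * ranges, destroy the remaining bind ranges and release the GPU address
- * back to the pagetable. The address is kept if any unmap failed.
- */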
- static void kgsl_sharedmem_vbo_put_gpuaddr(struct kgsl_memdesc *memdesc)
- {
- struct interval_tree_node *node, *next;
- struct kgsl_memdesc_bind_range *range;
- int ret = 0;
- bool unmap_fail;
- /*
- * If the VBO maps the zero page then we can unmap the entire
- * pagetable region in one call.
- */
- if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
- ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
- 0, memdesc->size);
- unmap_fail = ret;
- /*
- * FIXME: is there a use-after-free potential here? We might need to
- * lock this and set a "do not update" bit
- */
- /* Now delete each range and release the mem entries */
- next = interval_tree_iter_first(&memdesc->ranges, 0, ~0UL);
- while (next) {
- node = next;
- range = bind_to_range(node);
- next = interval_tree_iter_next(node, 0, ~0UL);
- interval_tree_remove(node, &memdesc->ranges);
- /* Unmap this range */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
- range->range.start,
- range->range.last - range->range.start + 1);
- /* Put the child's refcount if unmap succeeds */
- if (!ret)
- bind_range_destroy(range);
- else
- kfree(range);
- unmap_fail = unmap_fail || ret;
- }
- if (unmap_fail)
- return;
- /* Put back the GPU address */
- kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
- memdesc->gpuaddr = 0;
- memdesc->pagetable = NULL;
- }
- static struct kgsl_memdesc_ops kgsl_vbo_ops = {
- .put_gpuaddr = kgsl_sharedmem_vbo_put_gpuaddr,
- };
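- /*
- * Set up a memdesc as a virtual buffer object (VBO). No backing pages
- * are allocated; only the size, the VBO memdesc ops and the interval
- * tree used to track bound child ranges are initialized.
- */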
- int kgsl_sharedmem_allocate_vbo(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc, u64 size, u64 flags)
- {
- size = PAGE_ALIGN(size);
- /* Make sure that VBOs are supported by the MMU */
- if (WARN_ON_ONCE(!kgsl_mmu_has_feature(device,
- KGSL_MMU_SUPPORT_VBO)))
- return -EOPNOTSUPP;
- kgsl_memdesc_init(device, memdesc, flags);
- memdesc->priv = 0;
- memdesc->ops = &kgsl_vbo_ops;
- memdesc->size = size;
- /* Set up the interval tree and lock */
- memdesc->ranges = RB_ROOT_CACHED;
- mutex_init(&memdesc->ranges_lock);
- return 0;
- }
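- /*
- * Return true if [offset, offset + length) fits inside the memdesc
- * without overflowing
- */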
- static bool kgsl_memdesc_check_range(struct kgsl_memdesc *memdesc,
- u64 offset, u64 length)
- {
- return ((offset < memdesc->size) &&
- (offset + length > offset) &&
- (offset + length) <= memdesc->size);
- }
- static void kgsl_sharedmem_free_bind_op(struct kgsl_sharedmem_bind_op *op)
- {
- int i;
- if (IS_ERR_OR_NULL(op))
- return;
- for (i = 0; i < op->nr_ops; i++) {
- /* Decrement the vbo_count we added when creating the bind_op */
- if (op->ops[i].entry)
- atomic_dec(&op->ops[i].entry->vbo_count);
- /* Release the reference on the child entry */
- kgsl_mem_entry_put_deferred(op->ops[i].entry);
- }
- /* Release the reference on the target entry */
- kgsl_mem_entry_put_deferred(op->target);
- kvfree(op->ops);
- kfree(op);
- }
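- /*
- * Build a bind operation from a user supplied array of ranges. Every
- * range is validated (alignment, bounds, secure flags) and references
- * are taken on the target VBO and on each child entry so they stay
- * alive until the operation is released.
- */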
- struct kgsl_sharedmem_bind_op *
- kgsl_sharedmem_create_bind_op(struct kgsl_process_private *private,
- u32 target_id, void __user *ranges, u32 ranges_nents,
- u64 ranges_size)
- {
- struct kgsl_sharedmem_bind_op *op;
- struct kgsl_mem_entry *target;
- int ret, i;
- /* There must be at least one defined operation */
- if (!ranges_nents)
- return ERR_PTR(-EINVAL);
- /* Find the target memory entry */
- target = kgsl_sharedmem_find_id(private, target_id);
- if (!target)
- return ERR_PTR(-ENOENT);
- if (!(target->memdesc.flags & KGSL_MEMFLAGS_VBO)) {
- kgsl_mem_entry_put(target);
- return ERR_PTR(-EINVAL);
- }
- /* Make a container for the bind operations */
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op) {
- kgsl_mem_entry_put(target);
- return ERR_PTR(-ENOMEM);
- }
- /*
- * Make an array for the individual operations. Use __GFP_NOWARN and
- * __GFP_NORETRY so that a very large request fails quietly
- */
- op->ops = kvcalloc(ranges_nents, sizeof(*op->ops),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!op->ops) {
- kfree(op);
- kgsl_mem_entry_put(target);
- return ERR_PTR(-ENOMEM);
- }
- op->nr_ops = ranges_nents;
- op->target = target;
- /* Make sure the process is pinned in memory before proceeding */
- atomic_inc(&private->cmd_count);
- ret = kgsl_reclaim_to_pinned_state(private);
- if (ret)
- goto err;
- for (i = 0; i < ranges_nents; i++) {
- struct kgsl_gpumem_bind_range range;
- struct kgsl_mem_entry *entry;
- u32 size;
- size = min_t(u32, sizeof(range), ranges_size);
- ret = -EINVAL;
- if (copy_from_user(&range, ranges, size)) {
- ret = -EFAULT;
- goto err;
- }
- /* The offset must be page aligned */
- if (!PAGE_ALIGNED(range.target_offset))
- goto err;
- /* The length of the operation must be page aligned and non-zero */
- if (!range.length || !PAGE_ALIGNED(range.length))
- goto err;
- /* Make sure the range fits in the target */
- if (!kgsl_memdesc_check_range(&target->memdesc,
- range.target_offset, range.length))
- goto err;
- /*
- * Special case: treat child_id 0 as a request to unbind the entire
- * specified range (which may span multiple child buffers) without
- * supplying backing buffer information.
- */
- if (range.child_id == 0 && range.op == KGSL_GPUMEM_RANGE_OP_UNBIND) {
- op->ops[i].entry = NULL;
- op->ops[i].start = range.target_offset;
- op->ops[i].last = range.target_offset + range.length - 1;
- /* The child offset doesn't matter for unbind, so set it to 0 */
- op->ops[i].child_offset = 0;
- op->ops[i].op = range.op;
- ranges += ranges_size;
- continue;
- }
- /* Get the child object */
- op->ops[i].entry = kgsl_sharedmem_find_id(private,
- range.child_id);
- entry = op->ops[i].entry;
- if (!entry) {
- ret = -ENOENT;
- goto err;
- }
- /* Keep the child pinned in memory */
- atomic_inc(&entry->vbo_count);
- /* Make sure the child is not a VBO */
- if ((entry->memdesc.flags & KGSL_MEMFLAGS_VBO)) {
- ret = -EINVAL;
- goto err;
- }
- /*
- * Make sure that only secure children are mapped in secure VBOs
- * and vice versa
- */
- if ((target->memdesc.flags & KGSL_MEMFLAGS_SECURE) !=
- (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)) {
- ret = -EPERM;
- goto err;
- }
- /* Make sure the range operation is valid */
- if (range.op != KGSL_GPUMEM_RANGE_OP_BIND &&
- range.op != KGSL_GPUMEM_RANGE_OP_UNBIND)
- goto err;
- if (range.op == KGSL_GPUMEM_RANGE_OP_BIND) {
- if (!PAGE_ALIGNED(range.child_offset))
- goto err;
- /* Make sure the range fits in the child */
- if (!kgsl_memdesc_check_range(&entry->memdesc,
- range.child_offset, range.length))
- goto err;
- } else {
- /* For unbind operations the child offset must be 0 */
- if (range.child_offset)
- goto err;
- }
- op->ops[i].entry = entry;
- op->ops[i].start = range.target_offset;
- op->ops[i].last = range.target_offset + range.length - 1;
- op->ops[i].child_offset = range.child_offset;
- op->ops[i].op = range.op;
- ranges += ranges_size;
- }
- atomic_dec(&private->cmd_count);
- init_completion(&op->comp);
- kref_init(&op->ref);
- return op;
- err:
- atomic_dec(&private->cmd_count);
- kgsl_sharedmem_free_bind_op(op);
- return ERR_PTR(ret);
- }
- void kgsl_sharedmem_bind_range_destroy(struct kref *kref)
- {
- struct kgsl_sharedmem_bind_op *op = container_of(kref,
- struct kgsl_sharedmem_bind_op, ref);
- kgsl_sharedmem_free_bind_op(op);
- }
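- /*
- * Worker that applies each queued bind/unbind range, signals any
- * waiters, runs the optional callback and drops the scheduling
- * reference.
- */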
- static void kgsl_sharedmem_bind_worker(struct work_struct *work)
- {
- struct kgsl_sharedmem_bind_op *op = container_of(work,
- struct kgsl_sharedmem_bind_op, work);
- int i;
- for (i = 0; i < op->nr_ops; i++) {
- if (op->ops[i].op == KGSL_GPUMEM_RANGE_OP_BIND)
- kgsl_memdesc_add_range(op->target,
- op->ops[i].start,
- op->ops[i].last,
- op->ops[i].entry,
- op->ops[i].child_offset);
- else
- kgsl_memdesc_remove_range(op->target,
- op->ops[i].start,
- op->ops[i].last,
- op->ops[i].entry);
- }
- /* Wake up any threads waiting for the bind operation */
- complete_all(&op->comp);
- if (op->callback)
- op->callback(op);
- /* Put the refcount we took when scheduling the worker */
- kgsl_sharedmem_put_bind_op(op);
- }
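- /* Queue the bind operation to run asynchronously on the system workqueue */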
- void kgsl_sharedmem_bind_ranges(struct kgsl_sharedmem_bind_op *op)
- {
- /* Take a reference to the operation while it is scheduled */
- kref_get(&op->ref);
- INIT_WORK(&op->work, kgsl_sharedmem_bind_worker);
- schedule_work(&op->work);
- }
- struct kgsl_sharedmem_bind_fence {
- struct dma_fence base;
- spinlock_t lock;
- int fd;
- struct kgsl_sharedmem_bind_op *op;
- };
- static const char *bind_fence_get_driver_name(struct dma_fence *fence)
- {
- return "kgsl_sharedmem_bind";
- }
- static const char *bind_fence_get_timeline_name(struct dma_fence *fence)
- {
- return "(unbound)";
- }
- static void bind_fence_release(struct dma_fence *fence)
- {
- struct kgsl_sharedmem_bind_fence *bind_fence = container_of(fence,
- struct kgsl_sharedmem_bind_fence, base);
- kgsl_sharedmem_put_bind_op(bind_fence->op);
- kfree(bind_fence);
- }
- static void
- kgsl_sharedmem_bind_fence_callback(struct kgsl_sharedmem_bind_op *op)
- {
- struct kgsl_sharedmem_bind_fence *bind_fence = op->data;
- dma_fence_signal(&bind_fence->base);
- dma_fence_put(&bind_fence->base);
- }
- static const struct dma_fence_ops kgsl_sharedmem_bind_fence_ops = {
- .get_driver_name = bind_fence_get_driver_name,
- .get_timeline_name = bind_fence_get_timeline_name,
- .release = bind_fence_release,
- };
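- /*
- * Create a dma_fence wrapped in a sync_file so userspace can wait for an
- * asynchronous bind operation to complete through the returned fd.
- */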
- static struct kgsl_sharedmem_bind_fence *
- kgsl_sharedmem_bind_fence(struct kgsl_sharedmem_bind_op *op)
- {
- struct kgsl_sharedmem_bind_fence *fence;
- struct sync_file *sync_file;
- int fd;
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence)
- return ERR_PTR(-ENOMEM);
- spin_lock_init(&fence->lock);
- dma_fence_init(&fence->base, &kgsl_sharedmem_bind_fence_ops,
- &fence->lock, dma_fence_context_alloc(1), 0);
- fd = get_unused_fd_flags(O_CLOEXEC);
- if (fd < 0) {
- kfree(fence);
- return ERR_PTR(fd);
- }
- sync_file = sync_file_create(&fence->base);
- if (!sync_file) {
- put_unused_fd(fd);
- kfree(fence);
- return ERR_PTR(-ENOMEM);
- }
- fd_install(fd, sync_file->file);
- fence->fd = fd;
- fence->op = op;
- return fence;
- }
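- /*
- * Handle the gpumem bind ranges ioctl: build the bind operation from the
- * user supplied ranges and either run it asynchronously (optionally
- * returning a fence fd) or run it synchronously and wait for completion.
- */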
- long kgsl_ioctl_gpumem_bind_ranges(struct kgsl_device_private *dev_priv,
- unsigned int cmd, void *data)
- {
- struct kgsl_process_private *private = dev_priv->process_priv;
- struct kgsl_gpumem_bind_ranges *param = data;
- struct kgsl_sharedmem_bind_op *op;
- int ret;
- /* If ranges_size isn't set, return the expected size to the user */
- if (!param->ranges_size) {
- param->ranges_size = sizeof(struct kgsl_gpumem_bind_range);
- return 0;
- }
- /* FENCE_OUT only makes sense with ASYNC */
- if ((param->flags & KGSL_GPUMEM_BIND_FENCE_OUT) &&
- !(param->flags & KGSL_GPUMEM_BIND_ASYNC))
- return -EINVAL;
- op = kgsl_sharedmem_create_bind_op(private, param->id,
- u64_to_user_ptr(param->ranges), param->ranges_nents,
- param->ranges_size);
- if (IS_ERR(op))
- return PTR_ERR(op);
- if (param->flags & KGSL_GPUMEM_BIND_ASYNC) {
- struct kgsl_sharedmem_bind_fence *fence;
- if (param->flags & KGSL_GPUMEM_BIND_FENCE_OUT) {
- fence = kgsl_sharedmem_bind_fence(op);
- if (IS_ERR(fence)) {
- kgsl_sharedmem_put_bind_op(op);
- return PTR_ERR(fence);
- }
- op->data = fence;
- op->callback = kgsl_sharedmem_bind_fence_callback;
- param->fence_id = fence->fd;
- }
- kgsl_sharedmem_bind_ranges(op);
- if (!(param->flags & KGSL_GPUMEM_BIND_FENCE_OUT))
- kgsl_sharedmem_put_bind_op(op);
- return 0;
- }
- /*
- * Synchronous path: schedule the work and wait for it to finish. All
- * resources are released once the bind operation is done
- */
- kgsl_sharedmem_bind_ranges(op);
- ret = wait_for_completion_interruptible(&op->comp);
- kgsl_sharedmem_put_bind_op(op);
- return ret;
- }