ion: Merge all ION related changes from msm-4.19 to msm-lahaina

Merge all changes that deal with the ION memory manager from msm-4.19
to msm-lahaina as of commit 9dd0018410fe: Merge "clk: qcom: npucc-kona:
Enable safe config and HW_CTL for all RCGs".

Change-Id: I0c362ff9e6938883535467f0809ee360a733638e
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
@@ -30,6 +30,17 @@ config SW_SYNC
	  WARNING: improper use of this can result in deadlocking kernel
	  drivers from userspace. Intended for test and debug only.

config DEBUG_DMA_BUF_REF
	bool "DEBUG Reference Count"
	depends on STACKDEPOT
	depends on DMA_SHARED_BUFFER
	default n
	help
	  Save stack traces for every call to dma_buf_get and dma_buf_put, to
	  help debug memory leaks. Potential leaks may be found by manually
	  matching the get/put call stacks. This feature consumes extra memory
	  in order to save the stack traces using STACKDEPOT.

config UDMABUF
	bool "userspace dmabuf misc driver"
	default n
@@ -3,4 +3,5 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
	 reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_DEBUG_DMA_BUF_REF) += dma-buf-ref.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
drivers/dma-buf/dma-buf-ref.c (new file, 114 lines)
@@ -0,0 +1,114 @@
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/seq_file.h>

#define DMA_BUF_STACK_DEPTH (16)

struct dma_buf_ref {
	struct list_head list;
	depot_stack_handle_t handle;
	int count;
};

void dma_buf_ref_init(struct msm_dma_buf *msm_dma_buf)
{
	INIT_LIST_HEAD(&msm_dma_buf->refs);
}

void dma_buf_ref_destroy(struct msm_dma_buf *msm_dma_buf)
{
	struct dma_buf_ref *r, *n;
	struct dma_buf *dmabuf = &msm_dma_buf->dma_buf;

	mutex_lock(&dmabuf->lock);
	list_for_each_entry_safe(r, n, &msm_dma_buf->refs, list) {
		list_del(&r->list);
		kfree(r);
	}
	mutex_unlock(&dmabuf->lock);
}

static void dma_buf_ref_insert_handle(struct msm_dma_buf *msm_dma_buf,
				      depot_stack_handle_t handle,
				      int count)
{
	struct dma_buf_ref *r;
	struct dma_buf *dmabuf = &msm_dma_buf->dma_buf;

	mutex_lock(&dmabuf->lock);
	list_for_each_entry(r, &msm_dma_buf->refs, list) {
		if (r->handle == handle) {
			r->count += count;
			goto out;
		}
	}

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		goto out;

	INIT_LIST_HEAD(&r->list);
	r->handle = handle;
	r->count = count;
	list_add(&r->list, &msm_dma_buf->refs);

out:
	mutex_unlock(&dmabuf->lock);
}

void dma_buf_ref_mod(struct msm_dma_buf *msm_dma_buf, int nr)
{
	unsigned long entries[DMA_BUF_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = DMA_BUF_STACK_DEPTH,
		.skip = 1
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	handle = depot_save_stack(&trace, GFP_KERNEL);
	if (!handle)
		return;

	dma_buf_ref_insert_handle(msm_dma_buf, handle, nr);
}

/**
 * Called with dmabuf->lock held
 */
int dma_buf_ref_show(struct seq_file *s, struct msm_dma_buf *msm_dma_buf)
{
	char *buf;
	struct dma_buf_ref *ref;
	int count = 0;
	struct stack_trace trace;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_for_each_entry(ref, &msm_dma_buf->refs, list) {
		count += ref->count;

		seq_printf(s, "References: %d\n", ref->count);
		depot_fetch_stack(ref->handle, &trace);
		snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
		seq_puts(s, buf);
		seq_putc(s, '\n');
	}

	seq_printf(s, "Total references: %d\n\n\n", count);
	free_page((unsigned long)buf);

	return 0;
}
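The tracking above is driven from dma_buf_get() and dma_buf_put(), which are patched below to call dma_buf_ref_mod() with +1/-1; a leaked reference therefore shows up in dma_buf_ref_show() as a positive total against the call stack that took it. A minimal sketch of that pairing, assuming a hypothetical importer (example_import()/example_release() are illustrative names, not part of this patch):

static struct dma_buf *example_import(int fd)
{
	/* Takes a reference; with CONFIG_DEBUG_DMA_BUF_REF this call
	 * stack is recorded with +1 in the buffer's ref list.
	 */
	return dma_buf_get(fd);
}

static void example_release(struct dma_buf *dmabuf)
{
	/* The matching -1; if this call is missing, dma_buf_ref_show()
	 * reports a positive count against example_import()'s stack.
	 */
	dma_buf_put(dmabuf);
}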
@@ -79,12 +79,14 @@ static struct file_system_type dma_buf_fs_type = {

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct msm_dma_buf *msm_dma_buf;
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;
	msm_dma_buf = to_msm_dma_buf(dmabuf);

	BUG_ON(dmabuf->vmapping_counter);

@@ -98,17 +100,18 @@ static int dma_buf_release(struct inode *inode, struct file *file)
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	dmabuf->ops->release(dmabuf);
	dma_buf_ref_destroy(msm_dma_buf);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	kfree(msm_dma_buf);
	return 0;
}

@@ -505,10 +508,11 @@ err_alloc_file:
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct msm_dma_buf *msm_dma_buf;
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	size_t alloc_size = sizeof(struct msm_dma_buf);
	int ret;

	if (!exp_info->resv)
@@ -528,12 +532,13 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
	msm_dma_buf = kzalloc(alloc_size, GFP_KERNEL);
	if (!msm_dma_buf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf = &msm_dma_buf->dma_buf;
	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
@@ -561,6 +566,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	dma_buf_ref_init(msm_dma_buf);
	dma_buf_ref_mod(msm_dma_buf, 1);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);
@@ -568,7 +576,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
	kfree(msm_dma_buf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
@@ -620,6 +628,7 @@ struct dma_buf *dma_buf_get(int fd)
		fput(file);
		return ERR_PTR(-EINVAL);
	}
	dma_buf_ref_mod(to_msm_dma_buf(file->private_data), 1);

	return file->private_data;
}
@@ -640,6 +649,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	dma_buf_ref_mod(to_msm_dma_buf(dmabuf), -1);
	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1281,6 +1291,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		dma_buf_ref_show(s, to_msm_dma_buf(buf_obj));

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
@@ -34,3 +34,58 @@ config ION_CMA_HEAP
	  Choose this option to enable CMA heaps with Ion. This heap is backed
	  by the Contiguous Memory Allocator (CMA). If your system has these
	  regions, you should say Y here.

config ION_MSM_HEAPS
	tristate "MSM platform-based Ion heaps support"
	depends on ION && DMA_CMA && QCOM_SECURE_BUFFER
	help
	  Enable this option to enable platform-based Ion heaps. The heaps
	  will register with the Ion core framework, at which point userspace
	  clients can allocate different types of memory (e.g. secure, cached,
	  and uncached) from the different types of heaps. The MSM heaps
	  allow Ion buffers to be shared through the shared DMA buffer framework
	  and the heaps implement their own cache maintenance operations.
	  If you're not sure, enable here.

config ION_FORCE_DMA_SYNC
	bool "Force ION to always DMA sync buffer memory"
	depends on ION
	help
	  Force ION to DMA sync buffer memory when it is allocated and to
	  always DMA sync the buffer memory on calls to begin/end cpu
	  access. This makes ION DMA sync behavior similar to that of the
	  older version of ION.
	  We generally don't want to enable this config as it breaks the
	  cache maintenance model.
	  If you're not sure say N here.

config ION_DEFER_FREE_NO_SCHED_IDLE
	bool "Increases the priority of ION defer free thread"
	depends on ION
	help
	  Certain heaps such as the system heaps make use of a low priority
	  thread to help free buffer allocations back to the pool which might
	  result in future allocations requests going to the buddy instead of
	  the pool when there is a high buffer allocation rate.
	  Choose this option to remove the SCHED_IDLE flag in case of defer
	  free thereby increasing the priority of defer free thread.
	  if you're not sure say Y here.

config ION_POOL_AUTO_REFILL
	bool "Refill the ION heap pools automatically"
	depends on ION
	help
	  Choose this option to refill the ION system heap pools (non-secure)
	  automatically when the pool pages count becomes lower than a set low mark.
	  This refilling is done by worker thread which is invoked asynchronously
	  when the pool count reaches below low mark.
	  if you're not sure say Y here.

config ION_POOL_FILL_MARK
	int "ion pool fillmark size in MB"
	depends on ION_POOL_AUTO_REFILL
	range 16 256
	default 100
	help
	  Set the fillmark of the pool in terms of mega bytes and the lowmark is
	  ION_POOL_LOW_MARK_PERCENT of fillmark value.
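As the ION_MSM_HEAPS help text above notes, userspace clients reach these heaps through the ION allocation ioctl. A minimal userspace sketch, assuming the 4.19-era /dev/ion uapi (ION_IOC_ALLOC, struct ion_allocation_data, ION_FLAG_CACHED); TARGET_HEAP_ID and the uapi header path are assumptions for illustration, not defined by this patch:

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>		/* assumed install location of the ION uapi header */

#define TARGET_HEAP_ID 25	/* hypothetical; real ids come from ION_IOC_HEAP_QUERY */

int example_ion_alloc(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.heap_id_mask = 1u << TARGET_HEAP_ID,
		.flags = ION_FLAG_CACHED,	/* or 0 for an uncached buffer */
	};
	int ret, ion_fd = open("/dev/ion", O_RDONLY);

	if (ion_fd < 0)
		return -1;
	ret = ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
	close(ion_fd);
	return ret ? -1 : (int)alloc.fd;	/* dma-buf fd, shareable with other drivers */
}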
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ION) += ion.o ion_heap.o
obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
obj-$(CONFIG_ION_MSM_HEAPS) += msm_ion_heaps.o
msm_ion_heaps-objs += msm_ion_of.o msm_ion_dma_buf.o ion_page_pool.o \
	ion_system_heap.o ion_carveout_heap.o ion_system_secure_heap.o \
	ion_cma_heap.o ion_secure_util.o
@@ -12,21 +12,29 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include "ion.h"
|
||||
#include "msm_ion_priv.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
#define ION_CARVEOUT_ALLOCATE_FAIL -1
|
||||
|
||||
#define to_carveout_heap(_heap) \
|
||||
container_of(to_msm_ion_heap(_heap), struct ion_carveout_heap, heap)
|
||||
|
||||
struct ion_carveout_heap {
|
||||
struct ion_heap heap;
|
||||
struct msm_ion_heap heap;
|
||||
struct gen_pool *pool;
|
||||
phys_addr_t base;
|
||||
};
|
||||
|
||||
static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
|
||||
unsigned long size)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
|
||||
unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
|
||||
|
||||
if (!offset)
|
||||
@@ -38,8 +46,7 @@ static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
|
||||
static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr,
|
||||
unsigned long size)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
|
||||
|
||||
if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
|
||||
return;
|
||||
@@ -55,6 +62,8 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
|
||||
struct sg_table *table;
|
||||
phys_addr_t paddr;
|
||||
int ret;
|
||||
struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
|
||||
struct device *dev = carveout_heap->heap.dev;
|
||||
|
||||
table = kmalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
@@ -72,6 +81,11 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
|
||||
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
|
||||
buffer->sg_table = table;
|
||||
|
||||
if (ion_buffer_cached(buffer))
|
||||
ion_pages_sync_for_device(dev, sg_page(table->sgl),
|
||||
buffer->size, DMA_FROM_DEVICE);
|
||||
ion_prepare_sgl_for_force_dma_sync(buffer->sg_table);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_table:
|
||||
@@ -84,12 +98,18 @@ err_free:
|
||||
static void ion_carveout_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
struct page *page = sg_page(table->sgl);
|
||||
phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
|
||||
phys_addr_t paddr = page_to_phys(page);
|
||||
struct device *dev = carveout_heap->heap.dev;
|
||||
|
||||
ion_heap_buffer_zero(buffer);
|
||||
|
||||
if (ion_buffer_cached(buffer))
|
||||
ion_pages_sync_for_device(dev, page, buffer->size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
ion_carveout_free(heap, paddr, buffer->size);
|
||||
sg_free_table(table);
|
||||
kfree(table);
|
||||
@@ -103,14 +123,23 @@ static struct ion_heap_ops carveout_heap_ops = {
|
||||
.unmap_kernel = ion_heap_unmap_kernel,
|
||||
};
|
||||
|
||||
struct ion_heap *ion_carveout_heap_create(phys_addr_t base, size_t size)
|
||||
static struct ion_heap *
|
||||
__ion_carveout_heap_create(struct ion_platform_heap *heap_data,
|
||||
bool sync)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap;
|
||||
int ret;
|
||||
|
||||
struct page *page;
|
||||
size_t size;
|
||||
struct device *dev = (struct device *)heap_data->priv;
|
||||
|
||||
page = pfn_to_page(PFN_DOWN(heap_data->base));
|
||||
size = heap_data->size;
|
||||
|
||||
if (sync)
|
||||
ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
|
||||
|
||||
page = pfn_to_page(PFN_DOWN(base));
|
||||
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
@@ -124,10 +153,211 @@ struct ion_heap *ion_carveout_heap_create(phys_addr_t base, size_t size)
|
||||
kfree(carveout_heap);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
gen_pool_add(carveout_heap->pool, base, size, -1);
|
||||
carveout_heap->heap.ops = &carveout_heap_ops;
|
||||
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
|
||||
carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
|
||||
carveout_heap->base = heap_data->base;
|
||||
carveout_heap->heap.dev = dev;
|
||||
gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
|
||||
-1);
|
||||
carveout_heap->heap.ion_heap.ops = &carveout_heap_ops;
|
||||
carveout_heap->heap.ion_heap.type = ION_HEAP_TYPE_CARVEOUT;
|
||||
carveout_heap->heap.ion_heap.flags = ION_HEAP_FLAG_DEFER_FREE;
|
||||
|
||||
return &carveout_heap->heap;
|
||||
return &carveout_heap->heap.ion_heap;
|
||||
}
|
||||
|
||||
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
|
||||
{
|
||||
return __ion_carveout_heap_create(heap_data, true);
|
||||
}
|
||||
|
||||
static void ion_carveout_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap = to_carveout_heap(heap);
|
||||
|
||||
gen_pool_destroy(carveout_heap->pool);
|
||||
kfree(carveout_heap);
|
||||
carveout_heap = NULL;
|
||||
}
|
||||
|
||||
struct ion_sc_entry {
|
||||
struct list_head list;
|
||||
struct ion_heap *heap;
|
||||
u32 token;
|
||||
};
|
||||
|
||||
struct ion_sc_heap {
|
||||
struct msm_ion_heap heap;
|
||||
struct list_head children;
|
||||
};
|
||||
|
||||
static struct ion_heap *ion_sc_find_child(struct ion_heap *heap, u32 flags)
|
||||
{
|
||||
struct ion_sc_heap *manager;
|
||||
struct ion_sc_entry *entry;
|
||||
|
||||
manager = container_of(to_msm_ion_heap(heap), struct ion_sc_heap, heap);
|
||||
flags = flags & ION_FLAGS_CP_MASK;
|
||||
list_for_each_entry(entry, &manager->children, list) {
|
||||
if (entry->token == flags)
|
||||
return entry->heap;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int ion_sc_heap_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer, unsigned long len,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct ion_heap *child;
|
||||
|
||||
/* cache maintenance is not possible on secure memory */
|
||||
flags &= ~((unsigned long)ION_FLAG_CACHED);
|
||||
buffer->flags = flags;
|
||||
|
||||
child = ion_sc_find_child(heap, flags);
|
||||
if (!child)
|
||||
return -EINVAL;
|
||||
return ion_carveout_heap_allocate(child, buffer, len, flags);
|
||||
}
|
||||
|
||||
static void ion_sc_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_heap *child;
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
struct page *page = sg_page(table->sgl);
|
||||
phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
|
||||
|
||||
child = ion_sc_find_child(buffer->heap, buffer->flags);
|
||||
if (!child) {
|
||||
WARN(1, "ion_secure_carvout: invalid buffer flags on free. Memory will be leaked\n.");
|
||||
return;
|
||||
}
|
||||
|
||||
ion_carveout_free(child, paddr, buffer->size);
|
||||
sg_free_table(table);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
static struct ion_heap_ops ion_sc_heap_ops = {
|
||||
.allocate = ion_sc_heap_allocate,
|
||||
.free = ion_sc_heap_free,
|
||||
};
|
||||
|
||||
static int ion_sc_get_dt_token(struct ion_sc_entry *entry,
|
||||
struct device_node *np, u64 base, u64 size)
|
||||
{
|
||||
u32 token;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (of_property_read_u32(np, "token", &token))
|
||||
return -EINVAL;
|
||||
|
||||
ret = ion_hyp_assign_from_flags(base, size, token);
|
||||
if (ret)
|
||||
pr_err("secure_carveout_heap: Assign token 0x%x failed\n",
|
||||
token);
|
||||
else
|
||||
entry->token = token;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_sc_add_child(struct ion_sc_heap *manager,
|
||||
struct device_node *np)
|
||||
{
|
||||
struct device *dev = manager->heap.dev;
|
||||
struct ion_platform_heap heap_data = {0};
|
||||
struct ion_sc_entry *entry;
|
||||
struct device_node *phandle;
|
||||
const __be32 *basep;
|
||||
u64 base, size;
|
||||
int ret;
|
||||
|
||||
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&entry->list);
|
||||
|
||||
phandle = of_parse_phandle(np, "memory-region", 0);
|
||||
if (!phandle)
|
||||
goto out_free;
|
||||
|
||||
basep = of_get_address(phandle, 0, &size, NULL);
|
||||
if (!basep)
|
||||
goto out_free;
|
||||
|
||||
base = of_translate_address(phandle, basep);
|
||||
if (base == OF_BAD_ADDR)
|
||||
goto out_free;
|
||||
|
||||
heap_data.priv = dev;
|
||||
heap_data.base = base;
|
||||
heap_data.size = size;
|
||||
|
||||
/* This will zero memory initially */
|
||||
entry->heap = __ion_carveout_heap_create(&heap_data, false);
|
||||
if (IS_ERR(entry->heap))
|
||||
goto out_free;
|
||||
|
||||
ret = ion_sc_get_dt_token(entry, np, base, size);
|
||||
if (ret)
|
||||
goto out_free_carveout;
|
||||
|
||||
list_add(&entry->list, &manager->children);
|
||||
dev_info(dev, "ion_secure_carveout: creating heap@0x%llx, size 0x%llx\n",
|
||||
base, size);
|
||||
return 0;
|
||||
|
||||
out_free_carveout:
|
||||
ion_carveout_heap_destroy(entry->heap);
|
||||
out_free:
|
||||
kfree(entry);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void ion_secure_carveout_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_sc_heap *manager =
|
||||
container_of(to_msm_ion_heap(heap), struct ion_sc_heap, heap);
|
||||
struct ion_sc_entry *entry, *tmp;
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &manager->children, list) {
|
||||
ion_carveout_heap_destroy(entry->heap);
|
||||
kfree(entry);
|
||||
}
|
||||
kfree(manager);
|
||||
}
|
||||
|
||||
struct ion_heap *
|
||||
ion_secure_carveout_heap_create(struct ion_platform_heap *heap_data)
|
||||
{
|
||||
struct device *dev = heap_data->priv;
|
||||
int ret;
|
||||
struct ion_sc_heap *manager;
|
||||
struct device_node *np;
|
||||
|
||||
manager = kzalloc(sizeof(*manager), GFP_KERNEL);
|
||||
if (!manager)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
INIT_LIST_HEAD(&manager->children);
|
||||
manager->heap.dev = dev;
|
||||
|
||||
for_each_child_of_node(dev->of_node, np) {
|
||||
ret = ion_sc_add_child(manager, np);
|
||||
if (ret) {
|
||||
dev_err(dev, "Creating child pool %s failed\n",
|
||||
np->name);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
manager->heap.ion_heap.ops = &ion_sc_heap_ops;
|
||||
manager->heap.ion_heap.type =
|
||||
(enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT;
|
||||
return &manager->heap.ion_heap;
|
||||
|
||||
err:
|
||||
ion_secure_carveout_heap_destroy(&manager->heap.ion_heap);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
@@ -4,6 +4,8 @@
|
||||
*
|
||||
* Copyright (C) Linaro 2012
|
||||
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
|
||||
*
|
||||
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
@@ -12,16 +14,20 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/cma.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include "ion.h"
|
||||
#include "msm_ion_priv.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
struct ion_cma_heap {
|
||||
struct ion_heap heap;
|
||||
struct msm_ion_heap heap;
|
||||
struct cma *cma;
|
||||
};
|
||||
|
||||
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
|
||||
#define to_cma_heap(x) \
|
||||
container_of(to_msm_ion_heap(x), struct ion_cma_heap, heap)
|
||||
|
||||
/* ION CMA heap operations functions */
|
||||
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
@@ -35,6 +41,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
unsigned long align = get_order(size);
|
||||
int ret;
|
||||
struct device *dev = cma_heap->heap.dev;
|
||||
|
||||
if (align > CONFIG_CMA_ALIGNMENT)
|
||||
align = CONFIG_CMA_ALIGNMENT;
|
||||
@@ -43,6 +50,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!(flags & ION_FLAG_SECURE)) {
|
||||
if (PageHighMem(pages)) {
|
||||
unsigned long nr_clear_pages = nr_pages;
|
||||
struct page *page = pages;
|
||||
@@ -58,6 +66,13 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
} else {
|
||||
memset(page_address(pages), 0, size);
|
||||
}
|
||||
}
|
||||
|
||||
if (MAKE_ION_ALLOC_DMA_READY ||
|
||||
(flags & ION_FLAG_SECURE) ||
|
||||
(!ion_buffer_cached(buffer)))
|
||||
ion_pages_sync_for_device(dev, pages, size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
table = kmalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
@@ -71,6 +86,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
|
||||
buffer->priv_virt = pages;
|
||||
buffer->sg_table = table;
|
||||
ion_prepare_sgl_for_force_dma_sync(buffer->sg_table);
|
||||
return 0;
|
||||
|
||||
free_mem:
|
||||
@@ -101,38 +117,125 @@ static struct ion_heap_ops ion_cma_ops = {
|
||||
.unmap_kernel = ion_heap_unmap_kernel,
|
||||
};
|
||||
|
||||
static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
|
||||
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_cma_heap *cma_heap;
|
||||
struct device *dev = (struct device *)data->priv;
|
||||
|
||||
if (!dev->cma_area)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
|
||||
|
||||
if (!cma_heap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cma_heap->heap.ops = &ion_cma_ops;
|
||||
cma_heap->cma = cma;
|
||||
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
|
||||
return &cma_heap->heap;
|
||||
cma_heap->heap.ion_heap.ops = &ion_cma_ops;
|
||||
/*
|
||||
* get device from private heaps data, later it will be
|
||||
* used to make the link with reserved CMA memory
|
||||
*/
|
||||
cma_heap->heap.dev = dev;
|
||||
cma_heap->cma = dev->cma_area;
|
||||
cma_heap->heap.ion_heap.type = ION_HEAP_TYPE_DMA;
|
||||
return &cma_heap->heap.ion_heap;
|
||||
}
|
||||
|
||||
static int __ion_add_cma_heaps(struct cma *cma, void *data)
|
||||
static void ion_secure_cma_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
if (ion_hyp_unassign_sg_from_flags(buffer->sg_table, buffer->flags,
|
||||
true))
|
||||
return;
|
||||
|
||||
heap = __ion_cma_heap_create(cma);
|
||||
if (IS_ERR(heap))
|
||||
return PTR_ERR(heap);
|
||||
|
||||
heap->name = cma_get_name(cma);
|
||||
|
||||
ion_device_add_heap(heap);
|
||||
return 0;
|
||||
ion_cma_free(buffer);
|
||||
}
|
||||
|
||||
static int ion_add_cma_heaps(void)
|
||||
static int ion_secure_cma_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer, unsigned long len,
|
||||
unsigned long flags)
|
||||
{
|
||||
cma_for_each_area(__ion_add_cma_heaps, NULL);
|
||||
return 0;
|
||||
int ret;
|
||||
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
|
||||
|
||||
if (!(flags & ION_FLAGS_CP_MASK))
|
||||
return -EINVAL;
|
||||
|
||||
ret = ion_cma_allocate(heap, buffer, len, flags);
|
||||
if (ret) {
|
||||
dev_err(cma_heap->heap.dev, "Unable to allocate cma buffer\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ion_hyp_assign_sg_from_flags(buffer->sg_table, flags, true);
|
||||
if (ret) {
|
||||
if (ret == -EADDRNOTAVAIL) {
|
||||
goto out_free_buf;
|
||||
} else {
|
||||
ion_cma_free(buffer);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
out_free_buf:
|
||||
ion_secure_cma_free(buffer);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
|
||||
__func__);
|
||||
return NULL;
|
||||
}
|
||||
return ion_heap_map_kernel(heap, buffer);
|
||||
}
|
||||
|
||||
static int ion_secure_cma_map_user(struct ion_heap *mapper,
|
||||
struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
return ion_heap_map_user(mapper, buffer, vma);
|
||||
}
|
||||
|
||||
static struct ion_heap_ops ion_secure_cma_ops = {
|
||||
.allocate = ion_secure_cma_allocate,
|
||||
.free = ion_secure_cma_free,
|
||||
.map_user = ion_secure_cma_map_user,
|
||||
.map_kernel = ion_secure_cma_map_kernel,
|
||||
.unmap_kernel = ion_heap_unmap_kernel,
|
||||
};
|
||||
|
||||
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_cma_heap *cma_heap;
|
||||
struct device *dev = (struct device *)data->priv;
|
||||
|
||||
if (!dev->cma_area)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
|
||||
|
||||
if (!cma_heap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cma_heap->heap.ion_heap.ops = &ion_secure_cma_ops;
|
||||
/*
|
||||
* get device from private heaps data, later it will be
|
||||
* used to make the link with reserved CMA memory
|
||||
*/
|
||||
cma_heap->heap.dev = dev;
|
||||
cma_heap->cma = dev->cma_area;
|
||||
cma_heap->heap.ion_heap.type =
|
||||
(enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA;
|
||||
return &cma_heap->heap.ion_heap;
|
||||
}
|
||||
device_initcall(ion_add_cma_heaps);
|
||||
|
@@ -244,8 +244,9 @@ static int ion_heap_deferred_free(void *data)

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE
	struct sched_param param = { .sched_priority = 0 };

#endif
	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
@@ -255,8 +256,9 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE
	sched_setscheduler(heap->task, SCHED_IDLE, &param);

#endif
	return 0;
}
@@ -10,8 +10,45 @@
|
||||
#include <linux/swap.h>
|
||||
#include <linux/sched/signal.h>
|
||||
|
||||
#include "ion.h"
|
||||
#include "msm_ion_priv.h"
|
||||
#include "ion_page_pool.h"
|
||||
|
||||
/* do a simple check to see if we are in any low memory situation */
|
||||
static bool pool_refill_ok(struct ion_page_pool *pool)
|
||||
{
|
||||
struct zonelist *zonelist;
|
||||
struct zoneref *z;
|
||||
struct zone *zone;
|
||||
int mark;
|
||||
enum zone_type classzone_idx = gfp_zone(pool->gfp_mask);
|
||||
s64 delta;
|
||||
|
||||
/* check if we are within the refill defer window */
|
||||
delta = ktime_ms_delta(ktime_get(), pool->last_low_watermark_ktime);
|
||||
if (delta < ION_POOL_REFILL_DEFER_WINDOW_MS)
|
||||
return false;
|
||||
|
||||
zonelist = node_zonelist(numa_node_id(), pool->gfp_mask);
|
||||
/*
|
||||
* make sure that if we allocate a pool->order page from buddy,
|
||||
* we don't put the zone watermarks go below the high threshold.
|
||||
* This makes sure there's no unwanted repetitive refilling and
|
||||
* reclaiming of buddy pages on the pool.
|
||||
*/
|
||||
for_each_zone_zonelist(zone, z, zonelist, classzone_idx) {
|
||||
mark = high_wmark_pages(zone);
|
||||
mark += 1 << pool->order;
|
||||
if (!zone_watermark_ok_safe(zone, pool->order, mark,
|
||||
classzone_idx)) {
|
||||
pool->last_low_watermark_ktime = ktime_get();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
|
||||
{
|
||||
if (fatal_signal_pending(current))
|
||||
@@ -36,11 +73,34 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
|
||||
pool->low_count++;
|
||||
}
|
||||
|
||||
atomic_inc(&pool->count);
|
||||
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
|
||||
1 << pool->order);
|
||||
(1 << (PAGE_SHIFT + pool->order)));
|
||||
mutex_unlock(&pool->mutex);
|
||||
}
|
||||
|
||||
void ion_page_pool_refill(struct ion_page_pool *pool)
|
||||
{
|
||||
struct page *page;
|
||||
gfp_t gfp_refill = (pool->gfp_mask | __GFP_RECLAIM) & ~__GFP_NORETRY;
|
||||
struct device *dev = pool->heap_dev;
|
||||
|
||||
/* skip refilling order 0 pools */
|
||||
if (!pool->order)
|
||||
return;
|
||||
|
||||
while (!pool_fillmark_reached(pool) && pool_refill_ok(pool)) {
|
||||
page = alloc_pages(gfp_refill, pool->order);
|
||||
if (!page)
|
||||
break;
|
||||
if (!pool->cached)
|
||||
ion_pages_sync_for_device(dev, page,
|
||||
PAGE_SIZE << pool->order,
|
||||
DMA_BIDIRECTIONAL);
|
||||
ion_page_pool_add(pool, page);
|
||||
}
|
||||
}
|
||||
|
||||
static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
|
||||
{
|
||||
struct page *page;
|
||||
@@ -55,39 +115,73 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
|
||||
pool->low_count--;
|
||||
}
|
||||
|
||||
atomic_dec(&pool->count);
|
||||
list_del(&page->lru);
|
||||
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
|
||||
-(1 << pool->order));
|
||||
-(1 << (PAGE_SHIFT + pool->order)));
|
||||
return page;
|
||||
}
|
||||
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
|
||||
BUG_ON(!pool);
|
||||
|
||||
mutex_lock(&pool->mutex);
|
||||
if (fatal_signal_pending(current))
|
||||
return ERR_PTR(-EINTR);
|
||||
|
||||
if (*from_pool && mutex_trylock(&pool->mutex)) {
|
||||
if (pool->high_count)
|
||||
page = ion_page_pool_remove(pool, true);
|
||||
else if (pool->low_count)
|
||||
page = ion_page_pool_remove(pool, false);
|
||||
mutex_unlock(&pool->mutex);
|
||||
}
|
||||
if (!page) {
|
||||
page = ion_page_pool_alloc_pages(pool);
|
||||
*from_pool = false;
|
||||
}
|
||||
|
||||
if (!page)
|
||||
page = ion_page_pool_alloc_pages(pool);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return page;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tries to allocate from only the specified Pool and returns NULL otherwise
|
||||
*/
|
||||
struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
|
||||
if (!pool)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (mutex_trylock(&pool->mutex)) {
|
||||
if (pool->high_count)
|
||||
page = ion_page_pool_remove(pool, true);
|
||||
else if (pool->low_count)
|
||||
page = ion_page_pool_remove(pool, false);
|
||||
mutex_unlock(&pool->mutex);
|
||||
}
|
||||
|
||||
if (!page)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return page;
|
||||
}
|
||||
|
||||
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
|
||||
{
|
||||
BUG_ON(pool->order != compound_order(page));
|
||||
|
||||
ion_page_pool_add(pool, page);
|
||||
}
|
||||
|
||||
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
|
||||
void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
|
||||
{
|
||||
ion_page_pool_free_pages(pool, page);
|
||||
}
|
||||
|
||||
int ion_page_pool_total(struct ion_page_pool *pool, bool high)
|
||||
{
|
||||
int count = pool->low_count;
|
||||
|
||||
@@ -131,20 +225,21 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
||||
return freed;
|
||||
}
|
||||
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
|
||||
bool cached)
|
||||
{
|
||||
struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
|
||||
struct ion_page_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
|
||||
|
||||
if (!pool)
|
||||
return NULL;
|
||||
pool->high_count = 0;
|
||||
pool->low_count = 0;
|
||||
INIT_LIST_HEAD(&pool->low_items);
|
||||
INIT_LIST_HEAD(&pool->high_items);
|
||||
pool->gfp_mask = gfp_mask | __GFP_COMP;
|
||||
pool->gfp_mask = gfp_mask;
|
||||
pool->order = order;
|
||||
mutex_init(&pool->mutex);
|
||||
plist_node_init(&pool->list, order);
|
||||
if (cached)
|
||||
pool->cached = true;
|
||||
|
||||
return pool;
|
||||
}
|
||||
|
@@ -13,6 +13,19 @@
|
||||
#include <linux/shrinker.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/* ION page pool marks in bytes */
|
||||
#ifdef CONFIG_ION_POOL_AUTO_REFILL
|
||||
#define ION_POOL_FILL_MARK (CONFIG_ION_POOL_FILL_MARK * SZ_1M)
|
||||
#define POOL_LOW_MARK_PERCENT 40UL
|
||||
#define ION_POOL_LOW_MARK ((ION_POOL_FILL_MARK * POOL_LOW_MARK_PERCENT) / 100)
|
||||
#else
|
||||
#define ION_POOL_FILL_MARK 0UL
|
||||
#define ION_POOL_LOW_MARK 0UL
|
||||
#endif
|
||||
|
||||
/* if low watermark of zones have reached, defer the refill in this window */
|
||||
#define ION_POOL_REFILL_DEFER_WINDOW_MS 10
|
||||
|
||||
/**
|
||||
* functions for creating and destroying a heap pool -- allows you
|
||||
* to keep a pool of pre allocated memory to use from your heap. Keeping
|
||||
@@ -25,13 +38,18 @@
|
||||
* struct ion_page_pool - pagepool struct
|
||||
* @high_count: number of highmem items in the pool
|
||||
* @low_count: number of lowmem items in the pool
|
||||
* @count: total number of pages/items in the pool
|
||||
* @high_items: list of highmem items
|
||||
* @low_items: list of lowmem items
|
||||
* @last_low_watermark_ktime: most recent time at which the zone watermarks were
|
||||
* low
|
||||
* @mutex: lock protecting this struct and especially the count
|
||||
* item list
|
||||
* @gfp_mask: gfp_mask to use from alloc
|
||||
* @order: order of pages in the pool
|
||||
* @list: plist node for list of pools
|
||||
* @cached: it's cached pool or not
|
||||
* @heap_dev: device for the ion heap associated with this pool
|
||||
*
|
||||
* Allows you to keep a pool of pre allocated pages to use from your heap.
|
||||
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
|
||||
@@ -41,18 +59,28 @@
|
||||
struct ion_page_pool {
|
||||
int high_count;
|
||||
int low_count;
|
||||
atomic_t count;
|
||||
struct list_head high_items;
|
||||
struct list_head low_items;
|
||||
ktime_t last_low_watermark_ktime;
|
||||
struct mutex mutex;
|
||||
gfp_t gfp_mask;
|
||||
unsigned int order;
|
||||
struct plist_node list;
|
||||
bool cached;
|
||||
struct device *heap_dev;
|
||||
};
|
||||
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
|
||||
bool cached);
|
||||
void ion_page_pool_destroy(struct ion_page_pool *pool);
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool);
|
||||
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
|
||||
struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *a);
|
||||
void ion_page_pool_free_immediate(struct ion_page_pool *pool,
|
||||
struct page *page);
|
||||
int ion_page_pool_total(struct ion_page_pool *pool, bool high);
|
||||
size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);
|
||||
|
||||
/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
|
||||
* @pool: the pool
|
||||
@@ -63,4 +91,26 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
|
||||
*/
|
||||
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
||||
int nr_to_scan);
|
||||
|
||||
void ion_page_pool_refill(struct ion_page_pool *pool);
|
||||
|
||||
static __always_inline int get_pool_fillmark(struct ion_page_pool *pool)
|
||||
{
|
||||
return ION_POOL_FILL_MARK / (PAGE_SIZE << pool->order);
|
||||
}
|
||||
|
||||
static __always_inline int get_pool_lowmark(struct ion_page_pool *pool)
|
||||
{
|
||||
return ION_POOL_LOW_MARK / (PAGE_SIZE << pool->order);
|
||||
}
|
||||
|
||||
static __always_inline bool pool_count_below_lowmark(struct ion_page_pool *pool)
|
||||
{
|
||||
return atomic_read(&pool->count) < get_pool_lowmark(pool);
|
||||
}
|
||||
|
||||
static __always_inline bool pool_fillmark_reached(struct ion_page_pool *pool)
|
||||
{
|
||||
return atomic_read(&pool->count) >= get_pool_fillmark(pool);
|
||||
}
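/*
 * Worked example (editorial sketch, not part of the patch, assuming 4 KB
 * pages): with the Kconfig default CONFIG_ION_POOL_FILL_MARK=100,
 * ION_POOL_FILL_MARK is 100 MB and ION_POOL_LOW_MARK is 40 MB (40%).
 * For an order-8 pool each entry is PAGE_SIZE << 8 = 1 MB, so
 * get_pool_fillmark() returns 100 and get_pool_lowmark() returns 40;
 * for an order-4 pool (64 KB entries) they return 1600 and 640.
 */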
|
||||
#endif /* _ION_PAGE_POOL_H */
|
||||
|
drivers/staging/android/ion/ion_secure_util.c (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include "ion_secure_util.h"
|
||||
#include "ion.h"
|
||||
#include "msm_ion_priv.h"
|
||||
|
||||
bool is_secure_vmid_valid(int vmid)
|
||||
{
|
||||
return (vmid == VMID_CP_TOUCH ||
|
||||
vmid == VMID_CP_BITSTREAM ||
|
||||
vmid == VMID_CP_PIXEL ||
|
||||
vmid == VMID_CP_NON_PIXEL ||
|
||||
vmid == VMID_CP_CAMERA ||
|
||||
vmid == VMID_CP_SEC_DISPLAY ||
|
||||
vmid == VMID_CP_APP ||
|
||||
vmid == VMID_CP_CAMERA_PREVIEW ||
|
||||
vmid == VMID_CP_SPSS_SP ||
|
||||
vmid == VMID_CP_SPSS_SP_SHARED ||
|
||||
vmid == VMID_CP_SPSS_HLOS_SHARED ||
|
||||
vmid == VMID_CP_CDSP);
|
||||
}
|
||||
|
||||
int get_secure_vmid(unsigned long flags)
|
||||
{
|
||||
if (flags & ION_FLAG_CP_TOUCH)
|
||||
return VMID_CP_TOUCH;
|
||||
if (flags & ION_FLAG_CP_BITSTREAM)
|
||||
return VMID_CP_BITSTREAM;
|
||||
if (flags & ION_FLAG_CP_PIXEL)
|
||||
return VMID_CP_PIXEL;
|
||||
if (flags & ION_FLAG_CP_NON_PIXEL)
|
||||
return VMID_CP_NON_PIXEL;
|
||||
if (flags & ION_FLAG_CP_CAMERA)
|
||||
return VMID_CP_CAMERA;
|
||||
if (flags & ION_FLAG_CP_SEC_DISPLAY)
|
||||
return VMID_CP_SEC_DISPLAY;
|
||||
if (flags & ION_FLAG_CP_APP)
|
||||
return VMID_CP_APP;
|
||||
if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
|
||||
return VMID_CP_CAMERA_PREVIEW;
|
||||
if (flags & ION_FLAG_CP_SPSS_SP)
|
||||
return VMID_CP_SPSS_SP;
|
||||
if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
|
||||
return VMID_CP_SPSS_SP_SHARED;
|
||||
if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
|
||||
return VMID_CP_SPSS_HLOS_SHARED;
|
||||
if (flags & ION_FLAG_CP_CDSP)
|
||||
return VMID_CP_CDSP;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static unsigned int count_set_bits(unsigned long val)
|
||||
{
|
||||
return ((unsigned int)bitmap_weight(&val, BITS_PER_LONG));
|
||||
}
|
||||
|
||||
static int get_vmid(unsigned long flags)
|
||||
{
|
||||
int vmid;
|
||||
|
||||
vmid = get_secure_vmid(flags);
|
||||
if (vmid < 0) {
|
||||
if (flags & ION_FLAG_CP_HLOS)
|
||||
vmid = VMID_HLOS;
|
||||
}
|
||||
return vmid;
|
||||
}
|
||||
|
||||
int ion_populate_vm_list(unsigned long flags, unsigned int *vm_list,
|
||||
int nelems)
|
||||
{
|
||||
unsigned int itr = 0;
|
||||
int vmid;
|
||||
|
||||
flags = flags & ION_FLAGS_CP_MASK;
|
||||
if (!flags)
|
||||
return -EINVAL;
|
||||
|
||||
for_each_set_bit(itr, &flags, BITS_PER_LONG) {
|
||||
vmid = get_vmid(0x1UL << itr);
|
||||
if (vmid < 0 || !nelems)
|
||||
return -EINVAL;
|
||||
|
||||
vm_list[nelems - 1] = vmid;
|
||||
nelems--;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
|
||||
int source_nelems, bool clear_page_private)
|
||||
{
|
||||
u32 dest_vmid = VMID_HLOS;
|
||||
u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
|
||||
struct scatterlist *sg;
|
||||
int ret, i;
|
||||
|
||||
if (source_nelems <= 0) {
|
||||
pr_err("%s: source_nelems invalid\n",
|
||||
__func__);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = hyp_assign_table(sgt, source_vm_list, source_nelems, &dest_vmid,
|
||||
&dest_perms, 1);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (clear_page_private)
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, i)
|
||||
ClearPagePrivate(sg_page(sg));
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
|
||||
int dest_nelems, bool set_page_private)
|
||||
{
|
||||
u32 source_vmid = VMID_HLOS;
|
||||
struct scatterlist *sg;
|
||||
int *dest_perms;
|
||||
int i;
|
||||
int ret = 0;
|
||||
|
||||
if (dest_nelems <= 0) {
|
||||
pr_err("%s: dest_nelems invalid\n",
|
||||
__func__);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dest_perms = kcalloc(dest_nelems, sizeof(*dest_perms), GFP_KERNEL);
|
||||
if (!dest_perms) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < dest_nelems; i++)
|
||||
dest_perms[i] = msm_secure_get_vmid_perms(dest_vm_list[i]);
|
||||
|
||||
ret = hyp_assign_table(sgt, &source_vmid, 1,
|
||||
dest_vm_list, dest_perms, dest_nelems);
|
||||
|
||||
if (ret) {
|
||||
pr_err("%s: Assign call failed\n",
|
||||
__func__);
|
||||
goto out_free_dest;
|
||||
}
|
||||
if (set_page_private)
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, i)
|
||||
SetPagePrivate(sg_page(sg));
|
||||
|
||||
out_free_dest:
|
||||
kfree(dest_perms);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
|
||||
bool set_page_private)
|
||||
{
|
||||
int ret = 0;
|
||||
int *source_vm_list;
|
||||
int source_nelems;
|
||||
|
||||
source_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
|
||||
source_vm_list = kcalloc(source_nelems, sizeof(*source_vm_list),
|
||||
GFP_KERNEL);
|
||||
if (!source_vm_list)
|
||||
return -ENOMEM;
|
||||
ret = ion_populate_vm_list(flags, source_vm_list, source_nelems);
|
||||
if (ret) {
|
||||
pr_err("%s: Failed to get secure vmids\n", __func__);
|
||||
goto out_free_source;
|
||||
}
|
||||
|
||||
ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
|
||||
set_page_private);
|
||||
|
||||
out_free_source:
|
||||
kfree(source_vm_list);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
|
||||
bool set_page_private)
|
||||
{
|
||||
int ret = 0;
|
||||
int *dest_vm_list = NULL;
|
||||
int dest_nelems;
|
||||
|
||||
dest_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
|
||||
dest_vm_list = kcalloc(dest_nelems, sizeof(*dest_vm_list), GFP_KERNEL);
|
||||
if (!dest_vm_list) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ion_populate_vm_list(flags, dest_vm_list, dest_nelems);
|
||||
if (ret) {
|
||||
pr_err("%s: Failed to get secure vmid(s)\n", __func__);
|
||||
goto out_free_dest_vm;
|
||||
}
|
||||
|
||||
ret = ion_hyp_assign_sg(sgt, dest_vm_list, dest_nelems,
|
||||
set_page_private);
|
||||
|
||||
out_free_dest_vm:
|
||||
kfree(dest_vm_list);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool hlos_accessible_buffer(struct ion_buffer *buffer)
|
||||
{
|
||||
if ((buffer->flags & ION_FLAG_SECURE) &&
|
||||
!(buffer->flags & ION_FLAG_CP_HLOS) &&
|
||||
!(buffer->flags & ION_FLAG_CP_SPSS_HLOS_SHARED))
|
||||
return false;
|
||||
else if ((get_secure_vmid(buffer->flags) > 0) &&
|
||||
!(buffer->flags & ION_FLAG_CP_HLOS) &&
|
||||
!(buffer->flags & ION_FLAG_CP_SPSS_HLOS_SHARED))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags)
|
||||
{
|
||||
u32 *vmids, *modes;
|
||||
u32 nr, i;
|
||||
int ret = -EINVAL;
|
||||
u32 src_vm = VMID_HLOS;
|
||||
|
||||
nr = count_set_bits(flags);
|
||||
vmids = kcalloc(nr, sizeof(*vmids), GFP_KERNEL);
|
||||
if (!vmids)
|
||||
return -ENOMEM;
|
||||
|
||||
modes = kcalloc(nr, sizeof(*modes), GFP_KERNEL);
|
||||
if (!modes) {
|
||||
kfree(vmids);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if ((flags & ~ION_FLAGS_CP_MASK) ||
|
||||
ion_populate_vm_list(flags, vmids, nr)) {
|
||||
pr_err("%s: Failed to parse secure flags 0x%lx\n", __func__,
|
||||
flags);
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr; i++)
|
||||
modes[i] = msm_secure_get_vmid_perms(vmids[i]);
|
||||
|
||||
ret = hyp_assign_phys(base, size, &src_vm, 1, vmids, modes, nr);
|
||||
if (ret)
|
||||
pr_err("%s: Assign call failed, flags 0x%lx\n", __func__,
|
||||
flags);
|
||||
|
||||
out:
|
||||
kfree(modes);
|
||||
kfree(vmids);
|
||||
return ret;
|
||||
}
|
drivers/staging/android/ion/ion_secure_util.h (new file, 25 lines)
@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include "ion.h"

#ifndef _ION_SECURE_UTIL_H
#define _ION_SECURE_UTIL_H

int get_secure_vmid(unsigned long flags);
bool is_secure_vmid_valid(int vmid);
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
		      int dest_nelems, bool set_page_private);
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
			int source_nelems, bool clear_page_private);
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
				   bool set_page_private);
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
				 bool set_page_private);
int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags);

bool hlos_accessible_buffer(struct ion_buffer *buffer);

#endif /* _ION_SECURE_UTIL_H */
@@ -3,6 +3,8 @@
|
||||
* ION Memory Allocator system heap exporter
|
||||
*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/page.h>
|
||||
@@ -13,18 +15,26 @@
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <linux/sched/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include "ion_system_heap.h"
|
||||
#include "ion.h"
|
||||
#include "ion_page_pool.h"
|
||||
|
||||
#define NUM_ORDERS ARRAY_SIZE(orders)
|
||||
#include "msm_ion_priv.h"
|
||||
#include "ion_system_heap.h"
|
||||
#include "ion_system_secure_heap.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
|
||||
__GFP_NORETRY) & ~__GFP_RECLAIM;
|
||||
static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
|
||||
static const unsigned int orders[] = {8, 4, 0};
|
||||
|
||||
static int order_to_index(unsigned int order)
|
||||
bool pool_auto_refill_en __read_mostly =
|
||||
IS_ENABLED(CONFIG_ION_POOL_AUTO_REFILL);
|
||||
|
||||
int order_to_index(unsigned int order)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -40,44 +50,132 @@ static inline unsigned int order_to_size(int order)
|
||||
return PAGE_SIZE << order;
|
||||
}
|
||||
|
||||
struct ion_system_heap {
|
||||
struct ion_heap heap;
|
||||
struct ion_page_pool *pools[NUM_ORDERS];
|
||||
struct pages_mem {
|
||||
struct page **pages;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long order)
|
||||
int ion_heap_is_system_heap_type(enum ion_heap_type type)
|
||||
{
|
||||
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
|
||||
|
||||
return ion_page_pool_alloc(pool);
|
||||
return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM);
|
||||
}
|
||||
|
||||
static void free_buffer_page(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer, struct page *page)
|
||||
static struct page *alloc_buffer_page(struct ion_system_heap *sys_heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long order,
|
||||
bool *from_pool)
|
||||
{
|
||||
int cached = (int)ion_buffer_cached(buffer);
|
||||
struct page *page;
|
||||
struct ion_page_pool *pool;
|
||||
unsigned int order = compound_order(page);
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
struct device *dev = sys_heap->heap.dev;
|
||||
|
||||
/* go to system */
|
||||
if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
|
||||
__free_pages(page, order);
|
||||
return;
|
||||
if (vmid > 0)
|
||||
pool = sys_heap->secure_pools[vmid][order_to_index(order)];
|
||||
else if (!cached)
|
||||
pool = sys_heap->uncached_pools[order_to_index(order)];
|
||||
else
|
||||
pool = sys_heap->cached_pools[order_to_index(order)];
|
||||
|
||||
page = ion_page_pool_alloc(pool, from_pool);
|
||||
|
||||
if (pool_auto_refill_en &&
|
||||
pool_count_below_lowmark(pool)) {
|
||||
wake_up_process(sys_heap->kworker[cached]);
|
||||
}
|
||||
|
||||
pool = heap->pools[order_to_index(order)];
|
||||
if (IS_ERR(page))
|
||||
return page;
|
||||
|
||||
ion_page_pool_free(pool, page);
|
||||
if ((MAKE_ION_ALLOC_DMA_READY && vmid <= 0) || !(*from_pool))
|
||||
ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
static struct page *alloc_largest_available(struct ion_system_heap *heap,
|
||||
/*
|
||||
* For secure pages that need to be freed and not added back to the pool; the
|
||||
* hyp_unassign should be called before calling this function
|
||||
*/
|
||||
void free_buffer_page(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer, struct page *page,
|
||||
unsigned int order)
|
||||
{
|
||||
bool cached = ion_buffer_cached(buffer);
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
|
||||
struct ion_page_pool *pool;
|
||||
|
||||
if (vmid > 0)
|
||||
pool = heap->secure_pools[vmid][order_to_index(order)];
|
||||
else if (cached)
|
||||
pool = heap->cached_pools[order_to_index(order)];
|
||||
else
|
||||
pool = heap->uncached_pools[order_to_index(order)];
|
||||
|
||||
if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
|
||||
ion_page_pool_free_immediate(pool, page);
|
||||
else
|
||||
ion_page_pool_free(pool, page);
|
||||
} else {
|
||||
__free_pages(page, order);
|
||||
}
|
||||
}
|
||||
|
||||
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned int max_order)
|
||||
{
|
||||
struct page *page;
|
||||
struct page_info *info;
|
||||
int i;
|
||||
bool from_pool;
|
||||
|
||||
info = kmalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
if (size < order_to_size(orders[i]))
|
||||
continue;
|
||||
if (max_order < orders[i])
|
||||
continue;
|
||||
from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
|
||||
page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
|
||||
if (IS_ERR(page))
|
||||
continue;
|
||||
|
||||
info->page = page;
|
||||
info->order = orders[i];
|
||||
info->from_pool = from_pool;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
return info;
|
||||
}
|
||||
kfree(info);
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static struct page_info *
|
||||
alloc_from_pool_preferred(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned int max_order)
|
||||
{
|
||||
struct page *page;
|
||||
struct page_info *info;
|
||||
int i;
|
||||
|
||||
if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
|
||||
goto force_alloc;
|
||||
|
||||
info = kmalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
if (size < order_to_size(orders[i]))
|
||||
@@ -85,14 +183,89 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
|
||||
if (max_order < orders[i])
|
||||
continue;
|
||||
|
||||
page = alloc_buffer_page(heap, buffer, orders[i]);
|
||||
if (!page)
|
||||
page = alloc_from_secure_pool_order(heap, buffer, orders[i]);
|
||||
if (IS_ERR(page))
|
||||
continue;
|
||||
|
||||
return page;
|
||||
info->page = page;
|
||||
info->order = orders[i];
|
||||
info->from_pool = true;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
return info;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
page = split_page_from_secure_pool(heap, buffer);
|
||||
if (!IS_ERR(page)) {
|
||||
info->page = page;
|
||||
info->order = 0;
|
||||
info->from_pool = true;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
return info;
|
||||
}
|
||||
|
||||
kfree(info);
|
||||
force_alloc:
|
||||
return alloc_largest_available(heap, buffer, size, max_order);
|
||||
}
|
||||
|
||||
static unsigned int process_info(struct page_info *info,
|
||||
struct scatterlist *sg,
|
||||
struct scatterlist *sg_sync,
|
||||
struct pages_mem *data, unsigned int i)
|
||||
{
|
||||
struct page *page = info->page;
|
||||
unsigned int j;
|
||||
|
||||
if (sg_sync) {
|
||||
sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
|
||||
sg_dma_address(sg_sync) = page_to_phys(page);
|
||||
}
|
||||
sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
|
||||
/*
|
||||
* This is not correct - sg_dma_address needs a dma_addr_t
|
||||
* that is valid for the the targeted device, but this works
|
||||
* on the currently targeted hardware.
|
||||
*/
|
||||
sg_dma_address(sg) = page_to_phys(page);
|
||||
if (data) {
|
||||
for (j = 0; j < (1 << info->order); ++j)
|
||||
data->pages[i++] = nth_page(page, j);
|
||||
}
|
||||
list_del(&info->list);
|
||||
kfree(info);
|
||||
return i;
|
||||
}
|
||||
|
||||
static int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
|
||||
{
|
||||
struct page **pages;
|
||||
unsigned int page_tbl_size;
|
||||
|
||||
page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
|
||||
if (page_tbl_size > SZ_8K) {
|
||||
/*
|
||||
* Do fallback to ensure we have a balance between
|
||||
* performance and availability.
|
||||
*/
|
||||
pages = kmalloc(page_tbl_size,
|
||||
__GFP_COMP | __GFP_NORETRY |
|
||||
__GFP_NOWARN);
|
||||
if (!pages)
|
||||
pages = vmalloc(page_tbl_size);
|
||||
} else {
|
||||
pages = kmalloc(page_tbl_size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
pages_mem->pages = pages;
|
||||
return 0;
|
||||
}
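Reviewer note: the allocation above tries a physically contiguous kmalloc() first and only falls back to vmalloc() for large page tables, which is why the matching free below uses kvfree(). A hedged sketch of the same behaviour using the combined helper (an equivalent rewrite, not part of this patch, assuming no special GFP flags are needed):

#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch only: kvmalloc_array() performs the kmalloc-then-vmalloc fallback itself. */
static int ion_heap_alloc_pages_mem_kv(struct pages_mem *pages_mem)
{
	pages_mem->pages = kvmalloc_array(pages_mem->size >> PAGE_SHIFT,
					  sizeof(struct page *), GFP_KERNEL);
	return pages_mem->pages ? 0 : -ENOMEM;
}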
|
||||
|
||||
static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
|
||||
{
|
||||
kvfree(pages_mem->pages);
|
||||
}
|
||||
|
||||
static int ion_system_heap_allocate(struct ion_heap *heap,
|
||||
@@ -100,71 +273,184 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
|
||||
unsigned long size,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct ion_system_heap *sys_heap = container_of(heap,
|
||||
struct ion_system_heap,
|
||||
heap);
|
||||
struct ion_system_heap *sys_heap = to_system_heap(heap);
|
||||
struct sg_table *table;
|
||||
struct sg_table table_sync = {0};
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sg_sync;
|
||||
int ret = -ENOMEM;
|
||||
struct list_head pages;
|
||||
struct page *page, *tmp_page;
|
||||
struct list_head pages_from_pool;
|
||||
struct page_info *info, *tmp_info;
|
||||
int i = 0;
|
||||
unsigned int nents_sync = 0;
|
||||
unsigned long size_remaining = PAGE_ALIGN(size);
|
||||
unsigned int max_order = orders[0];
|
||||
struct pages_mem data;
|
||||
unsigned int sz;
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
|
||||
if (size / PAGE_SIZE > totalram_pages() / 2)
|
||||
return -ENOMEM;
|
||||
|
||||
if (ion_heap_is_system_heap_type(buffer->heap->type) &&
|
||||
is_secure_vmid_valid(vmid)) {
|
||||
pr_info("%s: System heap doesn't support secure allocations\n",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
data.size = 0;
|
||||
INIT_LIST_HEAD(&pages);
|
||||
INIT_LIST_HEAD(&pages_from_pool);
|
||||
|
||||
while (size_remaining > 0) {
|
||||
page = alloc_largest_available(sys_heap, buffer, size_remaining,
|
||||
if (is_secure_vmid_valid(vmid))
|
||||
info = alloc_from_pool_preferred(sys_heap, buffer,
|
||||
size_remaining,
|
||||
max_order);
|
||||
if (!page)
|
||||
goto free_pages;
|
||||
list_add_tail(&page->lru, &pages);
|
||||
size_remaining -= PAGE_SIZE << compound_order(page);
|
||||
max_order = compound_order(page);
|
||||
else
|
||||
info = alloc_largest_available(sys_heap, buffer,
|
||||
size_remaining,
|
||||
max_order);
|
||||
|
||||
if (IS_ERR(info)) {
|
||||
ret = PTR_ERR(info);
|
||||
goto err;
|
||||
}
|
||||
|
||||
sz = (1 << info->order) * PAGE_SIZE;
|
||||
|
||||
if (info->from_pool) {
|
||||
list_add_tail(&info->list, &pages_from_pool);
|
||||
} else {
|
||||
list_add_tail(&info->list, &pages);
|
||||
data.size += sz;
|
||||
++nents_sync;
|
||||
}
|
||||
size_remaining -= sz;
|
||||
max_order = info->order;
|
||||
i++;
|
||||
}
|
||||
table = kmalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
goto free_pages;
|
||||
|
||||
if (sg_alloc_table(table, i, GFP_KERNEL))
|
||||
goto free_table;
|
||||
ret = ion_heap_alloc_pages_mem(&data);
|
||||
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
table = kzalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_data_pages;
|
||||
}
|
||||
|
||||
ret = sg_alloc_table(table, i, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err1;
|
||||
|
||||
if (nents_sync) {
|
||||
ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err_free_sg;
|
||||
}
|
||||
|
||||
i = 0;
|
||||
sg = table->sgl;
|
||||
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
|
||||
sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
|
||||
sg_sync = table_sync.sgl;
|
||||
|
||||
/*
|
||||
* We now have two separate lists. One list contains pages from the
|
||||
* pool and the other pages from buddy. We want to merge these
|
||||
* together while preserving the ordering of the pages (higher order
|
||||
* first).
|
||||
*/
|
||||
do {
|
||||
info = list_first_entry_or_null(&pages, struct page_info, list);
|
||||
tmp_info = list_first_entry_or_null(&pages_from_pool,
|
||||
struct page_info, list);
|
||||
if (info && tmp_info) {
|
||||
if (info->order >= tmp_info->order) {
|
||||
i = process_info(info, sg, sg_sync, &data, i);
|
||||
sg_sync = sg_next(sg_sync);
|
||||
} else {
|
||||
i = process_info(tmp_info, sg, 0, 0, i);
|
||||
}
|
||||
} else if (info) {
|
||||
i = process_info(info, sg, sg_sync, &data, i);
|
||||
sg_sync = sg_next(sg_sync);
|
||||
} else if (tmp_info) {
|
||||
i = process_info(tmp_info, sg, 0, 0, i);
|
||||
}
|
||||
sg = sg_next(sg);
|
||||
list_del(&page->lru);
|
||||
|
||||
} while (sg);
|
||||
|
||||
if (nents_sync) {
|
||||
if (vmid > 0) {
|
||||
ret = ion_hyp_assign_sg(&table_sync, &vmid, 1, true);
|
||||
if (ret)
|
||||
goto err_free_sg2;
|
||||
}
|
||||
}
|
||||
|
||||
buffer->sg_table = table;
|
||||
if (nents_sync)
|
||||
sg_free_table(&table_sync);
|
||||
ion_heap_free_pages_mem(&data);
|
||||
ion_prepare_sgl_for_force_dma_sync(buffer->sg_table);
|
||||
return 0;
|
||||
|
||||
free_table:
|
||||
err_free_sg2:
|
||||
/* We failed to zero buffers. Bypass pool */
|
||||
buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
|
||||
|
||||
if (vmid > 0)
|
||||
ion_hyp_unassign_sg(table, &vmid, 1, true);
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i)
|
||||
free_buffer_page(sys_heap, buffer, sg_page(sg),
|
||||
get_order(sg->length));
|
||||
if (nents_sync)
|
||||
sg_free_table(&table_sync);
|
||||
err_free_sg:
|
||||
sg_free_table(table);
|
||||
err1:
|
||||
kfree(table);
|
||||
free_pages:
|
||||
list_for_each_entry_safe(page, tmp_page, &pages, lru)
|
||||
free_buffer_page(sys_heap, buffer, page);
|
||||
return -ENOMEM;
|
||||
err_free_data_pages:
|
||||
ion_heap_free_pages_mem(&data);
|
||||
err:
|
||||
list_for_each_entry_safe(info, tmp_info, &pages, list) {
|
||||
free_buffer_page(sys_heap, buffer, info->page, info->order);
|
||||
kfree(info);
|
||||
}
|
||||
list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
|
||||
free_buffer_page(sys_heap, buffer, info->page, info->order);
|
||||
kfree(info);
|
||||
}
|
||||
return ret;
|
||||
}
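Reviewer note: the do/while loop above merges the buddy-allocated list and the pool list by always consuming whichever head has the larger order, so the resulting scatterlist stays ordered from largest to smallest chunk. A standalone sketch of that merge rule (illustrative only; the struct and the printing are hypothetical):

#include <stdio.h>

struct pinfo { unsigned int order; };

/* Merge two lists that are each already sorted by descending order. */
static void merge_desc(const struct pinfo *buddy, int nb,
		       const struct pinfo *pool, int np)
{
	int i = 0, j = 0;

	while (i < nb || j < np) {
		if (j >= np || (i < nb && buddy[i].order >= pool[j].order))
			printf("take buddy page, order %u\n", buddy[i++].order);
		else
			printf("take pool page, order %u\n", pool[j++].order);
	}
}

int main(void)
{
	const struct pinfo buddy[] = { {9}, {4}, {0} };
	const struct pinfo pool[]  = { {4}, {4}, {0} };

	merge_desc(buddy, 3, pool, 3);
	return 0;
}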
|
||||
|
||||
static void ion_system_heap_free(struct ion_buffer *buffer)
|
||||
void ion_system_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_system_heap *sys_heap = container_of(buffer->heap,
|
||||
struct ion_system_heap,
|
||||
heap);
|
||||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_system_heap *sys_heap = to_system_heap(heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
|
||||
/* zero the buffer before returning it to the page pool */
|
||||
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
|
||||
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
|
||||
!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
|
||||
if (vmid < 0)
|
||||
ion_heap_buffer_zero(buffer);
|
||||
} else if (vmid > 0) {
|
||||
if (ion_hyp_unassign_sg(table, &vmid, 1, true))
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i)
|
||||
free_buffer_page(sys_heap, buffer, sg_page(sg));
|
||||
free_buffer_page(sys_heap, buffer, sg_page(sg),
|
||||
get_order(sg->length));
|
||||
sg_free_table(table);
|
||||
kfree(table);
|
||||
}
|
||||
@@ -172,35 +458,44 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
|
||||
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
|
||||
int nr_to_scan)
|
||||
{
|
||||
struct ion_page_pool *pool;
|
||||
struct ion_system_heap *sys_heap;
|
||||
int nr_total = 0;
|
||||
int i, nr_freed;
|
||||
int i, j, nr_freed = 0;
|
||||
int only_scan = 0;
|
||||
struct ion_page_pool *pool;
|
||||
|
||||
sys_heap = container_of(heap, struct ion_system_heap, heap);
|
||||
sys_heap = to_system_heap(heap);
|
||||
|
||||
if (!nr_to_scan)
|
||||
only_scan = 1;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->pools[i];
|
||||
/* shrink the pools starting from lower order ones */
|
||||
for (i = NUM_ORDERS - 1; i >= 0; i--) {
|
||||
nr_freed = 0;
|
||||
|
||||
if (only_scan) {
|
||||
nr_total += ion_page_pool_shrink(pool,
|
||||
gfp_mask,
|
||||
for (j = 0; j < VMID_LAST; j++) {
|
||||
if (is_secure_vmid_valid(j))
|
||||
nr_freed +=
|
||||
ion_secure_page_pool_shrink(sys_heap,
|
||||
j, i,
|
||||
nr_to_scan);
|
||||
}
|
||||
|
||||
} else {
|
||||
nr_freed = ion_page_pool_shrink(pool,
|
||||
gfp_mask,
|
||||
nr_to_scan);
|
||||
nr_to_scan -= nr_freed;
|
||||
pool = sys_heap->uncached_pools[i];
|
||||
nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
|
||||
|
||||
pool = sys_heap->cached_pools[i];
|
||||
nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
|
||||
nr_total += nr_freed;
|
||||
|
||||
if (!only_scan) {
|
||||
nr_to_scan -= nr_freed;
|
||||
/* shrink completed */
|
||||
if (nr_to_scan <= 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return nr_total;
|
||||
}
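Reviewer note: as in the core shrinker API, a nr_to_scan of zero is treated above as a query (the callback only counts reclaimable pages), while a positive value asks it to actually drain the pools. A small sketch of that calling convention from a would-be caller's side (hypothetical helper, not part of this patch):

/* Sketch: query first, then reclaim at most half of what is currently pooled. */
static int query_then_shrink(struct ion_heap *heap, gfp_t gfp_mask)
{
	int reclaimable = heap->ops->shrink(heap, gfp_mask, 0);

	return heap->ops->shrink(heap, gfp_mask, reclaimable / 2);
}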
|
||||
|
||||
@@ -213,16 +508,127 @@ static struct ion_heap_ops system_heap_ops = {
|
||||
.shrink = ion_system_heap_shrink,
|
||||
};
|
||||
|
||||
static __maybe_unused int ion_system_heap_debug_show(struct ion_heap *heap,
|
||||
struct seq_file *s,
|
||||
void *unused)
|
||||
{
|
||||
struct ion_system_heap *sys_heap;
|
||||
bool use_seq = s;
|
||||
unsigned long uncached_total = 0;
|
||||
unsigned long cached_total = 0;
|
||||
unsigned long secure_total = 0;
|
||||
struct ion_page_pool *pool;
|
||||
int i, j;
|
||||
|
||||
sys_heap = to_system_heap(heap);
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->uncached_pools[i];
|
||||
if (use_seq) {
|
||||
seq_printf(s,
|
||||
"%d order %u highmem pages in uncached pool = %lu total\n",
|
||||
pool->high_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count);
|
||||
seq_printf(s,
|
||||
"%d order %u lowmem pages in uncached pool = %lu total\n",
|
||||
pool->low_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count);
|
||||
}
|
||||
|
||||
uncached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count;
|
||||
uncached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->cached_pools[i];
|
||||
if (use_seq) {
|
||||
seq_printf(s,
|
||||
"%d order %u highmem pages in cached pool = %lu total\n",
|
||||
pool->high_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count);
|
||||
seq_printf(s,
|
||||
"%d order %u lowmem pages in cached pool = %lu total\n",
|
||||
pool->low_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count);
|
||||
}
|
||||
|
||||
cached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count;
|
||||
cached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
for (j = 0; j < VMID_LAST; j++) {
|
||||
if (!is_secure_vmid_valid(j))
|
||||
continue;
|
||||
pool = sys_heap->secure_pools[j][i];
|
||||
|
||||
if (use_seq) {
|
||||
seq_printf(s,
|
||||
"VMID %d: %d order %u highmem pages in secure pool = %lu total\n",
|
||||
j, pool->high_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count);
|
||||
seq_printf(s,
|
||||
"VMID %d: %d order %u lowmem pages in secure pool = %lu total\n",
|
||||
j, pool->low_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count);
|
||||
}
|
||||
|
||||
secure_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count;
|
||||
secure_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count;
|
||||
}
|
||||
}
|
||||
|
||||
if (use_seq) {
|
||||
seq_puts(s, "--------------------------------------------\n");
|
||||
seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n",
|
||||
uncached_total, cached_total, secure_total);
|
||||
seq_printf(s, "pool total (uncached + cached + secure) = %lu\n",
|
||||
uncached_total + cached_total + secure_total);
|
||||
seq_puts(s, "--------------------------------------------\n");
|
||||
} else {
|
||||
pr_info("-------------------------------------------------\n");
|
||||
pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n",
|
||||
uncached_total, cached_total, secure_total);
|
||||
pr_info("pool total (uncached + cached + secure) = %lu\n",
|
||||
uncached_total + cached_total + secure_total);
|
||||
pr_info("-------------------------------------------------\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++)
|
||||
if (pools[i])
|
||||
if (pools[i]) {
|
||||
ion_page_pool_destroy(pools[i]);
|
||||
pools[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
|
||||
/**
|
||||
* ion_system_heap_create_pools - Creates pools for all orders
|
||||
*
|
||||
* If this fails you don't need to destroy any pools. It's all or
|
||||
* nothing. If it succeeds you'll eventually need to use
|
||||
* ion_system_heap_destroy_pools to destroy the pools.
|
||||
*/
|
||||
static int ion_system_heap_create_pools(struct ion_system_heap *sys_heap,
|
||||
struct ion_page_pool **pools,
|
||||
bool cached)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -230,58 +636,140 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
|
||||
struct ion_page_pool *pool;
|
||||
gfp_t gfp_flags = low_order_gfp_flags;
|
||||
|
||||
if (orders[i] > 4)
|
||||
if (orders[i])
|
||||
gfp_flags = high_order_gfp_flags;
|
||||
|
||||
pool = ion_page_pool_create(gfp_flags, orders[i]);
|
||||
pool = ion_page_pool_create(gfp_flags, orders[i], cached);
|
||||
if (!pool)
|
||||
goto err_create_pool;
|
||||
pool->heap_dev = sys_heap->heap.dev;
|
||||
pools[i] = pool;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_create_pool:
|
||||
ion_system_heap_destroy_pools(pools);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static struct ion_heap *__ion_system_heap_create(void)
|
||||
static int ion_sys_heap_worker(void *data)
|
||||
{
|
||||
struct ion_page_pool **pools = (struct ion_page_pool **)data;
|
||||
int i;
|
||||
|
||||
for (;;) {
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
if (pool_count_below_lowmark(pools[i]))
|
||||
ion_page_pool_refill(pools[i]);
|
||||
}
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (unlikely(kthread_should_stop())) {
|
||||
set_current_state(TASK_RUNNING);
|
||||
break;
|
||||
}
|
||||
schedule();
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct task_struct *ion_create_kworker(struct ion_page_pool **pools,
|
||||
bool cached)
|
||||
{
|
||||
struct sched_attr attr = { 0 };
|
||||
struct task_struct *thread;
|
||||
int ret;
|
||||
char *buf;
|
||||
cpumask_t *cpumask;
|
||||
DECLARE_BITMAP(bmap, nr_cpumask_bits);
|
||||
|
||||
attr.sched_nice = ION_KTHREAD_NICE_VAL;
|
||||
buf = cached ? "cached" : "uncached";
|
||||
/*
|
||||
* Affine the kthreads to min capacity CPUs
|
||||
* TODO: remove this hack once is_min_capability_cpu is available
|
||||
*/
|
||||
bitmap_fill(bmap, 0x4);
|
||||
cpumask = to_cpumask(bmap);
|
||||
|
||||
thread = kthread_create(ion_sys_heap_worker, pools,
|
||||
"ion-pool-%s-worker", buf);
|
||||
if (IS_ERR(thread)) {
|
||||
pr_err("%s: failed to create %s worker thread: %ld\n",
|
||||
__func__, buf, PTR_ERR(thread));
|
||||
return thread;
|
||||
}
|
||||
ret = sched_setattr(thread, &attr);
|
||||
if (ret) {
|
||||
kthread_stop(thread);
|
||||
pr_warn("%s: failed to set task priority for %s worker thread: ret = %d\n",
|
||||
__func__, buf, ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
kthread_bind_mask(thread, cpumask);
|
||||
return thread;
|
||||
}
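Reviewer note: the workers created above sleep until they are woken, refill any pool that has fallen below its low-water mark, and go back to sleep. A hedged sketch of how the allocation path would typically nudge them (the wake-up site is an assumption; only the kworker[] array and the kthread types come from this patch):

#include <linux/err.h>
#include <linux/sched.h>

/* Sketch only: wake the matching refill thread when a pool runs low. */
static void ion_system_heap_kick_refill(struct ion_system_heap *sys_heap,
					bool cached)
{
	struct task_struct *worker =
		sys_heap->kworker[cached ? ION_KTHREAD_CACHED
					 : ION_KTHREAD_UNCACHED];

	if (!IS_ERR_OR_NULL(worker))
		wake_up_process(worker);
}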
|
||||
|
||||
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_system_heap *heap;
|
||||
int ret = -ENOMEM;
|
||||
int i;
|
||||
|
||||
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
|
||||
if (!heap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
heap->heap.ops = &system_heap_ops;
|
||||
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
|
||||
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
|
||||
heap->heap.dev = data->priv;
|
||||
heap->heap.ion_heap.ops = &system_heap_ops;
|
||||
heap->heap.ion_heap.type = ION_HEAP_TYPE_SYSTEM;
|
||||
heap->heap.ion_heap.flags = ION_HEAP_FLAG_DEFER_FREE;
|
||||
|
||||
if (ion_system_heap_create_pools(heap->pools))
|
||||
goto free_heap;
|
||||
for (i = 0; i < VMID_LAST; i++)
|
||||
if (is_secure_vmid_valid(i))
|
||||
if (ion_system_heap_create_pools(heap,
|
||||
heap->secure_pools[i],
|
||||
false))
|
||||
goto destroy_secure_pools;
|
||||
|
||||
return &heap->heap;
|
||||
if (ion_system_heap_create_pools(heap, heap->uncached_pools, false))
|
||||
goto destroy_secure_pools;
|
||||
|
||||
free_heap:
|
||||
if (ion_system_heap_create_pools(heap, heap->cached_pools, true))
|
||||
goto destroy_uncached_pools;
|
||||
|
||||
if (pool_auto_refill_en) {
|
||||
heap->kworker[ION_KTHREAD_UNCACHED] =
|
||||
ion_create_kworker(heap->uncached_pools, false);
|
||||
if (IS_ERR(heap->kworker[ION_KTHREAD_UNCACHED])) {
|
||||
ret = PTR_ERR(heap->kworker[ION_KTHREAD_UNCACHED]);
|
||||
goto destroy_pools;
|
||||
}
|
||||
heap->kworker[ION_KTHREAD_CACHED] =
|
||||
ion_create_kworker(heap->cached_pools, true);
|
||||
if (IS_ERR(heap->kworker[ION_KTHREAD_CACHED])) {
|
||||
kthread_stop(heap->kworker[ION_KTHREAD_UNCACHED]);
|
||||
ret = PTR_ERR(heap->kworker[ION_KTHREAD_CACHED]);
|
||||
goto destroy_pools;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_init(&heap->split_page_mutex);
|
||||
|
||||
return &heap->heap.ion_heap;
|
||||
destroy_pools:
|
||||
ion_system_heap_destroy_pools(heap->cached_pools);
|
||||
destroy_uncached_pools:
|
||||
ion_system_heap_destroy_pools(heap->uncached_pools);
|
||||
destroy_secure_pools:
|
||||
for (i = 0; i < VMID_LAST; i++) {
|
||||
if (heap->secure_pools[i])
|
||||
ion_system_heap_destroy_pools(heap->secure_pools[i]);
|
||||
}
|
||||
kfree(heap);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static int ion_system_heap_create(void)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
|
||||
heap = __ion_system_heap_create();
|
||||
if (IS_ERR(heap))
|
||||
return PTR_ERR(heap);
|
||||
heap->name = "ion_system_heap";
|
||||
|
||||
ion_device_add_heap(heap);
|
||||
|
||||
return 0;
|
||||
}
|
||||
device_initcall(ion_system_heap_create);
|
||||
|
||||
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long len,
|
||||
@@ -317,6 +805,8 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
|
||||
|
||||
buffer->sg_table = table;
|
||||
|
||||
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
|
||||
|
||||
return 0;
|
||||
|
||||
free_table:
|
||||
@@ -349,7 +839,7 @@ static struct ion_heap_ops kmalloc_ops = {
|
||||
.map_user = ion_heap_map_user,
|
||||
};
|
||||
|
||||
static struct ion_heap *__ion_system_contig_heap_create(void)
|
||||
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
|
||||
@@ -358,21 +848,5 @@ static struct ion_heap *__ion_system_contig_heap_create(void)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
heap->ops = &kmalloc_ops;
|
||||
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
|
||||
heap->name = "ion_system_contig_heap";
|
||||
|
||||
return heap;
|
||||
}
|
||||
|
||||
static int ion_system_contig_heap_create(void)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
|
||||
heap = __ion_system_contig_heap_create();
|
||||
if (IS_ERR(heap))
|
||||
return PTR_ERR(heap);
|
||||
|
||||
ion_device_add_heap(heap);
|
||||
|
||||
return 0;
|
||||
}
|
||||
device_initcall(ion_system_contig_heap_create);
|
||||
|
59
drivers/staging/android/ion/ion_system_heap.h
Normal file
@@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */
#include <soc/qcom/secure_buffer.h>
#include "ion.h"
#include "msm_ion_priv.h"

#ifndef _ION_SYSTEM_HEAP_H
#define _ION_SYSTEM_HEAP_H

#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
#if defined(CONFIG_IOMMU_IO_PGTABLE_ARMV7S)
static const unsigned int orders[] = {8, 4, 0};
#else
static const unsigned int orders[] = {9, 4, 0};
#endif
#else
static const unsigned int orders[] = {0};
#endif

#define NUM_ORDERS ARRAY_SIZE(orders)

#define ION_KTHREAD_NICE_VAL 10

#define to_system_heap(_heap) \
	container_of(to_msm_ion_heap(_heap), struct ion_system_heap, heap)

enum ion_kthread_type {
	ION_KTHREAD_UNCACHED,
	ION_KTHREAD_CACHED,
	ION_MAX_NUM_KTHREADS
};

struct ion_system_heap {
	struct msm_ion_heap heap;
	struct ion_page_pool *uncached_pools[MAX_ORDER];
	struct ion_page_pool *cached_pools[MAX_ORDER];
	/* worker threads to refill the pool */
	struct task_struct *kworker[ION_MAX_NUM_KTHREADS];
	struct ion_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	/* Prevents unnecessary page splitting */
	struct mutex split_page_mutex;
};

struct page_info {
	struct page *page;
	bool from_pool;
	unsigned int order;
	struct list_head list;
};

int order_to_index(unsigned int order);

void free_buffer_page(struct ion_system_heap *heap,
		      struct ion_buffer *buffer, struct page *page,
		      unsigned int order);

#endif /* _ION_SYSTEM_HEAP_H */
|
491
drivers/staging/android/ion/ion_system_secure_heap.c
Normal file
@@ -0,0 +1,491 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include "ion_system_secure_heap.h"
|
||||
#include "ion_system_heap.h"
|
||||
#include "ion.h"
|
||||
#include "ion_page_pool.h"
|
||||
#include "msm_ion_priv.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
#define to_system_secure_heap(_heap) \
|
||||
container_of(to_msm_ion_heap(_heap), struct ion_system_secure_heap, \
|
||||
heap)
|
||||
|
||||
struct ion_system_secure_heap {
|
||||
struct ion_heap *sys_heap;
|
||||
struct msm_ion_heap heap;
|
||||
|
||||
/* Protects prefetch_list */
|
||||
spinlock_t work_lock;
|
||||
bool destroy_heap;
|
||||
struct list_head prefetch_list;
|
||||
struct delayed_work prefetch_work;
|
||||
};
|
||||
|
||||
struct prefetch_info {
|
||||
struct list_head list;
|
||||
int vmid;
|
||||
u64 size;
|
||||
bool shrink;
|
||||
};
|
||||
|
||||
/*
|
||||
* The video client may not hold the last reference count on the
|
||||
* ion_buffer(s). Delay for a short time after the video client sends
|
||||
* the IOC_DRAIN event to increase the chance that the reference
|
||||
* count drops to zero. Time in milliseconds.
|
||||
*/
|
||||
#define SHRINK_DELAY 1000
|
||||
|
||||
int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
|
||||
{
|
||||
return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
|
||||
}
|
||||
|
||||
static bool is_cp_flag_present(unsigned long flags)
|
||||
{
|
||||
return flags & (ION_FLAG_CP_TOUCH |
|
||||
ION_FLAG_CP_BITSTREAM |
|
||||
ION_FLAG_CP_PIXEL |
|
||||
ION_FLAG_CP_NON_PIXEL |
|
||||
ION_FLAG_CP_CAMERA);
|
||||
}
|
||||
|
||||
static void ion_system_secure_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_system_secure_heap *secure_heap =
|
||||
to_system_secure_heap(heap);
|
||||
|
||||
secure_heap->sys_heap->ops->free(buffer);
|
||||
}
|
||||
|
||||
static int ion_system_secure_heap_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned long flags)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ion_system_secure_heap *secure_heap =
|
||||
to_system_secure_heap(heap);
|
||||
enum ion_heap_type type = secure_heap->heap.ion_heap.type;
|
||||
|
||||
if (!ion_heap_is_system_secure_heap_type(type) ||
|
||||
!is_cp_flag_present(flags)) {
|
||||
pr_info("%s: Incorrect heap type or incorrect flags\n",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = secure_heap->sys_heap->ops->allocate(secure_heap->sys_heap,
|
||||
buffer, size, flags);
|
||||
if (ret) {
|
||||
pr_info("%s: Failed to get allocation for %s, ret = %d\n",
|
||||
__func__, heap->name, ret);
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void process_one_prefetch(struct ion_heap *sys_heap,
|
||||
struct prefetch_info *info)
|
||||
{
|
||||
struct ion_buffer buffer;
|
||||
int ret;
|
||||
int vmid;
|
||||
|
||||
memset(&buffer, 0, sizeof(struct ion_buffer));
|
||||
buffer.heap = sys_heap;
|
||||
|
||||
ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size,
|
||||
buffer.flags);
|
||||
if (ret) {
|
||||
pr_debug("%s: Failed to prefetch 0x%llx, ret = %d\n",
|
||||
__func__, info->size, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
vmid = get_secure_vmid(info->vmid);
|
||||
if (vmid < 0)
|
||||
goto out;
|
||||
|
||||
ret = ion_hyp_assign_sg(buffer.sg_table, &vmid, 1, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* Now free it to the secure heap */
|
||||
buffer.heap = sys_heap;
|
||||
buffer.flags = info->vmid;
|
||||
|
||||
out:
|
||||
sys_heap->ops->free(&buffer);
|
||||
}
|
||||
|
||||
/*
|
||||
* Since no lock is held, results are approximate.
|
||||
*/
|
||||
size_t ion_system_secure_heap_page_pool_total(struct ion_heap *heap,
|
||||
int vmid_flags)
|
||||
{
|
||||
struct ion_system_heap *sys_heap;
|
||||
struct ion_page_pool *pool;
|
||||
size_t total = 0;
|
||||
int vmid, i;
|
||||
|
||||
sys_heap = to_system_heap(heap);
|
||||
vmid = get_secure_vmid(vmid_flags);
|
||||
if (vmid < 0)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->secure_pools[vmid][i];
|
||||
total += ion_page_pool_total(pool, true);
|
||||
}
|
||||
|
||||
return total << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
|
||||
struct ion_heap *sys_heap,
|
||||
struct prefetch_info *info)
|
||||
{
|
||||
struct ion_buffer buffer;
|
||||
size_t pool_size, size;
|
||||
int ret;
|
||||
|
||||
memset(&buffer, 0, sizeof(struct ion_buffer));
|
||||
buffer.heap = &secure_heap->heap.ion_heap;
|
||||
buffer.flags = info->vmid;
|
||||
|
||||
pool_size = ion_system_secure_heap_page_pool_total(sys_heap,
|
||||
info->vmid);
|
||||
size = min_t(size_t, pool_size, info->size);
|
||||
ret = sys_heap->ops->allocate(sys_heap, &buffer, size, buffer.flags);
|
||||
if (ret) {
|
||||
pr_debug("%s: Failed to shrink 0x%llx, ret = %d\n",
|
||||
__func__, info->size, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
|
||||
buffer.heap = sys_heap;
|
||||
sys_heap->ops->free(&buffer);
|
||||
}
|
||||
|
||||
static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap = container_of(work,
|
||||
struct ion_system_secure_heap,
|
||||
prefetch_work.work);
|
||||
struct ion_heap *sys_heap = secure_heap->sys_heap;
|
||||
struct prefetch_info *info, *tmp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
list_for_each_entry_safe(info, tmp,
|
||||
&secure_heap->prefetch_list, list) {
|
||||
list_del(&info->list);
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
|
||||
if (info->shrink)
|
||||
process_one_shrink(secure_heap, sys_heap, info);
|
||||
else
|
||||
process_one_prefetch(sys_heap, info);
|
||||
|
||||
kfree(info);
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
}
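Reviewer note: the work function above drains prefetch_list with the detach-then-drop-the-lock pattern so that the potentially sleeping allocate/assign calls never run under the spinlock. The same pattern in isolation (generic sketch, not taken from this patch):

/* Sketch: pop one node under the lock, process it with the lock dropped. */
static void drain_prefetch_list(spinlock_t *lock, struct list_head *head,
				void (*process)(struct prefetch_info *info))
{
	struct prefetch_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(info, tmp, head, list) {
		list_del(&info->list);
		spin_unlock_irqrestore(lock, flags);

		process(info);			/* may sleep */
		kfree(info);

		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}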
|
||||
|
||||
static int alloc_prefetch_info(struct ion_prefetch_regions __user *
|
||||
user_regions, bool shrink,
|
||||
struct list_head *items)
|
||||
{
|
||||
struct prefetch_info *info;
|
||||
u64 user_sizes;
|
||||
int err;
|
||||
unsigned int nr_sizes, vmid, i;
|
||||
|
||||
err = get_user(nr_sizes, &user_regions->nr_sizes);
|
||||
err |= get_user(user_sizes, &user_regions->sizes);
|
||||
err |= get_user(vmid, &user_regions->vmid);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
if (!is_secure_vmid_valid(get_secure_vmid(vmid)))
|
||||
return -EINVAL;
|
||||
|
||||
if (nr_sizes > 0x10)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < nr_sizes; i++) {
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
err = get_user(info->size, ((u64 __user *)user_sizes + i));
|
||||
if (err)
|
||||
goto out_free;
|
||||
|
||||
info->vmid = vmid;
|
||||
info->shrink = shrink;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
list_add_tail(&info->list, items);
|
||||
}
|
||||
return err;
|
||||
out_free:
|
||||
kfree(info);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
|
||||
bool shrink)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap =
|
||||
to_system_secure_heap(heap);
|
||||
struct ion_prefetch_data *data = ptr;
|
||||
int i, ret = 0;
|
||||
struct prefetch_info *info, *tmp;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(items);
|
||||
|
||||
if ((int)heap->type != ION_HEAP_TYPE_SYSTEM_SECURE)
|
||||
return -EINVAL;
|
||||
|
||||
if (data->nr_regions > 0x10)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < data->nr_regions; i++) {
|
||||
struct ion_prefetch_regions *r;
|
||||
|
||||
r = (struct ion_prefetch_regions *)data->regions + i;
|
||||
ret = alloc_prefetch_info(r, shrink, &items);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
if (secure_heap->destroy_heap) {
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
goto out_free;
|
||||
}
|
||||
list_splice_tail_init(&items, &secure_heap->prefetch_list);
|
||||
queue_delayed_work(system_unbound_wq, &secure_heap->prefetch_work,
|
||||
shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
list_for_each_entry_safe(info, tmp, &items, list) {
|
||||
list_del(&info->list);
|
||||
kfree(info);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *ptr)
|
||||
{
|
||||
return __ion_system_secure_heap_resize(heap, ptr, false);
|
||||
}
|
||||
|
||||
int ion_system_secure_heap_drain(struct ion_heap *heap, void *ptr)
|
||||
{
|
||||
return __ion_system_secure_heap_resize(heap, ptr, true);
|
||||
}
|
||||
|
||||
static void *ion_system_secure_heap_map_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
pr_info("%s: Kernel mapping from secure heap %s disallowed\n",
|
||||
__func__, heap->name);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void ion_system_secure_heap_unmap_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
}
|
||||
|
||||
static int ion_system_secure_heap_map_user(struct ion_heap *mapper,
|
||||
struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
pr_info("%s: Mapping from secure heap %s disallowed\n",
|
||||
__func__, mapper->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int ion_system_secure_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
|
||||
int nr_to_scan)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap =
|
||||
to_system_secure_heap(heap);
|
||||
|
||||
return secure_heap->sys_heap->ops->shrink(secure_heap->sys_heap,
|
||||
gfp_mask, nr_to_scan);
|
||||
}
|
||||
|
||||
static struct ion_heap_ops system_secure_heap_ops = {
|
||||
.allocate = ion_system_secure_heap_allocate,
|
||||
.free = ion_system_secure_heap_free,
|
||||
.map_kernel = ion_system_secure_heap_map_kernel,
|
||||
.unmap_kernel = ion_system_secure_heap_unmap_kernel,
|
||||
.map_user = ion_system_secure_heap_map_user,
|
||||
.shrink = ion_system_secure_heap_shrink,
|
||||
};
|
||||
|
||||
struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *unused)
|
||||
{
|
||||
struct ion_system_secure_heap *heap;
|
||||
|
||||
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
|
||||
if (!heap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
heap->heap.ion_heap.ops = &system_secure_heap_ops;
|
||||
heap->heap.ion_heap.type =
|
||||
(enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE;
|
||||
heap->sys_heap = get_ion_heap(ION_SYSTEM_HEAP_ID);
|
||||
|
||||
heap->destroy_heap = false;
|
||||
heap->work_lock = __SPIN_LOCK_UNLOCKED(heap->work_lock);
|
||||
INIT_LIST_HEAD(&heap->prefetch_list);
|
||||
INIT_DELAYED_WORK(&heap->prefetch_work,
|
||||
ion_system_secure_heap_prefetch_work);
|
||||
return &heap->heap.ion_heap;
|
||||
}
|
||||
|
||||
struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long order)
|
||||
{
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
struct ion_page_pool *pool;
|
||||
|
||||
if (!is_secure_vmid_valid(vmid))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pool = heap->secure_pools[vmid][order_to_index(order)];
|
||||
return ion_page_pool_alloc_pool_only(pool);
|
||||
}
|
||||
|
||||
struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
int i, j;
|
||||
struct page *page;
|
||||
unsigned int order;
|
||||
|
||||
mutex_lock(&heap->split_page_mutex);
|
||||
|
||||
/*
|
||||
* Someone may have just split a page and returned the unused portion
|
||||
* back to the pool, so try allocating from the pool one more time
|
||||
* before splitting. We want to maintain large page sizes when
|
||||
* possible.
|
||||
*/
|
||||
page = alloc_from_secure_pool_order(heap, buffer, 0);
|
||||
if (!IS_ERR(page))
|
||||
goto got_page;
|
||||
|
||||
for (i = NUM_ORDERS - 2; i >= 0; i--) {
|
||||
order = orders[i];
|
||||
page = alloc_from_secure_pool_order(heap, buffer, order);
|
||||
if (IS_ERR(page))
|
||||
continue;
|
||||
|
||||
split_page(page, order);
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* Return the remaining order-0 pages to the pool.
|
||||
* SetPagePrivate flag to mark memory as secure.
|
||||
*/
|
||||
if (!IS_ERR(page)) {
|
||||
for (j = 1; j < (1 << order); j++) {
|
||||
SetPagePrivate(page + j);
|
||||
free_buffer_page(heap, buffer, page + j, 0);
|
||||
}
|
||||
}
|
||||
got_page:
|
||||
mutex_unlock(&heap->split_page_mutex);
|
||||
|
||||
return page;
|
||||
}
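Reviewer note: split_page() turns one non-compound order-N allocation into 2^N independent order-0 pages; the code above keeps the first page and recycles the remainder. A minimal sketch of that keep-one-return-the-rest step (assumes the generic ion_page_pool_free(), without the SetPagePrivate handling used above):

/* Sketch: keep the first order-0 page, hand the other 2^order - 1 back. */
static struct page *take_one_keep_rest(struct ion_page_pool *pool,
				       struct page *page, unsigned int order)
{
	unsigned int j;

	split_page(page, order);
	for (j = 1; j < (1U << order); j++)
		ion_page_pool_free(pool, page + j);

	return page;
}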
|
||||
|
||||
int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
|
||||
int vmid, int order_idx, int nr_to_scan)
|
||||
{
|
||||
int ret, freed = 0;
|
||||
int order = orders[order_idx];
|
||||
struct page *page, *tmp;
|
||||
struct sg_table sgt;
|
||||
struct scatterlist *sg;
|
||||
struct ion_page_pool *pool = sys_heap->secure_pools[vmid][order_idx];
|
||||
LIST_HEAD(pages);
|
||||
|
||||
if (nr_to_scan == 0)
|
||||
return ion_page_pool_total(pool, true);
|
||||
|
||||
while (freed < nr_to_scan) {
|
||||
page = ion_page_pool_alloc_pool_only(pool);
|
||||
if (IS_ERR(page))
|
||||
break;
|
||||
list_add(&page->lru, &pages);
|
||||
freed += (1 << order);
|
||||
}
|
||||
|
||||
if (!freed)
|
||||
return freed;
|
||||
|
||||
ret = sg_alloc_table(&sgt, (freed >> order), GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out1;
|
||||
sg = sgt.sgl;
|
||||
list_for_each_entry(page, &pages, lru) {
|
||||
sg_set_page(sg, page, (1 << order) * PAGE_SIZE, 0);
|
||||
sg_dma_address(sg) = page_to_phys(page);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true);
|
||||
if (ret == -EADDRNOTAVAIL)
|
||||
goto out3;
|
||||
else if (ret < 0)
|
||||
goto out2;
|
||||
|
||||
list_for_each_entry_safe(page, tmp, &pages, lru) {
|
||||
list_del(&page->lru);
|
||||
ion_page_pool_free_immediate(pool, page);
|
||||
}
|
||||
|
||||
sg_free_table(&sgt);
|
||||
return freed;
|
||||
|
||||
out2:
|
||||
sg_free_table(&sgt);
|
||||
out1:
|
||||
/* Restore pages to secure pool */
|
||||
list_for_each_entry_safe(page, tmp, &pages, lru) {
|
||||
list_del(&page->lru);
|
||||
ion_page_pool_free(pool, page);
|
||||
}
|
||||
return 0;
|
||||
out3:
|
||||
/*
|
||||
* The security state of the pages is unknown after a failure;
|
||||
* They can neither be added back to the secure pool nor buddy system.
|
||||
*/
|
||||
sg_free_table(&sgt);
|
||||
return 0;
|
||||
}
|
24
drivers/staging/android/ion/ion_system_secure_heap.h
Normal file
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */
#include "ion.h"
#include "ion_system_heap.h"

#ifndef _ION_SYSTEM_SECURE_HEAP_H
#define _ION_SYSTEM_SECURE_HEAP_H

int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);

struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
					  struct ion_buffer *buffer,
					  unsigned long order);

struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
					 struct ion_buffer *buffer);

int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
				int vmid, int order_idx, int nr_to_scan);

#endif /* _ION_SYSTEM_SECURE_HEAP_H */
|
726
drivers/staging/android/ion/msm_ion_dma_buf.c
Normal file
@@ -0,0 +1,726 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/msm_dma_iommu_mapping.h>
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/ion.h>
|
||||
|
||||
#include "ion.h"
|
||||
#include "msm_ion_priv.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
static void _ion_buffer_destroy(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_device *dev = buffer->dev;
|
||||
|
||||
msm_dma_buf_freed(buffer);
|
||||
|
||||
mutex_lock(&dev->buffer_lock);
|
||||
rb_erase(&buffer->node, &dev->buffers);
|
||||
mutex_unlock(&dev->buffer_lock);
|
||||
|
||||
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
|
||||
ion_heap_freelist_add(heap, buffer);
|
||||
else
|
||||
ion_buffer_destroy(buffer);
|
||||
}
|
||||
|
||||
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
|
||||
{
|
||||
void *vaddr;
|
||||
|
||||
if (buffer->kmap_cnt) {
|
||||
buffer->kmap_cnt++;
|
||||
return buffer->vaddr;
|
||||
}
|
||||
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
|
||||
if (WARN_ONCE(!vaddr,
|
||||
"heap->ops->map_kernel should return ERR_PTR on error"))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (IS_ERR(vaddr))
|
||||
return vaddr;
|
||||
buffer->vaddr = vaddr;
|
||||
buffer->kmap_cnt++;
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
|
||||
{
|
||||
if (buffer->kmap_cnt == 0) {
|
||||
pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
|
||||
current->pid);
|
||||
return;
|
||||
}
|
||||
|
||||
buffer->kmap_cnt--;
|
||||
if (!buffer->kmap_cnt) {
|
||||
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
|
||||
buffer->vaddr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static struct sg_table *dup_sg_table(struct sg_table *table)
|
||||
{
|
||||
struct sg_table *new_table;
|
||||
int ret, i;
|
||||
struct scatterlist *sg, *new_sg;
|
||||
|
||||
new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
|
||||
if (!new_table)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
|
||||
if (ret) {
|
||||
kfree(new_table);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
new_sg = new_table->sgl;
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
memcpy(new_sg, sg, sizeof(*sg));
|
||||
sg_dma_address(new_sg) = 0;
|
||||
sg_dma_len(new_sg) = 0;
|
||||
new_sg = sg_next(new_sg);
|
||||
}
|
||||
|
||||
return new_table;
|
||||
}
|
||||
|
||||
static void free_duped_table(struct sg_table *table)
|
||||
{
|
||||
sg_free_table(table);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
struct ion_dma_buf_attachment {
|
||||
struct device *dev;
|
||||
struct sg_table *table;
|
||||
struct list_head list;
|
||||
bool dma_mapped;
|
||||
};
|
||||
|
||||
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *attachment)
|
||||
{
|
||||
struct ion_dma_buf_attachment *a;
|
||||
struct sg_table *table;
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
a = kzalloc(sizeof(*a), GFP_KERNEL);
|
||||
if (!a)
|
||||
return -ENOMEM;
|
||||
|
||||
table = dup_sg_table(buffer->sg_table);
|
||||
if (IS_ERR(table)) {
|
||||
kfree(a);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
a->table = table;
|
||||
a->dev = attachment->dev;
|
||||
a->dma_mapped = false;
|
||||
INIT_LIST_HEAD(&a->list);
|
||||
|
||||
attachment->priv = a;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
list_add(&a->list, &buffer->attachments);
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *attachment)
|
||||
{
|
||||
struct ion_dma_buf_attachment *a = attachment->priv;
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
list_del(&a->list);
|
||||
mutex_unlock(&buffer->lock);
|
||||
free_duped_table(a->table);
|
||||
|
||||
kfree(a);
|
||||
}
|
||||
|
||||
bool ion_buffer_cached(struct ion_buffer *buffer)
|
||||
{
|
||||
return !!(buffer->flags & ION_FLAG_CACHED);
|
||||
}
|
||||
|
||||
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
struct ion_dma_buf_attachment *a = attachment->priv;
|
||||
struct sg_table *table;
|
||||
int count, map_attrs;
|
||||
struct ion_buffer *buffer = attachment->dmabuf->priv;
|
||||
unsigned long ino = file_inode(attachment->dmabuf->file)->i_ino;
|
||||
|
||||
table = a->table;
|
||||
|
||||
map_attrs = attachment->dma_map_attrs;
|
||||
if (!(buffer->flags & ION_FLAG_CACHED) ||
|
||||
!hlos_accessible_buffer(buffer))
|
||||
map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
|
||||
trace_ion_dma_map_cmo_skip(attachment->dev,
|
||||
ino,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
else
|
||||
trace_ion_dma_map_cmo_apply(attachment->dev,
|
||||
ino,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
|
||||
if (map_attrs & DMA_ATTR_DELAYED_UNMAP) {
|
||||
count = msm_dma_map_sg_attrs(attachment->dev, table->sgl,
|
||||
table->nents, direction,
|
||||
attachment->dmabuf, map_attrs);
|
||||
} else {
|
||||
count = dma_map_sg_attrs(attachment->dev, table->sgl,
|
||||
table->nents, direction,
|
||||
map_attrs);
|
||||
}
|
||||
|
||||
if (count <= 0) {
|
||||
mutex_unlock(&buffer->lock);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
a->dma_mapped = true;
|
||||
mutex_unlock(&buffer->lock);
|
||||
return table;
|
||||
}
|
||||
|
||||
void ion_prepare_sgl_for_force_dma_sync(struct sg_table *table)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
/*
|
||||
* this will set up dma addresses for the sglist -- it is not
|
||||
* technically correct as per the dma api -- a specific
|
||||
* device isn't really taking ownership here. However, in
|
||||
* practice on our systems the only dma_address space is
|
||||
* physical addresses.
|
||||
*/
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
sg_dma_address(sg) = sg_phys(sg);
|
||||
sg_dma_len(sg) = sg->length;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
||||
struct sg_table *table,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
int map_attrs;
|
||||
struct ion_buffer *buffer = attachment->dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a = attachment->priv;
|
||||
unsigned long ino = file_inode(attachment->dmabuf->file)->i_ino;
|
||||
|
||||
map_attrs = attachment->dma_map_attrs;
|
||||
if (!(buffer->flags & ION_FLAG_CACHED) ||
|
||||
!hlos_accessible_buffer(buffer))
|
||||
map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
|
||||
trace_ion_dma_unmap_cmo_skip(attachment->dev,
|
||||
ino,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
else
|
||||
trace_ion_dma_unmap_cmo_apply(attachment->dev,
|
||||
ino,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
|
||||
if (map_attrs & DMA_ATTR_DELAYED_UNMAP)
|
||||
msm_dma_unmap_sg_attrs(attachment->dev, table->sgl,
|
||||
table->nents, direction,
|
||||
attachment->dmabuf,
|
||||
map_attrs);
|
||||
else
|
||||
dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
|
||||
direction, map_attrs);
|
||||
a->dma_mapped = false;
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
void ion_pages_sync_for_device(struct device *dev, struct page *page,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist sg;
|
||||
|
||||
sg_init_table(&sg, 1);
|
||||
sg_set_page(&sg, page, size, 0);
|
||||
/*
|
||||
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
|
||||
* for the targeted device, but this works on the currently targeted
|
||||
* hardware.
|
||||
*/
|
||||
sg_dma_address(&sg) = page_to_phys(page);
|
||||
dma_sync_sg_for_device(dev, &sg, 1, dir);
|
||||
}
|
||||
|
||||
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
int ret = 0;
|
||||
|
||||
if (!buffer->heap->ops->map_user) {
|
||||
pr_err("%s: this heap does not define a method for mapping to userspace\n",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED))
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
/* now map it to userspace */
|
||||
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
if (ret)
|
||||
pr_err("%s: failure mapping buffer to userspace\n",
|
||||
__func__);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ion_dma_buf_release(struct dma_buf *dmabuf)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
_ion_buffer_destroy(buffer);
|
||||
kfree(dmabuf->exp_name);
|
||||
}
|
||||
|
||||
static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
void *vaddr = ERR_PTR(-EINVAL);
|
||||
|
||||
if (buffer->heap->ops->map_kernel) {
|
||||
mutex_lock(&buffer->lock);
|
||||
vaddr = ion_buffer_kmap_get(buffer);
|
||||
mutex_unlock(&buffer->lock);
|
||||
} else {
|
||||
pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
|
||||
buffer->heap->name);
|
||||
}
|
||||
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
if (buffer->heap->ops->map_kernel) {
|
||||
mutex_lock(&buffer->lock);
|
||||
ion_buffer_kmap_put(buffer);
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
}
|
||||
|
||||
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
|
||||
{
|
||||
/*
|
||||
* TODO: Once clients remove their hacks where they assume kmap(ed)
|
||||
* addresses are virtually contiguous implement this properly
|
||||
*/
|
||||
void *vaddr = ion_dma_buf_vmap(dmabuf);
|
||||
|
||||
if (IS_ERR(vaddr))
|
||||
return vaddr;
|
||||
|
||||
return vaddr + offset * PAGE_SIZE;
|
||||
}
|
||||
|
||||
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
|
||||
void *ptr)
|
||||
{
|
||||
/*
|
||||
* TODO: Once clients remove their hacks where they assume kmap(ed)
|
||||
* addresses are virtually contiguous implement this properly
|
||||
*/
|
||||
ion_dma_buf_vunmap(dmabuf, ptr);
|
||||
}
|
||||
|
||||
static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
|
||||
unsigned int nents, unsigned long offset,
|
||||
unsigned long length,
|
||||
enum dma_data_direction dir, bool for_cpu)
|
||||
{
|
||||
int i;
|
||||
struct scatterlist *sg;
|
||||
unsigned int len = 0;
|
||||
dma_addr_t sg_dma_addr;
|
||||
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
if (sg_dma_len(sg) == 0)
|
||||
break;
|
||||
|
||||
if (i > 0) {
|
||||
pr_warn("Partial cmo only supported with 1 segment\n"
|
||||
"is dma_set_max_seg_size being set on dev:%s\n",
|
||||
dev_name(dev));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
unsigned int sg_offset, sg_left, size = 0;
|
||||
|
||||
if (i == 0)
|
||||
sg_dma_addr = sg_dma_address(sg);
|
||||
|
||||
len += sg->length;
|
||||
if (len <= offset) {
|
||||
sg_dma_addr += sg->length;
|
||||
continue;
|
||||
}
|
||||
|
||||
sg_left = len - offset;
|
||||
sg_offset = sg->length - sg_left;
|
||||
|
||||
size = (length < sg_left) ? length : sg_left;
|
||||
if (for_cpu)
|
||||
dma_sync_single_range_for_cpu(dev, sg_dma_addr,
|
||||
sg_offset, size, dir);
|
||||
else
|
||||
dma_sync_single_range_for_device(dev, sg_dma_addr,
|
||||
sg_offset, size, dir);
|
||||
|
||||
offset += size;
|
||||
length -= size;
|
||||
sg_dma_addr += sg->length;
|
||||
|
||||
if (length == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
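Reviewer note: ion_sgl_sync_range() walks the table until it reaches the requested window and then syncs only the overlapping bytes. A usage sketch (hypothetical caller; the helper itself is defined above) that syncs just the first 4 KiB of an already mapped buffer for CPU reads:

#include <linux/sizes.h>

/* Sketch: partial cache maintenance over the head of a mapped sg table. */
static int sync_head_for_cpu(struct device *dev, struct sg_table *table)
{
	return ion_sgl_sync_range(dev, table->sgl, table->nents,
				  0, SZ_4K, DMA_FROM_DEVICE, true);
}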
|
||||
|
||||
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
unsigned long ino = file_inode(dmabuf->file)->i_ino;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, ino,
|
||||
ion_buffer_cached(buffer),
|
||||
false, direction);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, ino, false, true,
|
||||
direction);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = msm_ion_heap_device(buffer->heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
dma_sync_sg_for_cpu(dev, table->sgl, table->nents, direction);
|
||||
|
||||
trace_ion_begin_cpu_access_cmo_apply(dev, ino, true, true,
|
||||
direction);
|
||||
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_begin_cpu_access_notmapped(a->dev,
|
||||
ino,
|
||||
true, true,
|
||||
direction);
|
||||
continue;
|
||||
}
|
||||
|
||||
dma_sync_sg_for_cpu(a->dev, a->table->sgl,
|
||||
a->table->nents, direction);
|
||||
|
||||
trace_ion_begin_cpu_access_cmo_apply(a->dev, ino, true,
|
||||
true, direction);
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
unsigned long ino = file_inode(dmabuf->file)->i_ino;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, ino,
|
||||
ion_buffer_cached(buffer),
|
||||
false, direction);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, ino, false, true,
|
||||
direction);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = msm_ion_heap_device(buffer->heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
dma_sync_sg_for_device(dev, table->sgl, table->nents,
|
||||
direction);
|
||||
|
||||
trace_ion_end_cpu_access_cmo_apply(dev, ino, true,
|
||||
true, direction);
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_end_cpu_access_notmapped(a->dev,
|
||||
ino,
|
||||
true, true,
|
||||
direction);
|
||||
continue;
|
||||
}
|
||||
|
||||
dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
|
||||
direction);
|
||||
|
||||
trace_ion_end_cpu_access_cmo_apply(a->dev, ino, true,
|
||||
true, direction);
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction dir,
|
||||
unsigned int offset,
|
||||
unsigned int len)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
unsigned long ino = file_inode(dmabuf->file)->i_ino;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, ino,
|
||||
ion_buffer_cached(buffer),
|
||||
false, dir);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, ino, false, true,
|
||||
dir);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = msm_ion_heap_device(buffer->heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
|
||||
offset, len, dir, true);
|
||||
|
||||
if (!ret)
|
||||
trace_ion_begin_cpu_access_cmo_apply(dev, ino,
|
||||
true, true, dir);
|
||||
else
|
||||
trace_ion_begin_cpu_access_cmo_skip(dev, ino,
|
||||
true, true, dir);
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
int tmp = 0;
|
||||
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_begin_cpu_access_notmapped(a->dev,
|
||||
ino,
|
||||
true, true,
|
||||
dir);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
|
||||
offset, len, dir, true);
|
||||
|
||||
if (!tmp) {
|
||||
trace_ion_begin_cpu_access_cmo_apply(a->dev, ino,
|
||||
true, true, dir);
|
||||
} else {
|
||||
trace_ion_begin_cpu_access_cmo_skip(a->dev, ino,
|
||||
true, true, dir);
|
||||
ret = tmp;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction,
|
||||
unsigned int offset,
|
||||
unsigned int len)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
unsigned long ino = file_inode(dmabuf->file)->i_ino;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, ino,
|
||||
ion_buffer_cached(buffer),
|
||||
false, direction);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, ino, false,
|
||||
true, direction);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = msm_ion_heap_device(buffer->heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
|
||||
offset, len, direction, false);
|
||||
|
||||
if (!ret)
|
||||
trace_ion_end_cpu_access_cmo_apply(dev, ino,
|
||||
true, true,
|
||||
direction);
|
||||
else
|
||||
trace_ion_end_cpu_access_cmo_skip(dev, ino,
|
||||
true, true,
|
||||
direction);
|
||||
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
int tmp = 0;
|
||||
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_end_cpu_access_notmapped(a->dev,
|
||||
ino,
|
||||
true, true,
|
||||
direction);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
|
||||
offset, len, direction, false);
|
||||
|
||||
if (!tmp) {
|
||||
trace_ion_end_cpu_access_cmo_apply(a->dev, ino,
|
||||
true, true,
|
||||
direction);
|
||||
|
||||
} else {
|
||||
trace_ion_end_cpu_access_cmo_skip(a->dev, ino, true,
|
||||
true, direction);
|
||||
ret = tmp;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
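/*
 * Illustrative sketch, not part of this patch: syncing only a sub-range of
 * a large ION buffer through the partial begin/end callbacks above. The
 * ops are invoked directly here for clarity; in-tree clients would normally
 * go through the corresponding dma-buf core wrappers where available.
 */
static int example_partial_sync(struct dma_buf *dmabuf,
				unsigned int offset, unsigned int len)
{
	int ret;

	ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, DMA_BIDIRECTIONAL,
						    offset, len);
	if (ret)
		return ret;

	/* ... CPU touches bytes [offset, offset + len) here ... */

	return dmabuf->ops->end_cpu_access_partial(dmabuf, DMA_BIDIRECTIONAL,
						   offset, len);
}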
|
||||
|
||||
static int ion_dma_buf_get_flags(struct dma_buf *dmabuf,
|
||||
unsigned long *flags)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
*flags = buffer->flags;
|
||||
|
||||
return 0;
|
||||
}
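/*
 * Illustrative sketch, not part of this patch: an importer can use the
 * get_flags callback above to decide whether a buffer needs any cache
 * maintenance at all. ION_FLAG_CACHED comes from the ION uapi header.
 */
static bool example_buffer_is_cached(struct dma_buf *dmabuf)
{
	unsigned long flags = 0;

	if (dmabuf->ops->get_flags && !dmabuf->ops->get_flags(dmabuf, &flags))
		return !!(flags & ION_FLAG_CACHED);

	return false;
}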
|
||||
|
||||
const struct dma_buf_ops msm_ion_dma_buf_ops = {
|
||||
.map_dma_buf = ion_map_dma_buf,
|
||||
.unmap_dma_buf = ion_unmap_dma_buf,
|
||||
.mmap = ion_mmap,
|
||||
.release = ion_dma_buf_release,
|
||||
.attach = ion_dma_buf_attach,
|
||||
.detach = ion_dma_buf_detatch,
|
||||
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
|
||||
.end_cpu_access = ion_dma_buf_end_cpu_access,
|
||||
.begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial,
|
||||
.end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial,
|
||||
.map = ion_dma_buf_kmap,
|
||||
.unmap = ion_dma_buf_kunmap,
|
||||
.vmap = ion_dma_buf_vmap,
|
||||
.vunmap = ion_dma_buf_vunmap,
|
||||
.get_flags = ion_dma_buf_get_flags,
|
||||
};
|
504
drivers/staging/android/ion/msm_ion_of.c
Normal file
@@ -0,0 +1,504 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_reserved_mem.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/cma.h>
|
||||
#include <linux/msm_ion_of.h>
|
||||
#include "ion.h"
|
||||
#include "msm_ion_priv.h"
|
||||
|
||||
#define ION_COMPAT_STR "qcom,msm-ion"
|
||||
#define ION_NOT_READY 0
|
||||
#define ION_INIT_FAILURE 1
|
||||
#define ION_READY 2
|
||||
|
||||
static int num_heaps;
|
||||
static int status = ION_NOT_READY;
|
||||
static struct ion_heap **heaps;
|
||||
|
||||
struct ion_heap_desc {
|
||||
unsigned int id;
|
||||
enum ion_heap_type type;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static struct ion_heap_desc ion_heap_meta[] = {
|
||||
{
|
||||
.id = ION_SYSTEM_HEAP_ID,
|
||||
.name = ION_SYSTEM_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SECURE_HEAP_ID,
|
||||
.name = ION_SECURE_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_CP_MM_HEAP_ID,
|
||||
.name = ION_MM_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_USER_CONTIG_HEAP_ID,
|
||||
.name = ION_USER_CONTIG_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_QSECOM_HEAP_ID,
|
||||
.name = ION_QSECOM_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_QSECOM_TA_HEAP_ID,
|
||||
.name = ION_QSECOM_TA_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SPSS_HEAP_ID,
|
||||
.name = ION_SPSS_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_ADSP_HEAP_ID,
|
||||
.name = ION_ADSP_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SECURE_DISPLAY_HEAP_ID,
|
||||
.name = ION_SECURE_DISPLAY_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_AUDIO_HEAP_ID,
|
||||
.name = ION_AUDIO_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SECURE_CARVEOUT_HEAP_ID,
|
||||
.name = ION_SECURE_CARVEOUT_HEAP_NAME,
|
||||
}
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
|
||||
.heap_type = ION_HEAP_TYPE_##h, }
|
||||
|
||||
static struct heap_types_info {
|
||||
const char *name;
|
||||
int heap_type;
|
||||
} heap_types_info[] = {
|
||||
MAKE_HEAP_TYPE_MAPPING(SYSTEM),
|
||||
MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
|
||||
MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
|
||||
MAKE_HEAP_TYPE_MAPPING(SECURE_CARVEOUT),
|
||||
MAKE_HEAP_TYPE_MAPPING(DMA),
|
||||
MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
|
||||
MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
|
||||
};
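/*
 * Illustrative sketch, not part of this patch: the strings above are matched
 * against the "qcom,ion-heap-type" device-tree property of each child node,
 * while the heap id comes from its "reg" property. A hypothetical node for
 * the system heap (id 25) would look roughly like:
 *
 *	qcom,ion-heap@25 {
 *		reg = <25>;
 *		qcom,ion-heap-type = "SYSTEM";
 *	};
 *
 * which MAKE_HEAP_TYPE_MAPPING(SYSTEM) resolves to ION_HEAP_TYPE_SYSTEM.
 */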
|
||||
|
||||
static struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
|
||||
{
|
||||
struct ion_heap *heap = NULL;
|
||||
int heap_type = heap_data->type;
|
||||
|
||||
switch (heap_type) {
|
||||
case ION_HEAP_TYPE_SYSTEM_CONTIG:
|
||||
pr_err("%s: Heap type is disabled: %d\n", __func__,
|
||||
heap_data->type);
|
||||
break;
|
||||
case ION_HEAP_TYPE_SYSTEM:
|
||||
heap = ion_system_heap_create(heap_data);
|
||||
break;
|
||||
case ION_HEAP_TYPE_CARVEOUT:
|
||||
heap = ion_carveout_heap_create(heap_data);
|
||||
break;
|
||||
#ifdef CONFIG_CMA
|
||||
case ION_HEAP_TYPE_DMA:
|
||||
heap = ion_cma_heap_create(heap_data);
|
||||
break;
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA:
|
||||
heap = ion_cma_secure_heap_create(heap_data);
|
||||
break;
|
||||
#endif
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
|
||||
heap = ion_system_secure_heap_create(heap_data);
|
||||
break;
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT:
|
||||
heap = ion_secure_carveout_heap_create(heap_data);
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: Invalid heap type %d\n", __func__,
|
||||
heap_data->type);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (IS_ERR_OR_NULL(heap)) {
|
||||
pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
|
||||
__func__, heap_data->name, heap_data->type,
|
||||
&heap_data->base, heap_data->size);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
heap->name = heap_data->name;
|
||||
heap->id = heap_data->id;
|
||||
return heap;
|
||||
}
|
||||
|
||||
struct device *msm_ion_heap_device(struct ion_heap *heap)
|
||||
{
|
||||
if (!heap)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (status == ION_NOT_READY)
|
||||
return ERR_PTR(-EPROBE_DEFER);
|
||||
else if (status == ION_INIT_FAILURE)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
return to_msm_ion_heap(heap)->dev;
|
||||
}
|
||||
|
||||
struct device *msm_ion_heap_device_by_id(int heap_id)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
|
||||
if (status == ION_NOT_READY)
|
||||
return ERR_PTR(-EPROBE_DEFER);
|
||||
else if (status == ION_INIT_FAILURE)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
heap = get_ion_heap(heap_id);
|
||||
if (heap)
|
||||
return msm_ion_heap_device(heap);
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
EXPORT_SYMBOL(msm_ion_heap_device_by_id);
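/*
 * Illustrative sketch, not part of this patch: a client driver looking up
 * the struct device behind an ION heap (e.g. to reuse its DMA or CMA
 * configuration). The -EPROBE_DEFER case published by msm_ion_probe() must
 * be handled; ION_ADSP_HEAP_ID comes from the uapi msm_ion.h added below.
 */
static int example_get_adsp_heap_dev(struct device **out)
{
	struct device *dev = msm_ion_heap_device_by_id(ION_ADSP_HEAP_ID);

	if (IS_ERR(dev))
		return PTR_ERR(dev);	/* may be -EPROBE_DEFER until ION is ready */

	*out = dev;
	return 0;
}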
|
||||
|
||||
static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
|
||||
int *heap_type)
|
||||
{
|
||||
const char *name;
|
||||
int i, ret = -EINVAL;
|
||||
|
||||
ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
|
||||
if (ret)
|
||||
goto out;
|
||||
for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
|
||||
if (!strcmp(heap_types_info[i].name, name)) {
|
||||
*heap_type = heap_types_info[i].heap_type;
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
|
||||
name, __FILE__);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int msm_ion_populate_heap(struct device_node *node,
|
||||
struct ion_platform_heap *heap)
|
||||
{
|
||||
unsigned int i;
|
||||
int ret = -EINVAL, heap_type = -1;
|
||||
unsigned int len = ARRAY_SIZE(ion_heap_meta);
|
||||
|
||||
for (i = 0; i < len; ++i) {
|
||||
if (ion_heap_meta[i].id == heap->id) {
|
||||
heap->name = ion_heap_meta[i].name;
|
||||
ret = msm_ion_get_heap_type_from_dt_node(node,
|
||||
&heap_type);
|
||||
if (ret)
|
||||
break;
|
||||
heap->type = heap_type;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (ret)
|
||||
pr_err("%s: Unable to populate heap, error: %d\n", __func__,
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void free_pdata(const struct ion_platform_data *pdata)
|
||||
{
|
||||
kfree(pdata->heaps);
|
||||
kfree(pdata);
|
||||
}
|
||||
|
||||
static int init_reserved_memory(struct device *dev, struct device_node *pnode)
|
||||
{
|
||||
int ret = 0;
|
||||
struct reserved_mem *rmem = of_reserved_mem_lookup(pnode);
|
||||
|
||||
if (!rmem) {
|
||||
dev_err(dev, "Failed to find reserved memory region\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* We only need to call this when the memory-region is managed by
|
||||
* a reserved memory region driver (e.g. CMA, coherent, etc). In that
|
||||
* case, they will have ops for device specific initialization for
|
||||
* the memory region. Otherwise, we have a pure carveout, which needs
|
||||
* not be initialized.
|
||||
*/
|
||||
if (rmem->ops) {
|
||||
ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to initialize memory region\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void release_reserved_memory(struct device *dev,
|
||||
struct device_node *pnode)
|
||||
{
|
||||
struct reserved_mem *rmem = of_reserved_mem_lookup(pnode);
|
||||
|
||||
if (rmem && rmem->ops)
|
||||
of_reserved_mem_device_release(dev);
|
||||
}
|
||||
|
||||
static void release_reserved_memory_regions(struct ion_platform_heap *heaps,
|
||||
int idx)
|
||||
{
|
||||
struct device *dev;
|
||||
struct device_node *node, *mem_node;
|
||||
|
||||
for (idx = idx - 1; idx >= 0; idx--) {
|
||||
dev = heaps[idx].priv;
|
||||
node = dev->of_node;
|
||||
mem_node = of_parse_phandle(node, "memory-region", 0);
|
||||
|
||||
if (mem_node)
|
||||
release_reserved_memory(dev, mem_node);
|
||||
}
|
||||
}
|
||||
|
||||
static int msm_ion_get_heap_dt_data(struct device_node *node,
|
||||
struct ion_platform_heap *heap)
|
||||
{
|
||||
struct device_node *pnode;
|
||||
struct device *dev = heap->priv;
|
||||
int ret = -EINVAL;
|
||||
|
||||
pnode = of_parse_phandle(node, "memory-region", 0);
|
||||
if (pnode) {
|
||||
const __be32 *basep;
|
||||
u64 size = 0;
|
||||
u64 base = 0;
|
||||
|
||||
ret = init_reserved_memory(dev, pnode);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
basep = of_get_address(pnode, 0, &size, NULL);
|
||||
if (!basep) {
|
||||
if (dev->cma_area) {
|
||||
base = cma_get_base(dev->cma_area);
|
||||
size = cma_get_size(dev->cma_area);
|
||||
ret = 0;
|
||||
} else if (dev->dma_mem) {
|
||||
base = dma_get_device_base(dev, dev->dma_mem);
|
||||
size = dma_get_size(dev->dma_mem);
|
||||
ret = 0;
|
||||
}
|
||||
} else {
|
||||
base = of_translate_address(pnode, basep);
|
||||
if (base != OF_BAD_ADDR)
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
heap->base = base;
|
||||
heap->size = size;
|
||||
}
|
||||
of_node_put(pnode);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
out:
|
||||
WARN(ret, "Failed to parse DT node for heap %s\n", heap->name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
|
||||
{
|
||||
struct ion_platform_data *pdata = 0;
|
||||
struct ion_platform_heap *heaps = NULL;
|
||||
struct device_node *node;
|
||||
struct platform_device *new_dev = NULL;
|
||||
const struct device_node *dt_node = pdev->dev.of_node;
|
||||
const __be32 *val;
|
||||
int ret = -EINVAL;
|
||||
u32 num_heaps = 0;
|
||||
int idx = 0;
|
||||
|
||||
for_each_available_child_of_node(dt_node, node)
|
||||
num_heaps++;
|
||||
|
||||
if (!num_heaps)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
|
||||
if (!pdata)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
heaps = kcalloc(num_heaps, sizeof(struct ion_platform_heap),
|
||||
GFP_KERNEL);
|
||||
if (!heaps) {
|
||||
kfree(pdata);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
pdata->heaps = heaps;
|
||||
pdata->nr = num_heaps;
|
||||
|
||||
for_each_available_child_of_node(dt_node, node) {
|
||||
new_dev = of_platform_device_create(node, NULL, &pdev->dev);
|
||||
if (!new_dev) {
|
||||
pr_err("Failed to create device %s\n", node->name);
|
||||
goto free_heaps;
|
||||
}
|
||||
of_dma_configure(&new_dev->dev, node, true);
|
||||
|
||||
pdata->heaps[idx].priv = &new_dev->dev;
|
||||
val = of_get_address(node, 0, NULL, NULL);
|
||||
if (!val) {
|
||||
pr_err("%s: Unable to find reg key\n", __func__);
|
||||
goto free_heaps;
|
||||
}
|
||||
pdata->heaps[idx].id = (u32)of_read_number(val, 1);
|
||||
|
||||
ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
|
||||
if (ret)
|
||||
goto free_heaps;
|
||||
|
||||
ret = msm_ion_get_heap_dt_data(node, &pdata->heaps[idx]);
|
||||
if (ret)
|
||||
goto free_heaps;
|
||||
|
||||
++idx;
|
||||
}
|
||||
return pdata;
|
||||
|
||||
free_heaps:
|
||||
release_reserved_memory_regions(pdata->heaps, idx);
|
||||
free_pdata(pdata);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
#else
|
||||
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void free_pdata(const struct ion_platform_data *pdata)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
struct ion_heap *get_ion_heap(int heap_id)
|
||||
{
|
||||
int i;
|
||||
struct ion_heap *heap;
|
||||
|
||||
for (i = 0; i < num_heaps; i++) {
|
||||
heap = heaps[i];
|
||||
if (heap->id == heap_id)
|
||||
return heap;
|
||||
}
|
||||
|
||||
pr_err("%s: heap_id %d not found\n", __func__, heap_id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int msm_ion_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ion_platform_data *pdata;
|
||||
unsigned int pdata_needs_to_be_freed;
|
||||
int err = -1;
|
||||
int i;
|
||||
|
||||
if (pdev->dev.of_node) {
|
||||
pdata = msm_ion_parse_dt(pdev);
|
||||
if (IS_ERR(pdata)) {
|
||||
status = ION_INIT_FAILURE;
|
||||
return PTR_ERR(pdata);
|
||||
}
|
||||
pdata_needs_to_be_freed = 1;
|
||||
} else {
|
||||
pdata = pdev->dev.platform_data;
|
||||
pdata_needs_to_be_freed = 0;
|
||||
}
|
||||
|
||||
num_heaps = pdata->nr;
|
||||
|
||||
heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
|
||||
|
||||
if (!heaps) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* create the heaps as specified in the board file */
|
||||
for (i = 0; i < num_heaps; i++) {
|
||||
struct ion_platform_heap *heap_data = &pdata->heaps[i];
|
||||
|
||||
heaps[i] = ion_heap_create(heap_data);
|
||||
if (IS_ERR_OR_NULL(heaps[i])) {
|
||||
heaps[i] = 0;
|
||||
continue;
|
||||
} else {
|
||||
if (heap_data->size)
|
||||
pr_info("ION heap %s created at %pa with size %zx\n",
|
||||
heap_data->name,
|
||||
&heap_data->base,
|
||||
heap_data->size);
|
||||
else
|
||||
pr_info("ION heap %s created\n",
|
||||
heap_data->name);
|
||||
}
|
||||
|
||||
ion_device_add_heap(heaps[i]);
|
||||
}
|
||||
if (pdata_needs_to_be_freed)
|
||||
free_pdata(pdata);
|
||||
|
||||
/*
|
||||
* Publish the status at the end, so our interfaces know that they
|
||||
* can stop returning -EPROBE_DEFER.
|
||||
*/
|
||||
status = ION_READY;
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
kfree(heaps);
|
||||
if (pdata_needs_to_be_freed)
|
||||
free_pdata(pdata);
|
||||
status = ION_INIT_FAILURE;
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct of_device_id msm_ion_match_table[] = {
|
||||
{.compatible = ION_COMPAT_STR},
|
||||
{},
|
||||
};
|
||||
|
||||
static struct platform_driver msm_ion_driver = {
|
||||
.probe = msm_ion_probe,
|
||||
.driver = {
|
||||
.name = "ion-msm",
|
||||
.of_match_table = msm_ion_match_table,
|
||||
},
|
||||
};
|
||||
module_platform_driver(msm_ion_driver);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
165
drivers/staging/android/ion/msm_ion_priv.h
Normal file
@@ -0,0 +1,165 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _MSM_ION_PRIV_H
|
||||
#define _MSM_ION_PRIV_H
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/dma-direction.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/shrinker.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/vmstat.h>
|
||||
#include "../uapi/ion.h"
|
||||
#include "../uapi/msm_ion.h"
|
||||
|
||||
#define ION_ADSP_HEAP_NAME "adsp"
|
||||
#define ION_SYSTEM_HEAP_NAME "system"
|
||||
#define ION_MM_HEAP_NAME "mm"
|
||||
#define ION_SPSS_HEAP_NAME "spss"
|
||||
#define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout"
|
||||
#define ION_USER_CONTIG_HEAP_NAME "user_contig"
|
||||
#define ION_QSECOM_HEAP_NAME "qsecom"
|
||||
#define ION_QSECOM_TA_HEAP_NAME "qsecom_ta"
|
||||
#define ION_SECURE_HEAP_NAME "secure_heap"
|
||||
#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
|
||||
#define ION_AUDIO_HEAP_NAME "audio"
|
||||
|
||||
/**
 * Debug feature. Make ION allocations DMA ready to help identify clients
 * who are wrongly depending on ION allocations being DMA ready.
 *
 * Set to 'false' by default since ION allocations are no longer required
 * to be DMA ready.
 */
|
||||
#ifdef CONFIG_ION_FORCE_DMA_SYNC
|
||||
#define MAKE_ION_ALLOC_DMA_READY 1
|
||||
#else
|
||||
#define MAKE_ION_ALLOC_DMA_READY 0
|
||||
#endif
|
||||
|
||||
#define to_msm_ion_heap(x) container_of(x, struct msm_ion_heap, ion_heap)
|
||||
|
||||
/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type: type of the heap from ion_heap_type enum
 * @id: unique identifier for heap. When allocating, higher numbers
 * will be allocated from first. At allocation these are passed
 * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
 * @name: used for debug purposes
 * @base: base address of heap in physical memory if applicable
 * @size: size of the heap in bytes if applicable
 * @align: required alignment in physical memory if applicable
 * @priv: private info passed from the board file
 *
 * Provided by the board file.
 */
|
||||
struct ion_platform_heap {
|
||||
enum ion_heap_type type;
|
||||
unsigned int id;
|
||||
const char *name;
|
||||
phys_addr_t base;
|
||||
size_t size;
|
||||
phys_addr_t align;
|
||||
void *priv;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct msm_ion_heap - defines an ion heap, as well as additional information
|
||||
* relevant to the heap.
|
||||
* @dev: the device structure associated with the heap
|
||||
* @ion_heap: ion heap
|
||||
*
|
||||
*/
|
||||
struct msm_ion_heap {
|
||||
struct device *dev;
|
||||
struct ion_heap ion_heap;
|
||||
};
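/*
 * Illustrative sketch, not part of this patch: heap implementations in this
 * series are expected to embed struct msm_ion_heap so that
 * to_msm_ion_heap()/msm_ion_heap_device() can recover the backing struct
 * device from a plain ion_heap pointer. The wrapper type is hypothetical.
 */
struct example_msm_heap {
	struct msm_ion_heap msm_heap;	/* provides ->dev and ->ion_heap */
	phys_addr_t base;
	size_t size;
};

static inline struct device *example_heap_dev(struct ion_heap *heap)
{
	return to_msm_ion_heap(heap)->dev;
}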
|
||||
|
||||
/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @nr: number of structures in the array
 * @heaps: array of platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
|
||||
struct ion_platform_data {
|
||||
int nr;
|
||||
struct ion_platform_heap *heaps;
|
||||
};
|
||||
|
||||
/**
 * ion_buffer_cached - check whether an ion buffer is cached
 * @buffer: the buffer to check
 *
 * Returns true if this ion buffer is cached.
 */
|
||||
bool ion_buffer_cached(struct ion_buffer *buffer);
|
||||
|
||||
/**
 * Functions for creating and destroying the built-in ion heaps.
 * Architectures can add their own custom architecture-specific
 * heaps as appropriate.
 */
|
||||
|
||||
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused);
|
||||
|
||||
struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
|
||||
|
||||
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap);
|
||||
|
||||
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data);
|
||||
|
||||
struct ion_heap
|
||||
*ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
|
||||
|
||||
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data);
|
||||
|
||||
#ifdef CONFIG_CMA
|
||||
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
|
||||
|
||||
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data);
|
||||
#else
|
||||
static inline struct ion_heap
|
||||
*ion_cma_secure_heap_create(struct ion_platform_heap *h)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct device *msm_ion_heap_device(struct ion_heap *heap);
|
||||
|
||||
struct ion_heap *get_ion_heap(int heap_id);
|
||||
|
||||
void ion_prepare_sgl_for_force_dma_sync(struct sg_table *table);
|
||||
|
||||
/**
|
||||
* ion_pages_sync_for_device - cache flush pages for use with the specified
|
||||
* device
|
||||
* @dev: the device the pages will be used with
|
||||
* @page: the first page to be flushed
|
||||
* @size: size in bytes of region to be flushed
|
||||
* @dir: direction of dma transfer
|
||||
*/
|
||||
void ion_pages_sync_for_device(struct device *dev, struct page *page,
|
||||
size_t size, enum dma_data_direction dir);
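/*
 * Illustrative sketch, not part of this patch: how a heap allocation path
 * is expected to combine MAKE_ION_ALLOC_DMA_READY with the helper above so
 * that freshly allocated pages are clean in the caches when the debug
 * option is enabled. dev/page/size come from the allocating heap.
 */
static inline void example_make_dma_ready(struct device *dev,
					  struct page *page, size_t size)
{
	if (MAKE_ION_ALLOC_DMA_READY)
		ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
}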
|
||||
|
||||
extern const struct dma_buf_ops msm_ion_dma_buf_ops;
|
||||
#endif /* _MSM_ION_PRIV_H */
|
122
drivers/staging/android/uapi/msm_ion.h
Normal file
@@ -0,0 +1,122 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
/*
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
#ifndef _UAPI_LINUX_MSM_ION_H
|
||||
#define _UAPI_LINUX_MSM_ION_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#define ION_BIT(nr) (1U << (nr))
|
||||
|
||||
/**
|
||||
* TARGET_ION_ABI_VERSION can be used by user space clients to ensure that at
|
||||
* compile time only their code which uses the appropriate ION APIs for
|
||||
* this kernel is included.
|
||||
*/
|
||||
#define TARGET_ION_ABI_VERSION 2
|
||||
|
||||
enum msm_ion_heap_types {
|
||||
ION_HEAP_TYPE_MSM_START = 6,
|
||||
ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
|
||||
ION_HEAP_TYPE_SYSTEM_SECURE,
|
||||
ION_HEAP_TYPE_HYP_CMA,
|
||||
ION_HEAP_TYPE_SECURE_CARVEOUT,
|
||||
};
|
||||
|
||||
/**
 * These are the only ids that should be used for ION heap ids.
 * The ids listed are the order in which allocation will be attempted
 * if specified. Don't swap the order of heap ids unless you know what
 * you are doing!
 * Ids are spaced on purpose to allow new ids to be inserted in-between (for
 * possible fallbacks).
 */
|
||||
|
||||
enum ion_heap_ids {
|
||||
INVALID_HEAP_ID = -1,
|
||||
ION_CP_MM_HEAP_ID = 8,
|
||||
ION_SECURE_HEAP_ID = 9,
|
||||
ION_SECURE_DISPLAY_HEAP_ID = 10,
|
||||
ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
|
||||
ION_ADSP_HEAP_ID = 22,
|
||||
ION_SYSTEM_HEAP_ID = 25,
|
||||
ION_QSECOM_HEAP_ID = 27,
|
||||
ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
|
||||
};
|
||||
|
||||
/**
|
||||
* Newly added heap ids have to be #define(d) since all API changes must
|
||||
* include a new #define.
|
||||
*/
|
||||
#define ION_SECURE_CARVEOUT_HEAP_ID 14
|
||||
#define ION_QSECOM_TA_HEAP_ID 19
|
||||
#define ION_AUDIO_HEAP_ID 28
|
||||
#define ION_CAMERA_HEAP_ID 20
|
||||
#define ION_USER_CONTIG_HEAP_ID 26
|
||||
/**
|
||||
* Flags to be used when allocating from the secure heap for
|
||||
* content protection
|
||||
*/
|
||||
#define ION_FLAG_CP_TOUCH ION_BIT(17)
|
||||
#define ION_FLAG_CP_BITSTREAM ION_BIT(18)
|
||||
#define ION_FLAG_CP_PIXEL ION_BIT(19)
|
||||
#define ION_FLAG_CP_NON_PIXEL ION_BIT(20)
|
||||
#define ION_FLAG_CP_CAMERA ION_BIT(21)
|
||||
#define ION_FLAG_CP_HLOS ION_BIT(22)
|
||||
#define ION_FLAG_CP_SPSS_SP ION_BIT(23)
|
||||
#define ION_FLAG_CP_SPSS_SP_SHARED ION_BIT(24)
|
||||
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
|
||||
#define ION_FLAG_CP_APP ION_BIT(26)
|
||||
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
|
||||
/* ION_FLAG_ALLOW_NON_CONTIG uses ION_BIT(28) */
|
||||
#define ION_FLAG_CP_CDSP ION_BIT(29)
|
||||
#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
|
||||
|
||||
#define ION_FLAGS_CP_MASK 0x6FFE0000
|
||||
|
||||
/**
 * Flag to allow non-contiguous allocation of memory from a secure
 * heap
 */
|
||||
#define ION_FLAG_ALLOW_NON_CONTIG ION_BIT(28)
|
||||
|
||||
/**
 * Flag to use when allocating to indicate that a heap is secure.
 * Do NOT use the BIT() macro since it is only defined inside #ifdef __KERNEL__.
 */
|
||||
#define ION_FLAG_SECURE ION_BIT(ION_HEAP_ID_RESERVED)
|
||||
|
||||
/*
 * Used in conjunction with heaps which pool memory to force an allocation
 * to come from the page allocator directly instead of from the pool
 */
|
||||
#define ION_FLAG_POOL_FORCE_ALLOC ION_BIT(16)
|
||||
|
||||
/**
|
||||
* Macro should be used with ion_heap_ids defined above.
|
||||
*/
|
||||
#define ION_HEAP(bit) ION_BIT(bit)
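/*
 * Illustrative user-space sketch, not part of this patch: allocating a
 * cached buffer from the system heap through /dev/ion with the ids and
 * flags above. Assumes the standard staging ION_IOC_ALLOC interface and
 * ION_FLAG_CACHED from uapi/ion.h; error handling is minimal.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>
#include <linux/msm_ion.h>

static int example_alloc_system(size_t len)
{
	struct ion_allocation_data data = {
		.len = len,
		.heap_id_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
		.flags = ION_FLAG_CACHED,
	};
	int ret, ion_fd = open("/dev/ion", O_RDONLY);

	if (ion_fd < 0)
		return -1;

	ret = ioctl(ion_fd, ION_IOC_ALLOC, &data);
	close(ion_fd);

	return ret < 0 ? -1 : (int)data.fd;	/* dma-buf fd on success */
}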
|
||||
|
||||
#define ION_IOC_MSM_MAGIC 'M'
|
||||
|
||||
struct ion_prefetch_regions {
|
||||
__u64 sizes;
|
||||
__u32 vmid;
|
||||
__u32 nr_sizes;
|
||||
};
|
||||
|
||||
struct ion_prefetch_data {
|
||||
__u64 len;
|
||||
__u64 regions;
|
||||
__u32 heap_id;
|
||||
__u32 nr_regions;
|
||||
};
|
||||
|
||||
#define ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \
|
||||
struct ion_prefetch_data)
|
||||
|
||||
#define ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \
|
||||
struct ion_prefetch_data)
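/*
 * Illustrative user-space sketch, not part of this patch: asking a heap to
 * prefetch memory for one VMID region via ION_IOC_PREFETCH above. The way
 * the 'regions' and 'sizes' fields carry user pointers follows the msm
 * system-secure heap's expectations and is an assumption here; the VMID
 * value is a placeholder and ion_fd is an already-open /dev/ion descriptor.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/msm_ion.h>

static int example_prefetch(int ion_fd, __u32 heap_id, __u64 bytes)
{
	__u64 sizes[1] = { bytes };
	struct ion_prefetch_regions region = {
		.sizes = (__u64)(uintptr_t)sizes,
		.vmid = 0,			/* placeholder VMID */
		.nr_sizes = 1,
	};
	struct ion_prefetch_data data = {
		.len = bytes,
		.regions = (__u64)(uintptr_t)&region,
		.heap_id = heap_id,
		.nr_regions = 1,
	};

	return ioctl(ion_fd, ION_IOC_PREFETCH, &data);
}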
|
||||
|
||||
#endif /* _UAPI_LINUX_MSM_ION_H */
|
30
include/linux/dma-buf-ref.h
Normal file
@@ -0,0 +1,30 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _DMA_BUF_REF_H
|
||||
#define _DMA_BUF_REF_H
|
||||
|
||||
struct msm_dma_buf;
|
||||
struct seq_file;
|
||||
|
||||
#ifdef CONFIG_DEBUG_DMA_BUF_REF
|
||||
void dma_buf_ref_init(struct msm_dma_buf *b);
|
||||
void dma_buf_ref_destroy(struct msm_dma_buf *b);
|
||||
void dma_buf_ref_mod(struct msm_dma_buf *b, int nr);
|
||||
int dma_buf_ref_show(struct seq_file *s, struct msm_dma_buf *msm_dma_buf);
|
||||
|
||||
#else
|
||||
static inline void dma_buf_ref_init(struct msm_dma_buf *b) {}
|
||||
static inline void dma_buf_ref_destroy(struct msm_dma_buf *b) {}
|
||||
static inline void dma_buf_ref_mod(struct msm_dma_buf *b, int nr) {}
|
||||
static inline int dma_buf_ref_show(struct seq_file *s,
|
||||
struct msm_dma_buf *msm_dma_buf)
|
||||
{
|
||||
return -ENOMEM;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* _DMA_BUF_REF_H */
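/*
 * Illustrative sketch, not part of this patch: a debugfs show routine that
 * dumps the recorded get/put call stacks for one buffer when
 * CONFIG_DEBUG_DMA_BUF_REF is enabled (the stub returns -ENOMEM otherwise).
 * The symmetric dma_buf_ref_mod(..., -1) call is expected in dma_buf_put()
 * in dma-buf.c, which this series does not show here.
 */
static int example_show_refs(struct seq_file *s, struct dma_buf *dmabuf)
{
	return dma_buf_ref_show(s, to_msm_dma_buf(dmabuf));
}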
|
@@ -20,6 +20,7 @@
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/dma-fence.h>
|
||||
#include <linux/dma-buf-ref.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
struct device;
|
||||
@@ -390,6 +391,17 @@ struct dma_buf {
|
||||
} cb_excl, cb_shared;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct msm_dma_buf - Holds the meta data associated with a shared buffer
|
||||
* object, as well as the buffer object.
|
||||
* @refs: list entry for dma-buf reference tracking
|
||||
* @dma_buf: the shared buffer object
|
||||
*/
|
||||
struct msm_dma_buf {
|
||||
struct list_head refs;
|
||||
struct dma_buf dma_buf;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dma_buf_attachment - holds device-buffer attachment data
|
||||
* @dmabuf: buffer for this attachment.
|
||||
@@ -454,6 +466,12 @@ struct dma_buf_export_info {
|
||||
struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
|
||||
.owner = THIS_MODULE }
|
||||
|
||||
/**
|
||||
* to_msm_dma_buf - helper macro for deriving an msm_dma_buf from a dma_buf.
|
||||
*/
|
||||
#define to_msm_dma_buf(_dma_buf) \
|
||||
container_of(_dma_buf, struct msm_dma_buf, dma_buf)
|
||||
|
||||
/**
|
||||
* get_dma_buf - convenience wrapper for get_file.
|
||||
* @dmabuf: [in] pointer to dma_buf
|
||||
@@ -466,6 +484,7 @@ struct dma_buf_export_info {
|
||||
static inline void get_dma_buf(struct dma_buf *dmabuf)
|
||||
{
|
||||
get_file(dmabuf->file);
|
||||
dma_buf_ref_mod(to_msm_dma_buf(dmabuf), 1);
|
||||
}
|
||||
|
||||
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
|
||||
|
@@ -798,10 +798,15 @@ static inline int dma_get_cache_alignment(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DMA_DECLARE_COHERENT
|
||||
struct dma_coherent_mem;
|
||||
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
|
||||
dma_addr_t device_addr, size_t size);
|
||||
void dma_release_declared_memory(struct device *dev);
|
||||
dma_addr_t dma_get_device_base(struct device *dev,
|
||||
struct dma_coherent_mem *mem);
|
||||
unsigned long dma_get_size(struct dma_coherent_mem *mem);
|
||||
#else
|
||||
struct dma_coherent_mem {};
|
||||
static inline int
|
||||
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
|
||||
dma_addr_t device_addr, size_t size)
|
||||
@@ -813,6 +818,17 @@ static inline void
|
||||
dma_release_declared_memory(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline dma_addr_t dma_get_device_base(struct device *dev,
|
||||
struct dma_coherent_mem *mem)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline unsigned long dma_get_size(struct dma_coherent_mem *mem)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_DMA_DECLARE_COHERENT */
|
||||
|
||||
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
|
||||
|
11
include/linux/msm_ion_of.h
Normal file
@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#ifndef _MSM_ION_KERNEL_H
#define _MSM_ION_KERNEL_H

struct device *msm_ion_heap_device_by_id(int heap_id);

#endif /* _MSM_ION_KERNEL_H */
|
170
include/trace/events/ion.h
Normal file
@@ -0,0 +1,170 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM ion
|
||||
|
||||
#if !defined(_TRACE_ION_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_ION_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
#define DEV_NAME_NONE "None"
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_dma_map_cmo_class,
|
||||
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, map_attrs, dir),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
|
||||
__field(unsigned long, ino)
|
||||
__field(bool, cached)
|
||||
__field(bool, hlos_accessible)
|
||||
__field(unsigned long, map_attrs)
|
||||
__field(enum dma_data_direction, dir)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
|
||||
__entry->ino = ino;
|
||||
__entry->cached = cached;
|
||||
__entry->hlos_accessible = hlos_accessible;
|
||||
__entry->map_attrs = map_attrs;
|
||||
__entry->dir = dir;
|
||||
),
|
||||
|
||||
TP_printk("dev=%s ino=%lu cached=%d access=%d map_attrs=0x%lx dir=%d",
|
||||
__get_str(dev_name),
|
||||
__entry->ino,
|
||||
__entry->cached,
|
||||
__entry->hlos_accessible,
|
||||
__entry->map_attrs,
|
||||
__entry->dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_apply,
|
||||
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, map_attrs, dir)
|
||||
);
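/*
 * Illustrative sketch, not part of this patch: a thin helper showing how
 * the map-path event above is reported from ion.c. The caller supplies the
 * mapping device, the dma-buf inode number, cacheability, map attributes
 * and DMA direction; HLOS accessibility is assumed true here.
 */
static inline void example_report_map_applied(const struct device *dev,
					      unsigned long ino, bool cached,
					      unsigned long map_attrs,
					      enum dma_data_direction dir)
{
	trace_ion_dma_map_cmo_apply(dev, ino, cached, true, map_attrs, dir);
}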
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_skip,
|
||||
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_apply,
|
||||
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_skip,
|
||||
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_access_cmo_class,
|
||||
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
|
||||
__field(unsigned long, ino)
|
||||
__field(bool, cached)
|
||||
__field(bool, hlos_accessible)
|
||||
__field(enum dma_data_direction, dir)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
|
||||
__entry->ino = ino;
|
||||
__entry->cached = cached;
|
||||
__entry->hlos_accessible = hlos_accessible;
|
||||
__entry->dir = dir;
|
||||
),
|
||||
|
||||
TP_printk("dev=%s ino=%ld cached=%d access=%d dir=%d",
|
||||
__get_str(dev_name),
|
||||
__entry->ino,
|
||||
__entry->cached,
|
||||
__entry->hlos_accessible,
|
||||
__entry->dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_apply,
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_skip,
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_notmapped,
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_apply,
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_skip,
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_notmapped,
|
||||
TP_PROTO(const struct device *dev, unsigned long ino,
|
||||
bool cached, bool hlos_accessible,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, ino, cached, hlos_accessible, dir)
|
||||
);
|
||||
#endif /* _TRACE_ION_H */
|
||||
|
||||
#include <trace/define_trace.h>
|
||||
|
1
include/uapi/linux/ion.h
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../drivers/staging/android/uapi/ion.h
|
1
include/uapi/linux/msm_ion.h
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../drivers/staging/android/uapi/msm_ion.h
|
@@ -28,7 +28,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline dma_addr_t dma_get_device_base(struct device *dev,
|
||||
dma_addr_t dma_get_device_base(struct device *dev,
|
||||
struct dma_coherent_mem * mem)
|
||||
{
|
||||
if (mem->use_dev_dma_pfn_offset)
|
||||
@@ -37,6 +37,11 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
|
||||
return mem->device_base;
|
||||
}
|
||||
|
||||
unsigned long dma_get_size(struct dma_coherent_mem *mem)
|
||||
{
|
||||
return mem->size << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static int dma_init_coherent_memory(phys_addr_t phys_addr,
|
||||
dma_addr_t device_addr, size_t size,
|
||||
struct dma_coherent_mem **mem)
|
||||
|