SMCInvoke: memory object optimization

Add support for map objects to be written into the async
side channel rather than having to be explicitly requested
by QTEE.

Change-Id: Ia1c56555dc3a6422f4fb448f94f73fef661ac89d
Signed-off-by: Reut Zysman <quic_rzysman@quicinc.com>
@@ -73,6 +73,11 @@
#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED 3
/* TZ defined values - End */
/* Asynchronous protocol values */
/* The driver async version is set to match the earliest TZ version that supports async memory objects */
#define SMCINVOKE_ASYNC_VERSION (0x00010002)
#define SMCINVOKE_ASYNC_OP_MEMORY_OBJECT (0x00000003)
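For orientation: the version constant appears to pack major and minor revisions into the upper and lower 16 bits, so 0x00010002 would read as version 1.2. A minimal sketch of that assumed encoding (these helper macros are illustrative, not part of the driver):

/* Assumed 16.16 split of the async version word; illustrative only. */
#define SMCINVOKE_ASYNC_VERSION_MAJOR(v)	(((v) >> 16) & 0xFFFF)
#define SMCINVOKE_ASYNC_VERSION_MINOR(v)	((v) & 0xFFFF)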
/*
* This is the state when server FD has been closed but
* TZ still has refs of CBOBjs served by this server
@@ -196,6 +201,13 @@ static struct class *driver_class;
static struct device *class_dev;
static struct platform_device *smcinvoke_pdev;
/* We disable async memory object support by default,
 * until we receive the first message from TZ over the
 * async channel and can determine the TZ async version.
 */
static bool mem_obj_async_support = false;
static uint32_t tz_async_version = 0x0;
struct smcinvoke_buf_hdr {
uint32_t offset;
uint32_t size;
@@ -234,6 +246,41 @@ struct smcinvoke_piggyback_msg {
int32_t objs[0];
};
/* Mapped memory object data
*
* memObjRef Handle reference for the memory object
* mapObjRef Handle reference for the map object
* addr Mapped memory address
* size Size of mapped memory
* perm Access rights for the memory
*/
struct smcinvoke_mem_obj_info {
uint32_t memObjRef;
uint32_t mapObjRef;
uint64_t addr;
uint64_t size;
uint32_t perm;
};
/* Memory object info to be written into the async buffer
*
* version Async protocol version
* op Async protocol operation
* count Number of memory objects passed
* mo Mapped memory object data
*/
struct smcinvoke_mem_obj_msg {
uint32_t version;
uint32_t op;
uint32_t count;
struct smcinvoke_mem_obj_info mo[];
};
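Since smcinvoke_mem_obj_msg ends in a flexible array member, the buffer space needed to carry n objects is the header size plus n element sizes. A minimal sketch, with an illustrative helper name that is not a driver symbol:

/* Illustrative helper: bytes required for an async message
 * carrying `count` memory objects.
 */
static inline size_t mem_obj_msg_size(size_t count)
{
	return sizeof(struct smcinvoke_mem_obj_msg) +
		count * sizeof(struct smcinvoke_mem_obj_info);
}

This is the same computation that struct_size(msg, mo, count) from <linux/overflow.h> performs, with overflow checking.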
struct smcinvoke_mem_obj_pending_async {
struct smcinvoke_mem_obj *mem_obj;
struct list_head list;
};
/* Data structure to hold request coming from TZ */
struct smcinvoke_cb_txn {
uint32_t txn_id;
@@ -714,6 +761,32 @@ static void smcinvoke_destroy_kthreads(void)
}
}
/* Queue a newly created memory object on the l_pending_mem_obj list.
 * Later, the mapping information for objects on this list will be sent
 * to TZ over the async side channel.
 *
 * No return value: TZ can always ask for this information explicitly
 * if this function fails and the memory object is not added to the list.
 */
static void queue_mem_obj_pending_async_locked(struct smcinvoke_mem_obj *mem_obj, struct list_head *l_pending_mem_obj)
{
struct smcinvoke_mem_obj_pending_async *t_mem_obj_pending =
kzalloc(sizeof(*t_mem_obj_pending), GFP_KERNEL);
/*
* We are not failing execution in case of a failure here,
* since TZ can always ask for this information explicitly
* if it's not available in the side channel.
*/
if (!t_mem_obj_pending) {
pr_err("Unable to allocate memory\n");
return;
}
t_mem_obj_pending->mem_obj = mem_obj;
list_add(&t_mem_obj_pending->list, l_pending_mem_obj);
}
static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
{
int ret = 0;
@@ -1040,8 +1113,141 @@ static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj,
return ret;
}
static int create_mem_obj(struct dma_buf *dma_buf, int32_t *mem_obj,
int32_t server_id, int32_t user_handle)
static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
{
int ret = 0;
int tz_perm = PERM_READ|PERM_WRITE;
uint32_t *vmid_list;
uint32_t *perms_list;
uint32_t nelems = 0;
struct dma_buf *dmabuf = mem_obj->dma_buf;
phys_addr_t phys = mem_obj->p_addr;
size_t size = mem_obj->p_addr_len;
if (!qtee_shmbridge_is_enabled())
return 0;
ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
(int **)&perms_list, (int *)&nelems);
if (ret) {
pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
return ret;
}
if (mem_buf_dma_buf_exclusive_owner(dmabuf))
perms_list[0] = PERM_READ | PERM_WRITE;
ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
tz_perm, &mem_obj->shmbridge_handle);
if (ret == 0) {
/* On success (ret == 0), the handle must be freed when the memobj is released */
mem_obj->is_smcinvoke_created_shmbridge = true;
} else if (ret == -EEXIST) {
ret = 0;
goto exit;
} else {
pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
mem_obj->mem_region_id, ret);
goto exit;
}
trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
exit:
kfree(perms_list);
kfree(vmid_list);
return ret;
}
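A note on the -EEXIST branch above: an already-registered bridge is treated as success, but is_smcinvoke_created_shmbridge stays false, so the release path tears down only bridges this driver created. A hedged sketch of that release-side check (assuming deregistration happens via qtee_shmbridge_deregister()):

/* Sketch of the release-side counterpart; illustrative, not the
 * driver's actual free path.
 */
if (mem_obj->is_smcinvoke_created_shmbridge)
	qtee_shmbridge_deregister(mem_obj->shmbridge_handle);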
/* Map memory region for a given memory object.
* Mapping information will be saved as part of the memory object structure.
*/
static int32_t smcinvoke_map_mem_region_locked(struct smcinvoke_mem_obj *mem_obj)
{
int ret = OBJECT_OK;
struct dma_buf_attachment *buf_attach = NULL;
struct sg_table *sgt = NULL;
if (!mem_obj) {
pr_err("Invalid memory object\n");
return OBJECT_ERROR_BADOBJ;
}
if (!mem_obj->p_addr) {
kref_init(&mem_obj->mem_map_obj_ref_cnt);
buf_attach = dma_buf_attach(mem_obj->dma_buf,
&smcinvoke_pdev->dev);
if (IS_ERR(buf_attach)) {
ret = OBJECT_ERROR_KMEM;
pr_err("dma buf attach failed, ret: %d\n", ret);
goto out;
}
mem_obj->buf_attach = buf_attach;
sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
pr_err("mapping dma buffers failed, ret: %d\n",
PTR_ERR(sgt));
ret = OBJECT_ERROR_KMEM;
goto out;
}
mem_obj->sgt = sgt;
/* contiguous only => nents=1 */
if (sgt->nents != 1) {
ret = OBJECT_ERROR_INVALID;
pr_err("sg entries are not contiguous, ret: %d\n", ret);
goto out;
}
mem_obj->p_addr = sg_dma_address(sgt->sgl);
mem_obj->p_addr_len = sgt->sgl->length;
if (!mem_obj->p_addr) {
ret = OBJECT_ERROR_INVALID;
pr_err("invalid physical address, ret: %d\n", ret);
goto out;
}
/* Increase reference count as we are feeding the memobj to
* smcinvoke and unlock the mutex. No need to hold the mutex in
* case of shmbridge creation.
*/
kref_get(&mem_obj->mem_map_obj_ref_cnt);
mutex_unlock(&g_smcinvoke_lock);
ret = smcinvoke_create_bridge(mem_obj);
/* Retake the lock and drop the reference taken for shmbridge
 * creation. Before proceeding further, we must check whether
 * the memobj is still valid now that the reference has been
 * dropped.
 */
mutex_lock(&g_smcinvoke_lock);
kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
if (ret) {
ret = OBJECT_ERROR_INVALID;
pr_err("Unable to create shm bridge, ret: %d\n", ret);
goto out;
}
if (!find_mem_obj_locked(mem_obj->mem_region_id,
SMCINVOKE_MEM_RGN_OBJ)) {
mutex_unlock(&g_smcinvoke_lock);
pr_err("Memory object not found\n");
return OBJECT_ERROR_BADOBJ;
}
mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
}
out:
if (ret != OBJECT_OK)
kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
return ret;
}
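The locking dance above is a general pattern: pin the object with an extra reference, drop the mutex for the slow call, retake it, drop the reference, then re-validate the object before using it. A distilled, generic sketch of that pattern; every name here (my_obj, my_lock, slow_setup, my_release, still_listed) is a placeholder, not a driver symbol:

/* Generic drop-lock-for-slow-work pattern, for illustration only. */
static int setup_unlocked_then_revalidate(struct my_obj *obj)
{
	int err;

	kref_get(&obj->ref);              /* pin across unlock */
	mutex_unlock(&my_lock);
	err = slow_setup(obj);            /* slow work, lock dropped */
	mutex_lock(&my_lock);
	kref_put(&obj->ref, my_release);  /* may free obj if last ref */
	if (!err && !still_listed(obj))   /* re-check validity */
		err = -ENOENT;
	return err;
}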
static int create_mem_obj(struct dma_buf *dma_buf, int32_t *tzhandle,
struct smcinvoke_mem_obj **mem_obj, int32_t server_id, int32_t user_handle)
{
struct smcinvoke_mem_obj *t_mem_obj = NULL;
struct smcinvoke_server_info *server_i = NULL;
@@ -1066,7 +1272,8 @@ static int create_mem_obj(struct dma_buf *dma_buf, int32_t *mem_obj,
t_mem_obj->mem_obj_user_fd = user_handle;
list_add_tail(&t_mem_obj->list, &g_mem_objs);
mutex_unlock(&g_smcinvoke_lock);
*mem_obj = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
*mem_obj = t_mem_obj;
*tzhandle = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
t_mem_obj->mem_region_id);
return 0;
}
@@ -1078,10 +1285,11 @@ static int create_mem_obj(struct dma_buf *dma_buf, int32_t *mem_obj,
* other threads from releasing that FD while IOCTL is in progress.
*/
static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
struct file **filp, uint32_t *tzhandle)
struct file **filp, uint32_t *tzhandle, struct list_head *l_pending_mem_obj)
{
int ret = -EBADF;
uint16_t server_id = 0;
struct smcinvoke_mem_obj *mem_obj = NULL;
if (UHANDLE_IS_NULL(uhandle)) {
*tzhandle = SMCINVOKE_TZ_OBJ_NULL;
@@ -1106,7 +1314,23 @@ static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) {
server_id = get_server_id(server_fd);
ret = create_mem_obj(dma_buf, tzhandle, server_id, uhandle);
ret = create_mem_obj(dma_buf, tzhandle, &mem_obj, server_id, uhandle);
if (!ret && mem_obj_async_support && l_pending_mem_obj) {
mutex_lock(&g_smcinvoke_lock);
/* Map the newly created memory object and add it
 * to the l_pending_mem_obj list.
 * Before returning to TZ, add the mapping data
 * to the async side channel so it is available to TZ
 * together with the memory object.
 */
if (!smcinvoke_map_mem_region_locked(mem_obj)) {
queue_mem_obj_pending_async_locked(mem_obj, l_pending_mem_obj);
} else {
pr_err("Failed to map memory region\n");
}
mutex_unlock(&g_smcinvoke_lock);
}
} else if (is_remote_obj(UHANDLE_GET_FD(uhandle),
&tzobj, filp)) {
*tzhandle = tzobj->tzhandle;
@@ -1204,52 +1428,6 @@ out:
return ret;
}
static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
{
int ret = 0;
int tz_perm = PERM_READ|PERM_WRITE;
uint32_t *vmid_list;
uint32_t *perms_list;
uint32_t nelems = 0;
struct dma_buf *dmabuf = mem_obj->dma_buf;
phys_addr_t phys = mem_obj->p_addr;
size_t size = mem_obj->p_addr_len;
if (!qtee_shmbridge_is_enabled())
return 0;
ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
(int **)&perms_list, (int *)&nelems);
if (ret) {
pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
return ret;
}
if (mem_buf_dma_buf_exclusive_owner(dmabuf))
perms_list[0] = PERM_READ | PERM_WRITE;
ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
tz_perm, &mem_obj->shmbridge_handle);
if (ret == 0) {
/* On success (ret == 0), the handle must be freed when the memobj is released */
mem_obj->is_smcinvoke_created_shmbridge = true;
} else if (ret == -EEXIST) {
ret = 0;
goto exit;
} else {
pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
mem_obj->mem_region_id, ret);
goto exit;
}
trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
exit:
kfree(perms_list);
kfree(vmid_list);
return ret;
}
static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len)
{
struct smcinvoke_tzcb_req *msg = buf;
@@ -1264,7 +1442,7 @@ static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len)
return release_tzhandle_locked(msg->hdr.tzhandle);
}
static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len)
static int32_t smcinvoke_process_map_mem_region_req(void *buf, size_t buf_len)
{
int ret = OBJECT_OK;
struct smcinvoke_tzcb_req *msg = buf;
@@ -1275,8 +1453,6 @@ static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len)
} *ob = NULL;
int32_t *oo = NULL;
struct smcinvoke_mem_obj *mem_obj = NULL;
struct dma_buf_attachment *buf_attach = NULL;
struct sg_table *sgt = NULL;
if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) ||
(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
@@ -1297,80 +1473,20 @@ static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len)
}
if (!mem_obj->p_addr) {
kref_init(&mem_obj->mem_map_obj_ref_cnt);
buf_attach = dma_buf_attach(mem_obj->dma_buf,
&smcinvoke_pdev->dev);
if (IS_ERR(buf_attach)) {
ret = OBJECT_ERROR_KMEM;
pr_err("dma buf attach failed, ret: %d\n", ret);
goto out;
}
mem_obj->buf_attach = buf_attach;
sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
pr_err("mapping dma buffers failed, ret: %d\n",
PTR_ERR(sgt));
ret = OBJECT_ERROR_KMEM;
goto out;
}
mem_obj->sgt = sgt;
/* contiguous only => nents=1 */
if (sgt->nents != 1) {
ret = OBJECT_ERROR_INVALID;
pr_err("sg entries are not contiguous, ret: %d\n", ret);
goto out;
}
mem_obj->p_addr = sg_dma_address(sgt->sgl);
mem_obj->p_addr_len = sgt->sgl->length;
if (!mem_obj->p_addr) {
ret = OBJECT_ERROR_INVALID;
pr_err("invalid physical address, ret: %d\n", ret);
goto out;
}
/* Increase reference count as we are feeding the memobj to
* smcinvoke and unlock the mutex. No need to hold the mutex in
* case of shmbridge creation.
*/
kref_get(&mem_obj->mem_map_obj_ref_cnt);
mutex_unlock(&g_smcinvoke_lock);
ret = smcinvoke_create_bridge(mem_obj);
/* Retake the lock and drop the reference taken for shmbridge
 * creation. Before proceeding further, we must check whether
 * the memobj is still valid now that the reference has been
 * dropped.
 */
mutex_lock(&g_smcinvoke_lock);
kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
if (ret) {
ret = OBJECT_ERROR_INVALID;
goto out;
}
if (!find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle),
SMCINVOKE_MEM_RGN_OBJ)) {
mutex_unlock(&g_smcinvoke_lock);
pr_err("Memory object not found\n");
return OBJECT_ERROR_BADOBJ;
}
mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
ret = smcinvoke_map_mem_region_locked(mem_obj);
} else {
kref_get(&mem_obj->mem_map_obj_ref_cnt);
}
ob->p_addr = mem_obj->p_addr;
ob->len = mem_obj->p_addr_len;
ob->perms = SMCINVOKE_MEM_PERM_RW;
*oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id);
out:
if (ret != OBJECT_OK)
kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
if (!ret) {
ob->p_addr = mem_obj->p_addr;
ob->len = mem_obj->p_addr_len;
ob->perms = SMCINVOKE_MEM_PERM_RW;
*oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id);
}
mutex_unlock(&g_smcinvoke_lock);
return ret;
}
@@ -1397,7 +1513,8 @@ static void process_kernel_obj(void *buf, size_t buf_len)
switch (cb_req->hdr.op) {
case OBJECT_OP_MAP_REGION:
cb_req->result = smcinvoke_map_mem_region(buf, buf_len);
pr_debug("Received a request to map memory region\n");
cb_req->result = smcinvoke_process_map_mem_region_req(buf, buf_len);
break;
case OBJECT_OP_YIELD:
cb_req->result = OBJECT_OK;
@@ -1868,7 +1985,8 @@ static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req,
const union smcinvoke_arg *args_buf, uint32_t tzhandle,
uint8_t *buf, size_t buf_size, struct file **arr_filp,
int32_t *tzhandles_to_release, uint32_t context_type)
int32_t *tzhandles_to_release, uint32_t context_type,
struct list_head *l_pending_mem_obj)
{
int ret = -EINVAL, i = 0, j = 0, k = 0;
const struct smcinvoke_msg_hdr msg_hdr = {
@@ -1920,7 +2038,7 @@ static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req,
FOR_ARGS(i, req->counts, OI) {
ret = get_tzhandle_from_uhandle(args_buf[i].o.fd,
args_buf[i].o.cb_server_fd, &arr_filp[j++],
&(tz_args[i].handle));
&(tz_args[i].handle), l_pending_mem_obj);
if (ret)
goto out;
@@ -2090,7 +2208,7 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
}
ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
tmp_arg.o.cb_server_fd, &arr_filp[i],
&(tz_args[i].handle));
&(tz_args[i].handle), NULL);
if (ret)
goto out;
tzhandles_to_release[i] = tz_args[i].handle;
@@ -2109,6 +2227,23 @@ out:
return ret;
}
static void set_tz_version(uint32_t tz_version)
{
tz_async_version = tz_version;
/* We enable async memory object support when the TZ async
 * version is equal to or greater than the driver version.
 * It is expected that if the protocol changes in later
 * TZ versions, TZ will maintain backward compatibility,
 * so this condition should remain valid.
 */
if (tz_version >= SMCINVOKE_ASYNC_VERSION) {
mem_obj_async_support = true;
pr_debug("Enabled asynchronous memory object support\n");
}
}
static void process_piggyback_data(void *buf, size_t buf_size)
{
int i;
@@ -2120,12 +2255,94 @@ static void process_piggyback_data(void *buf, size_t buf_size)
req.hdr.op = msg->op;
req.hdr.counts = 0; /* release op does not require any args */
req.hdr.tzhandle = objs[i];
if (tz_async_version == 0)
set_tz_version(msg->version);
process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL);
/* cbobjs_in_flight will be adjusted during CB processing */
}
}
/* Add memory object mapping data to the async side channel, so it is
 * available to TZ together with the memory object.
 *
 * No return value: TZ can always ask for this information explicitly
 * if this function fails.
 */
static void add_mem_obj_info_to_async_side_channel_locked(void *buf, size_t buf_size, struct list_head *l_pending_mem_obj)
{
struct smcinvoke_mem_obj_msg *msg = buf;
struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
size_t header_size = 0;
size_t mo_size = 0;
size_t used = 0;
size_t index = 0;
if (list_empty(l_pending_mem_obj))
return;
header_size = sizeof(struct smcinvoke_mem_obj_msg);
mo_size = sizeof(struct smcinvoke_mem_obj_info);
/* Minimal size required is the header data + one mem obj info */
if (buf_size < header_size + mo_size) {
pr_err("Unable to add memory object info to async channel\n");
return;
}
msg->version = SMCINVOKE_ASYNC_VERSION;
msg->op = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
msg->count = 0;
used = header_size;
index = 0;
list_for_each_entry(mem_obj_pending, l_pending_mem_obj, list) {
if (!mem_obj_pending->mem_obj) {
pr_err("Memory object is no longer valid\n");
continue;
}
if (used + mo_size > buf_size) {
pr_err("Not all memory object info was added to the async channel\n");
break;
}
msg->mo[index].memObjRef = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, mem_obj_pending->mem_obj->mem_region_id);
msg->mo[index].mapObjRef = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj_pending->mem_obj->mem_map_obj_id);
msg->mo[index].addr = mem_obj_pending->mem_obj->p_addr;
msg->mo[index].size = mem_obj_pending->mem_obj->p_addr_len;
msg->mo[index].perm = SMCINVOKE_MEM_PERM_RW;
used += sizeof(msg->mo[index]);
index++;
}
msg->count = index;
pr_debug("Added %zu memory objects to the side channel, total size = %zu\n", index, used);
}
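To make the wire format concrete, a hypothetical consumer-side walk of the filled buffer could look like the following. QTEE's actual parser lives in firmware and is not part of this patch; this sketch only mirrors the struct layout above:

/* Hypothetical reader of the async buffer; illustrative only. */
static void walk_async_mem_obj_msg(const void *buf)
{
	const struct smcinvoke_mem_obj_msg *msg = buf;
	uint32_t i;

	if (msg->op != SMCINVOKE_ASYNC_OP_MEMORY_OBJECT)
		return;
	for (i = 0; i < msg->count; i++)
		pr_debug("mo[%u]: memRef 0x%x mapRef 0x%x addr 0x%llx size 0x%llx perm 0x%x\n",
			 i, msg->mo[i].memObjRef, msg->mo[i].mapObjRef,
			 msg->mo[i].addr, msg->mo[i].size, msg->mo[i].perm);
}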
/*
* Delete entire pending async list.
*/
static void delete_pending_async_list_locked(struct list_head *l_pending_mem_obj)
{
struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
struct smcinvoke_mem_obj_pending_async *temp = NULL;
if (list_empty(l_pending_mem_obj))
return;
list_for_each_entry_safe(mem_obj_pending, temp, l_pending_mem_obj, list) {
mem_obj_pending->mem_obj = NULL;
list_del(&mem_obj_pending->list);
kfree(mem_obj_pending);
}
}
static long process_ack_local_obj(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -2361,6 +2578,7 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
union smcinvoke_arg *args_buf = NULL;
struct smcinvoke_file_data *tzobj = filp->private_data;
struct qtee_shm in_shm = {0}, out_shm = {0};
LIST_HEAD(l_mem_objs_pending_async); /* Holds new memory objects, to be later sent to TZ */
/*
* Hold reference to remote object until invoke op is not
@@ -2455,12 +2673,19 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
inmsg_size, filp_to_release, tzhandles_to_release,
context_type);
context_type, &l_mem_objs_pending_async);
if (ret) {
pr_err("failed to marshal in invoke req, ret :%d\n", ret);
goto out;
}
if (mem_obj_async_support) {
mutex_lock(&g_smcinvoke_lock);
add_mem_obj_info_to_async_side_channel_locked(out_msg, outmsg_size, &l_mem_objs_pending_async);
delete_pending_async_list_locked(&l_mem_objs_pending_async);
mutex_unlock(&g_smcinvoke_lock);
}
ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
out_msg, out_shm.paddr, outmsg_size,
&req, args_buf, &tz_acked, context_type,