@@ -47,7 +47,6 @@
 #define SMCINVOKE_DEV "smcinvoke"
 #define SMCINVOKE_TZ_ROOT_OBJ 1
 #define SMCINVOKE_TZ_OBJ_NULL 0
-#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
 #define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t))
 #define SMCINVOKE_NEXT_AVAILABLE_TXN 0
 #define SMCINVOKE_REQ_PLACED 1
@@ -334,7 +333,6 @@ struct smcinvoke_mem_obj {
 	uint64_t p_addr;
 	size_t p_addr_len;
 	struct list_head list;
-	bool is_smcinvoke_created_shmbridge;
 	uint64_t shmbridge_handle;
 	struct smcinvoke_server_info *server;
 	int32_t mem_obj_user_fd;
@@ -390,6 +388,8 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
 		struct qtee_shm *in_shm, struct qtee_shm *out_shm);
 
 static void process_piggyback_data(void *buf, size_t buf_size);
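+/* Helpers for the async side channel that piggybacks pending mem-object info onto the TZ callback buffer */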
+static void add_mem_obj_info_to_async_side_channel_locked(void *buf, size_t buf_size, struct list_head *l_pending_mem_obj);
+static void delete_pending_async_list_locked(struct list_head *l_pending_mem_obj);
 
 static void destroy_cb_server(struct kref *kref)
 {
@@ -673,7 +673,7 @@ static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvo
 static int smcinvoke_postprocess_kthread_func(void *data)
 {
 	struct smcinvoke_worker_thread *smcinvoke_wrk_trd = data;
-	const char *tag;
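+	/* Indexed by the worker-thread type; the trailing "invalid" entry covers unexpected values */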
+	static const char *const tag[] = {"shmbridge", "object", "adci", "invalid"};
 
 	if (!smcinvoke_wrk_trd) {
 		pr_err("Bad input.\n");
@@ -688,21 +688,18 @@ static int smcinvoke_postprocess_kthread_func(void *data)
 			== POST_KT_WAKEUP));
 		switch (smcinvoke_wrk_trd->type) {
 		case SHMB_WORKER_THREAD:
-			tag = "shmbridge";
 			pr_debug("kthread to %s postprocess is called %d\n",
-			tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
+			tag[SHMB_WORKER_THREAD], atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
 			smcinvoke_shmbridge_post_process();
 			break;
 		case OBJECT_WORKER_THREAD:
-			tag = "object";
 			pr_debug("kthread to %s postprocess is called %d\n",
-			tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
+			tag[OBJECT_WORKER_THREAD], atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
 			smcinvoke_object_post_process();
 			break;
 		case ADCI_WORKER_THREAD:
-			tag = "adci";
 			pr_debug("kthread to %s postprocess is called %d\n",
-			tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
+			tag[ADCI_WORKER_THREAD], atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
 			smcinvoke_start_adci_thread();
 			break;
 		default:
@@ -722,7 +719,7 @@ static int smcinvoke_postprocess_kthread_func(void *data)
 		atomic_set(&smcinvoke_wrk_trd->postprocess_kthread_state,
 			POST_KT_SLEEP);
 	}
-	pr_warn("kthread to %s postprocess stopped\n", tag);
+	pr_warn("kthread(worker_thread) processed, worker_thread type is %d\n", smcinvoke_wrk_trd->type);
 
 	return 0;
 }
@@ -808,7 +805,6 @@ static void queue_mem_obj_pending_async_locked(struct smcinvoke_mem_obj *mem_obj
 static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
 {
 	int ret = 0;
-	bool is_bridge_created = mem_obj->is_smcinvoke_created_shmbridge;
 	struct dma_buf *dmabuf_to_free = mem_obj->dma_buf;
 	uint64_t shmbridge_handle = mem_obj->shmbridge_handle;
 	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;
@@ -819,7 +815,7 @@ static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
 	mem_obj = NULL;
 	mutex_unlock(&g_smcinvoke_lock);
 
-	if (is_bridge_created)
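+	/* A nonzero handle marks a bridge that was registered and must be deregistered here */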
+	if (shmbridge_handle)
 		ret = qtee_shmbridge_deregister(shmbridge_handle);
 	if (ret) {
 		pr_err("Error:%d delete bridge failed leaking memory 0x%x\n",
@@ -1158,16 +1154,10 @@ static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
 	ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
 		tz_perm, &mem_obj->shmbridge_handle);
 
-	if (ret == 0) {
-		/* In case of ret=0/success handle has to be freed in memobj release */
-		mem_obj->is_smcinvoke_created_shmbridge = true;
-	} else if (ret == -EEXIST) {
-		ret = 0;
-		goto exit;
-	} else {
-		pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
-			mem_obj->mem_region_id, ret);
-		goto exit;
+	if (ret) {
+		pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
+			mem_obj->mem_region_id, ret);
+		goto exit;
 	}
 
 	trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
@@ -1531,7 +1521,7 @@ static void process_kernel_obj(void *buf, size_t buf_len)
 
 	switch (cb_req->hdr.op) {
 	case OBJECT_OP_MAP_REGION:
-		pr_debug("Received a request to map memory region\n");
+		pr_err("Received a request to map memory region\n");
 		cb_req->result = smcinvoke_process_map_mem_region_req(buf, buf_len);
 		break;
 	case OBJECT_OP_YIELD:
@@ -2198,6 +2188,13 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
 	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
 	union smcinvoke_tz_args *tz_args = tzcb_req->args;
+	size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req);
+	LIST_HEAD(l_mem_objs_pending_async);	/* Holds new memory objects, to be later sent to TZ */
+	uint32_t max_offset = 0;
+	uint32_t buffer_size_max_offset = 0;
+	void *async_buf_begin;
+	size_t async_buf_size;
+	uint32_t offset = 0;
 
 	release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
 	tzcb_req->result = user_req->result;
@@ -2207,6 +2204,16 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 		ret = 0;
 		goto out;
 	}
+
+	FOR_ARGS(i, tzcb_req->hdr.counts, BI) {
+
+		/* Find the max offset and the size of the buffer at that offset */
+		if (tz_args[i].b.offset > max_offset) {
+			max_offset = tz_args[i].b.offset;
+			buffer_size_max_offset = tz_args[i].b.size;
+		}
+	}
+
 	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
 		union smcinvoke_arg tmp_arg;
 
@@ -2224,6 +2231,12 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 			ret = -EFAULT;
 			goto out;
 		}
+
+		/* Find the max offset and the size of the buffer at that offset */
+		if (tz_args[i].b.offset > max_offset) {
+			max_offset = tz_args[i].b.offset;
+			buffer_size_max_offset = tz_args[i].b.size;
+		}
 	}
 
 	FOR_ARGS(i, tzcb_req->hdr.counts, OO) {
@@ -2237,7 +2250,8 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 		}
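+		/* Queue any mem objects created here so they are sent to TZ over the async channel */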
 		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
 			tmp_arg.o.cb_server_fd, &arr_filp[i],
-			&(tz_args[i].handle), NULL);
+			&(tz_args[i].handle), &l_mem_objs_pending_async);
+
 		if (ret)
 			goto out;
 		tzhandles_to_release[i] = tz_args[i].handle;
@@ -2247,12 +2261,36 @@ static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
 	}
 	ret = 0;
 out:
-	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
-		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
-			release_tzhandles(&tz_args[i].handle, 1);
-	}
+	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
+		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
+			release_tzhandles(&tz_args[i].handle, 1);
+	}
+
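+	/* do/while(0) lets the size-check failure below break out of the async handling early */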
+	do {
+		if (mem_obj_async_support) {
+			/* Add the async information to the buffer beyond the data at the max offset, if one exists;
+			 * otherwise it can go right after the header and the args. */
+			offset = (max_offset ? (max_offset + buffer_size_max_offset) : tz_buf_offset);
+			offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+			async_buf_begin = (uint8_t *)tzcb_req + offset;
+
+			if (async_buf_begin - (void *)tzcb_req > g_max_cb_buf_size) {
+				pr_err("Unable to add memory object info to the async channel\n");
+				break;
+			} else {
+				async_buf_size = g_max_cb_buf_size - (async_buf_begin - (void *)tzcb_req);
+			}
+
+			mutex_lock(&g_smcinvoke_lock);
+			add_mem_obj_info_to_async_side_channel_locked(async_buf_begin, async_buf_size, &l_mem_objs_pending_async);
+			delete_pending_async_list_locked(&l_mem_objs_pending_async);
+			mutex_unlock(&g_smcinvoke_lock);
+		}
+	} while (0);
+
 	if (ret)
 		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
+
 	return ret;
 }