Merge ec2db68e0f on remote branch

Change-Id: I9e19bc8afa3b8a0dbd88d9a30564ac31b26ddcd2
Linux Build Service Account
2023-05-19 19:19:20 -07:00
4 changed files with 174 additions and 69 deletions

View File

@@ -24,7 +24,6 @@
#include "ipclite_client.h"
#include "ipclite.h"
#define VMID_CDSP 30
#define GLOBAL_ATOMICS_ENABLED 1
#define GLOBAL_ATOMICS_DISABLED 0
#define FIFO_FULL_RESERVE 8
@@ -997,26 +996,6 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n");
}
/* Add VMIDs corresponding to EVA, CDSP and VPU to set IPCMEM access control */
static int set_ipcmem_access_control(struct ipclite_info *ipclite)
{
int ret = 0;
u64 srcVM = BIT(QCOM_SCM_VMID_HLOS);
struct qcom_scm_vmperm destVM[2];
destVM[0].vmid = QCOM_SCM_VMID_HLOS;
destVM[0].perm = QCOM_SCM_PERM_RW;
destVM[1].vmid = VMID_CDSP;
destVM[1].perm = QCOM_SCM_PERM_RW;
ret = qcom_scm_assign_mem(ipclite->ipcmem.mem.aux_base,
ipclite->ipcmem.mem.size, &srcVM,
destVM, ARRAY_SIZE(destVM));
return ret;
}
static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
struct ipclite_channel *channel)
{
@@ -1461,7 +1440,7 @@ static int ipclite_probe(struct platform_device *pdev)
if (hwlock_id != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to retrieve hwlock\n");
ret = hwlock_id;
goto error;
goto release;
}
IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id);
@@ -1469,7 +1448,7 @@ static int ipclite_probe(struct platform_device *pdev)
if (!ipclite->hwlock) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n");
ret = -ENXIO;
goto error;
goto release;
}
IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id assigned successfully, hwlock=%p\n",
ipclite->hwlock);
@@ -1485,12 +1464,6 @@ static int ipclite_probe(struct platform_device *pdev)
mem = &(ipclite->ipcmem.mem);
memset(mem->virt_base, 0, mem->size);
ret = set_ipcmem_access_control(ipclite);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "failed to set access control policy\n");
goto release;
}
ipcmem_init(&ipclite->ipcmem);
/* Set up sysfs for debug */
@@ -1558,6 +1531,7 @@ mem_release:
*/
release:
kfree(ipclite);
ipclite = NULL;
error:
IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n");
return ret;

View File

@@ -497,6 +497,42 @@ int synx_native_signal_fence(struct synx_coredata *synx_obj,
return rc;
}
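/*
 * Signal all active global child fences of a merged (composite) object
 * with the given status.
 */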
int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status)
{
int rc = SYNX_SUCCESS;
unsigned long flags;
int i = 0, num_fences = 0;
struct synx_coredata **synx_child_obj = NULL;
rc = synx_get_child_coredata(synx_obj, &synx_child_obj, &num_fences);
if (rc != SYNX_SUCCESS)
return rc;
for (i = 0; i < num_fences; i++) {
if (IS_ERR_OR_NULL(synx_child_obj[i]) || IS_ERR_OR_NULL(synx_child_obj[i]->fence)) {
dprintk(SYNX_ERR, "Invalid child coredata %d\n", i);
rc = -SYNX_NOENT;
goto fail;
}
mutex_lock(&synx_child_obj[i]->obj_lock);
spin_lock_irqsave(synx_child_obj[i]->fence->lock, flags);
if (synx_util_get_object_status_locked(synx_child_obj[i]) != SYNX_STATE_ACTIVE ||
!synx_util_is_global_object(synx_child_obj[i])) {
spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags);
mutex_unlock(&synx_child_obj[i]->obj_lock);
continue;
}
spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags);
rc = synx_native_signal_fence(synx_child_obj[i], status);
mutex_unlock(&synx_child_obj[i]->obj_lock);
}
fail:
kfree(synx_child_obj);
return rc;
}
void synx_signal_handler(struct work_struct *cb_dispatch)
{
int rc = SYNX_SUCCESS;
@@ -552,14 +588,25 @@ void synx_signal_handler(struct work_struct *cb_dispatch)
mutex_lock(&synx_obj->obj_lock);
if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC)
rc = synx_native_signal_fence(synx_obj, status);
if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) {
if (synx_util_is_merged_object(synx_obj)) {
rc = synx_native_signal_merged_fence(synx_obj, status);
if (rc != SYNX_SUCCESS) {
mutex_unlock(&synx_obj->obj_lock);
dprintk(SYNX_ERR,
"failed to signal merged fence for %u failed=%d\n",
h_synx, rc);
goto fail;
}
} else {
rc = synx_native_signal_fence(synx_obj, status);
}
}
if (rc == SYNX_SUCCESS)
if (rc == SYNX_SUCCESS && !synx_util_is_merged_object(synx_obj))
rc = synx_native_signal_core(synx_obj, status,
(signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ?
true : false, signal_cb->ext_sync_id);
(signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ?
true : false, signal_cb->ext_sync_id);
mutex_unlock(&synx_obj->obj_lock);
if (rc != SYNX_SUCCESS)
@@ -751,6 +798,9 @@ int synx_async_wait(struct synx_session *session,
if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
return -SYNX_INVALID;
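/* Asynchronous waits support only SYNX_NO_TIMEOUT. */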
if (params->timeout_ms != SYNX_NO_TIMEOUT)
return -SYNX_NOSUPPORT;
client = synx_get_client(session);
if (IS_ERR_OR_NULL(client))
return -SYNX_INVALID;
@@ -766,14 +816,6 @@ int synx_async_wait(struct synx_session *session,
}
mutex_lock(&synx_obj->obj_lock);
if (synx_util_is_merged_object(synx_obj)) {
dprintk(SYNX_ERR,
"[sess :%llu] cannot async wait on merged handle %u\n",
client->id, params->h_synx);
rc = -SYNX_INVALID;
goto release;
}
synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC);
if (IS_ERR_OR_NULL(synx_cb)) {
rc = -SYNX_NOMEM;
@@ -795,10 +837,17 @@ int synx_async_wait(struct synx_session *session,
}
if (synx_util_is_global_handle(params->h_synx) ||
synx_util_is_global_object(synx_obj))
synx_util_is_global_object(synx_obj)) {
status = synx_global_test_status_set_wait(
synx_util_global_idx(params->h_synx),
SYNX_CORE_APSS);
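/* Already signaled globally: bring the local (merged or plain) fence up to date. */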
if (status != SYNX_STATE_ACTIVE) {
if (synx_util_is_merged_object(synx_obj))
synx_native_signal_merged_fence(synx_obj, status);
else
synx_native_signal_fence(synx_obj, status);
}
} else {
status = synx_util_get_object_status(synx_obj);
}
@@ -862,10 +911,9 @@ int synx_cancel_async_wait(
}
mutex_lock(&synx_obj->obj_lock);
if (synx_util_is_merged_object(synx_obj) ||
synx_util_is_external_object(synx_obj)) {
if (synx_util_is_external_object(synx_obj)) {
dprintk(SYNX_ERR,
"cannot cancel wait on composite handle\n");
"cannot cancel wait on external fence\n");
goto release;
}
@@ -944,14 +992,14 @@ EXPORT_SYMBOL(synx_cancel_async_wait);
int synx_merge(struct synx_session *session,
struct synx_merge_params *params)
{
int rc, i, j = 0;
u32 h_child;
u32 count = 0;
u32 *h_child_list;
int rc, i, num_signaled = 0;
u32 count = 0, h_child, status = SYNX_STATE_ACTIVE;
u32 *h_child_list = NULL, *h_child_idx_list = NULL;
struct synx_client *client;
struct dma_fence **fences = NULL;
struct synx_coredata *synx_obj;
struct synx_map_entry *map_entry;
struct synx_coredata *synx_obj, *synx_obj_child;
struct synx_handle_coredata *synx_data_child;
if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
return -SYNX_INVALID;
@@ -1008,36 +1056,67 @@ int synx_merge(struct synx_session *session,
goto clear;
}
h_child_list = kzalloc(count * sizeof(*h_child_list), GFP_KERNEL);
if (IS_ERR_OR_NULL(h_child_list)) {
rc = -SYNX_NOMEM;
goto clear;
}
h_child_idx_list = kzalloc(count * sizeof(*h_child_idx_list), GFP_KERNEL);
if (IS_ERR_OR_NULL(h_child_idx_list)) {
kfree(h_child_list);
rc = -SYNX_NOMEM;
goto clear;
}
for (i = 0; i < count; i++) {
h_child = synx_util_get_fence_entry((u64)fences[i], 1);
if (!synx_util_is_global_handle(h_child))
continue;
h_child_list[num_signaled] = h_child;
h_child_idx_list[num_signaled++] = synx_util_global_idx(h_child);
}
if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
h_child_list = kzalloc(count*4, GFP_KERNEL);
if (IS_ERR_OR_NULL(synx_obj)) {
rc = -SYNX_NOMEM;
goto clear;
}
for (i = 0; i < count; i++) {
h_child = synx_util_get_fence_entry((u64)fences[i], 1);
if (!synx_util_is_global_handle(h_child))
continue;
h_child_list[j++] = synx_util_global_idx(h_child);
}
rc = synx_global_merge(h_child_list, j,
rc = synx_global_merge(h_child_idx_list, num_signaled,
synx_util_global_idx(*params->h_merged_obj));
if (rc != SYNX_SUCCESS) {
dprintk(SYNX_ERR, "global merge failed\n");
kfree(h_child_list);
kfree(h_child_idx_list);
goto clear;
}
} else {
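/* Local merge: propagate the status of any global child that has already been signaled. */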
for (i = 0; i < num_signaled; i++) {
status = synx_global_test_status_set_wait(synx_util_global_idx(h_child_list[i]), SYNX_CORE_APSS);
if (status != SYNX_STATE_ACTIVE) {
synx_data_child = synx_util_acquire_handle(client, h_child_list[i]);
synx_obj_child = synx_util_obtain_object(synx_data_child);
if (IS_ERR_OR_NULL(synx_obj_child)) {
dprintk(SYNX_ERR,
"[sess :%llu] invalid child handle %u\n",
client->id, h_child_list[i]);
continue;
}
rc = synx_native_signal_fence(synx_obj_child, status);
if (rc != SYNX_SUCCESS)
dprintk(SYNX_ERR, "h_synx %u failed with status %d\n", h_child_list[i], rc);
}
}
}
dprintk(SYNX_MEM,
"[sess :%llu] merge allocated %u, core %pK, fence %pK\n",
client->id, *params->h_merged_obj, synx_obj,
synx_obj->fence);
kfree(h_child_list);
kfree(h_child_idx_list);
synx_put_client(client);
return SYNX_SUCCESS;
clear:
synx_util_release_map_entry(map_entry);
clean_up:
@@ -1118,8 +1197,15 @@ int synx_wait(struct synx_session *session,
if (synx_util_is_global_handle(h_synx)) {
rc = synx_global_test_status_set_wait(
synx_util_global_idx(h_synx), SYNX_CORE_APSS);
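/* Already signaled globally: propagate the final status to the local fence before returning. */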
if (rc != SYNX_STATE_ACTIVE)
if (rc != SYNX_STATE_ACTIVE) {
mutex_lock(&synx_obj->obj_lock);
if (synx_util_is_merged_object(synx_obj))
synx_native_signal_merged_fence(synx_obj, rc);
else
synx_native_signal_fence(synx_obj, rc);
mutex_unlock(&synx_obj->obj_lock);
goto fail;
}
}
timeleft = dma_fence_wait_timeout(synx_obj->fence, (bool) 0,
@@ -1977,6 +2063,7 @@ static int synx_handle_async_wait(
params.h_synx = user_data.synx_obj;
params.cb_func = synx_util_default_user_callback;
params.userdata = (void *)user_data.payload[0];
params.timeout_ms = user_data.payload[2];
rc = synx_async_wait(session, &params);
if (rc)

View File

@@ -1205,6 +1205,48 @@ free:
kfree(synx_cb);
}
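/*
 * Collect the coredata of every child fence backing a merged
 * (dma_fence_array) object. On success the caller must kfree()
 * the array returned through child_synx_obj.
 */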
int synx_get_child_coredata(struct synx_coredata *synx_obj,
struct synx_coredata ***child_synx_obj, int *num_fences)
{
int rc = SYNX_SUCCESS;
int i = 0, handle_count = 0;
u32 h_child = 0;
struct dma_fence_array *array = NULL;
struct synx_coredata **synx_datas = NULL;
struct synx_map_entry *fence_entry = NULL;
if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(num_fences))
return -SYNX_INVALID;
if (dma_fence_is_array(synx_obj->fence)) {
array = to_dma_fence_array(synx_obj->fence);
if (IS_ERR_OR_NULL(array))
return -SYNX_INVALID;
synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL);
if (IS_ERR_OR_NULL(synx_datas))
return -SYNX_NOMEM;
for (i = 0; i < array->num_fences; i++) {
h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
fence_entry = synx_util_get_map_entry(h_child);
if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) {
dprintk(SYNX_ERR, "Invalid handle access %u\n", h_child);
rc = -SYNX_NOENT;
goto fail;
}
synx_datas[handle_count++] = fence_entry->synx_obj;
synx_util_release_map_entry(fence_entry);
}
}
*child_synx_obj = synx_datas;
*num_fences = handle_count;
return rc;
fail:
kfree(synx_datas);
return rc;
}
u32 synx_util_get_fence_entry(u64 key, u32 global)
{
u32 h_synx = 0;

View File

@@ -178,4 +178,6 @@ void synx_util_map_import_params_to_create(
struct bind_operations *synx_util_get_bind_ops(u32 type);
u32 synx_util_map_client_id_to_core(enum synx_client_id id);
int synx_get_child_coredata(struct synx_coredata *synx_obj,
struct synx_coredata ***child_synx_obj, int *num_fences);
#endif /* __SYNX_UTIL_H__ */