msm: ipclite: Rebased SSR updates from kernel_platform

Added the latest SSR (SubSystem Restart) updates to IPCLite from
kernel_platform into the vendor space.

Change-Id: I9e551a0d69f45d89cae2165e25468945fcc68f7f
Signed-off-by: Chelliah Vinu R <quic_chelliah@quicinc.com>
Author: Chelliah Vinu R <quic_chelliah@quicinc.com>
Date:   2022-10-23 16:37:56 +05:30
Parent: 25cb61693a
Commit: a2639f4c3d

4 changed files with 173 additions and 36 deletions


@@ -33,22 +33,14 @@ static struct ipclite_info *ipclite;
 static struct ipclite_client synx_client;
 static struct ipclite_client test_client;
 struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
+struct mutex ssr_mutex;
+uint32_t channel_status_info[IPCMEM_NUM_HOSTS];
 
 u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
 
 #define FIFO_FULL_RESERVE 8
 #define FIFO_ALIGNMENT 8
 
-void ipclite_hwlock_reset(enum ipcmem_host_type core_id)
-{
-	/* verify and reset the hw mutex lock */
-	if (core_id == ipclite->ipcmem.toc->global_atomic_hwlock_owner) {
-		ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
-		hwspin_unlock_raw(ipclite->hwlock);
-	}
-}
-EXPORT_SYMBOL(ipclite_hwlock_reset);
-
 static void ipclite_hw_mutex_acquire(void)
 {
 	int32_t ret;
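The dropped ipclite_hwlock_reset() export is superseded by the equivalent ownership check at the top of the new ipclite_recover() below, with the owner word relocated under toc->recovery. A minimal sketch of the owner-tracked raw hwspinlock pattern, assuming a shared owner field visible to all cores; everything except the hwspinlock API itself is illustrative:

	#include <linux/hwspinlock.h>

	#define EXAMPLE_INVALID_HOST	0xFF

	static void example_lock(struct hwspinlock *hwlock, u32 *owner, u32 self)
	{
		/* hwspin_trylock_raw() returns 0 on success */
		while (hwspin_trylock_raw(hwlock))
			cpu_relax();
		*owner = self;	/* record ownership in shared memory */
	}

	static void example_recover(struct hwspinlock *hwlock, u32 *owner, u32 crashed)
	{
		/* force-release the lock if the crashed core died holding it */
		if (*owner == crashed) {
			*owner = EXAMPLE_INVALID_HOST;
			hwspin_unlock_raw(hwlock);
		}
	}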
@@ -61,7 +53,7 @@ static void ipclite_hw_mutex_acquire(void)
 	if (ret)
 		pr_err("Hw mutex lock acquire failed\n");
 
-	ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_APPS;
+	ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS;
 
 	pr_debug("Hw mutex lock acquired\n");
 }
@@ -72,7 +64,8 @@ static void ipclite_hw_mutex_release(void)
 {
 	if (ipclite != NULL) {
 		if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
-			ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+			ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner =
+							IPCMEM_INVALID_HOST;
 			hwspin_unlock_irqrestore(ipclite->hwlock,
 				&ipclite->ipclite_hw_mutex->flags);
 			pr_debug("Hw mutex lock release\n");
@@ -479,6 +472,100 @@ static int ipclite_tx(struct ipclite_channel *channel,
 	return ret;
 }
 
+int ipclite_ssr_update(int32_t proc_id)
+{
+	int ret = 0;
+
+	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
+		pr_debug("Invalid proc_id %d\n", proc_id);
+		return -EINVAL;
+	}
+
+	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
+		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
+			channel_status_info[proc_id] = CHANNEL_ACTIVE;
+		} else {
+			pr_err("Cannot send msg to remote client. Channel inactive\n");
+			return -IPCLITE_EINCHAN;
+		}
+	}
+
+	ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan,
+								NULL);
+	if (ret < 0) {
+		pr_debug("Signal sending failed to core : %d ret : %d\n", proc_id, ret);
+		return ret;
+	}
+
+	pr_debug("SSR update send completed with ret=%d\n", ret);
+	return ret;
+}
+
+void ipclite_recover(enum ipcmem_host_type core_id)
+{
+	int ret, i, host, host0, host1;
+
+	pr_debug("IPCLite Recover - Crashed Core : %d\n", core_id);
+
+	/* verify and reset the hw mutex lock */
+	if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) {
+		ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+		hwspin_unlock_raw(ipclite->hwlock);
+		pr_debug("HW Lock Reset\n");
+	}
+
+	mutex_lock(&ssr_mutex);
+
+	/* Set the Global Channel Status to 0 to avoid Race condition */
+	for (i = 0; i < MAX_PARTITION_COUNT; i++) {
+		host0 = ipcmem_toc_partition_entries[i].host0;
+		host1 = ipcmem_toc_partition_entries[i].host1;
+
+		if (host0 == core_id || host1 == core_id) {
+			ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
+				(&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0);
+			ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
+				(&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0);
+
+			channel_status_info[core_id] =
+					ipclite->ipcmem.toc->toc_entry[host0][host1].status;
+		}
+		pr_debug("Global Channel Status : [%d][%d] : %d\n", host0, host1,
+					ipclite->ipcmem.toc->toc_entry[host0][host1].status);
+		pr_debug("Global Channel Status : [%d][%d] : %d\n", host1, host0,
+					ipclite->ipcmem.toc->toc_entry[host1][host0].status);
+	}
+
+	/* Resets the TX/RX queue */
+	*(ipclite->channel[core_id].tx_fifo->head) = 0;
+	*(ipclite->channel[core_id].rx_fifo->tail) = 0;
+
+	pr_debug("TX Fifo Reset : %d\n", *(ipclite->channel[core_id].tx_fifo->head));
+	pr_debug("RX Fifo Reset : %d\n", *(ipclite->channel[core_id].rx_fifo->tail));
+
+	/* Increment the Global Channel Status for APPS and crashed core */
+	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
+			(&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status)));
+	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
+			(&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status)));
+
+	channel_status_info[core_id] =
+			ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status;
+
+	/* Update other cores about SSR */
+	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
+		if (host != core_id) {
+			ret = ipclite_ssr_update(host);
+			if (ret < 0)
+				pr_debug("Failed to send the SSR update %d\n", host);
+			else
+				pr_debug("SSR update sent to host %d\n", host);
+		}
+	}
+
+	mutex_unlock(&ssr_mutex);
+}
+EXPORT_SYMBOL(ipclite_recover);
+
 int ipclite_msg_send(int32_t proc_id, uint64_t data)
 {
 	int ret = 0;
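For context, ipclite_recover() above is expected to run once the crashed core is fully down. A hypothetical wiring through the remoteproc SSR notifier chain might look like the sketch below; qcom_register_ssr_notifier() and the QCOM_SSR_* actions are the real upstream API, but the subsystem name and the host ID mapping are illustrative:

	#include <linux/notifier.h>
	#include <linux/remoteproc/qcom_rproc.h>

	static int example_ssr_cb(struct notifier_block *nb,
				  unsigned long action, void *data)
	{
		/* reset IPCLite state only after the remote has shut down */
		if (action == QCOM_SSR_AFTER_SHUTDOWN)
			ipclite_recover(IPCMEM_CDSP);	/* example host ID */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_ssr_nb = {
		.notifier_call = example_ssr_cb,
	};

	/* at probe: handle = qcom_register_ssr_notifier("cdsp", &example_ssr_nb); */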
@@ -488,9 +575,13 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data)
 		return -EINVAL;
 	}
 
-	if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) {
-		pr_err("Cannot send msg to remote client. Channel inactive\n");
-		return -ENXIO;
+	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
+		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
+			channel_status_info[proc_id] = CHANNEL_ACTIVE;
+		} else {
+			pr_err("Cannot send msg to remote client. Channel inactive\n");
+			return -IPCLITE_EINCHAN;
+		}
 	}
 
 	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
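Callers now see -IPCLITE_EINCHAN rather than -ENXIO while a peer channel is inactive, a condition that is transient across an SSR cycle. A hypothetical client-side retry, with an illustrative delay and retry count:

	#include <linux/delay.h>

	static int example_send_retry(int32_t proc_id, uint64_t data)
	{
		int ret, tries = 5;

		do {
			ret = ipclite_msg_send(proc_id, data);
			if (ret != -IPCLITE_EINCHAN)
				break;	/* sent, or a non-channel failure */
			msleep(20);	/* wait out channel re-activation */
		} while (--tries);

		return ret;
	}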
@@ -523,10 +614,13 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
 		return -EINVAL;
 	}
 
 	/* Limit Message Sending without Client Registration */
-	if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) {
-		pr_err("Cannot send msg to remote client. Channel inactive\n");
-		return -ENXIO;
+	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
+		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
+			channel_status_info[proc_id] = CHANNEL_ACTIVE;
+		} else {
+			pr_err("Cannot send msg to remote client. Channel inactive\n");
+			return -IPCLITE_EINCHAN;
+		}
 	}
 
 	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
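The same check-the-local-cache-then-reread-the-global-TOC sequence now appears verbatim in ipclite_ssr_update(), ipclite_msg_send() and ipclite_test_msg_send(); it could be collapsed into one helper along these lines (a sketch, not part of the patch):

	static bool ipclite_channel_is_active(int32_t proc_id)
	{
		if (channel_status_info[proc_id] == CHANNEL_ACTIVE)
			return true;

		/* local cache may be stale; re-read the globally shared status */
		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status ==
				CHANNEL_ACTIVE) {
			channel_status_info[proc_id] = CHANNEL_ACTIVE;
			return true;
		}
		return false;
	}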
@@ -586,7 +680,7 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
 static void ipcmem_init(struct ipclite_mem *ipcmem)
 {
-	int host0, host1;
+	int host, host0, host1;
 	int i = 0;
 
 	ipcmem->toc = ipcmem->mem.virt_base;
@@ -621,6 +715,28 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
 		ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i];
 		ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i];
 
+		if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) {
+			/* Updating the Global Channel Status for APPS Loopback */
+			ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE;
+			ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE;
+
+			/* Updating Local Channel Status */
+			channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status;
+
+		} else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) {
+			/* Updating the Global Channel Status */
+			ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS;
+			ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS;
+
+			/* Updating Local Channel Status */
+			if (host0 == IPCMEM_APPS)
+				host = host1;
+			else if (host1 == IPCMEM_APPS)
+				host = host0;
+
+			channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status;
+		}
+
 		ipcmem->partition[i] = (struct ipcmem_partition *)
 					((char *)ipcmem->mem.virt_base +
 					ipcmem_toc_partition_entries[i].base_offset);
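Taken together with ipclite_recover() above, the global status behaves as a small state machine: init leaves APPS-to-remote pairs in CHANNEL_ACTIVATE_IN_PROGRESS, the handshake promotes them to CHANNEL_ACTIVE, and recovery stores 0 then increments once per direction, landing back in the activate-in-progress state. That reading assumes the usual 0/1/2 encoding; the header diff defining these constants is not shown, so the values below are an assumption:

	/* assumed encoding; only the ordering is relied on by the
	 * zero-then-increment recovery sequence above */
	enum example_channel_status {
		CHANNEL_INACTIVE = 0,
		CHANNEL_ACTIVATE_IN_PROGRESS = 1,
		CHANNEL_ACTIVE = 2,
	};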
@@ -668,7 +784,7 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n
 {
 	int ret = 0;
 	u32 index;
-	char strs[4][9] = {"msg", "mem-init", "version", "test"};
+	char strs[5][9] = {"msg", "mem-init", "version", "test", "ssr"};
 	struct ipclite_irq_info *irq_info;
 	struct device *dev;
@@ -887,7 +1003,8 @@ static int ipclite_channel_init(struct device *parent,
 			goto err_put_dev;
 		}
 	}
 	ipclite->channel[remote_pid].channel_status = ACTIVE_CHANNEL;
+	ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE;
 
 	pr_debug("Channel init completed, ret = %d\n", ret);
 	return ret;
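The toc->recovery fields touched by this patch (global_atomic_hwlock_owner in the hw-mutex paths, configured_core here) imply a shared structure roughly like the sketch below; the real definition lives in this commit's header changes, which are not shown, so the layout and comments are inferred:

	struct ipcmem_recovery_sketch {
		u32 global_atomic_hwlock_owner;		/* host holding the hw mutex */
		u32 configured_core[IPCMEM_NUM_HOSTS];	/* CONFIGURED_CORE once channel init done */
	};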
@@ -941,6 +1058,9 @@ static int ipclite_probe(struct platform_device *pdev)
 	}
 	pr_debug("Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock);
 
+	/* Initializing Local Mutex Lock for SSR functionality */
+	mutex_init(&ssr_mutex);
+
 	ret = map_ipcmem(ipclite, "memory-region");
 	if (ret) {
 		pr_err("failed to map ipcmem\n");
@@ -990,7 +1110,7 @@ static int ipclite_probe(struct platform_device *pdev)
 	ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
 
 	/* initialize hwlock owner to invalid host */
-	ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+	ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
 
 	pr_info("IPCLite probe completed successfully\n");
 	return ret;