msm: ipclite: Rebased SSR updates from kernel_platform

Added the latest SSR (Subsystem Restart) updates in IPCLite from
kernel_platform to vendor space.

Change-Id: I9e551a0d69f45d89cae2165e25468945fcc68f7f
Signed-off-by: Chelliah Vinu R <quic_chelliah@quicinc.com>
Author: Chelliah Vinu R
Date:   2022-10-23 16:37:56 +05:30
parent 25cb61693a
commit a2639f4c3d
4 changed files with 173 additions and 36 deletions
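
The core of this change is that the old ipclite_hwlock_reset() export is replaced by a single ipclite_recover() entry point that a client calls once a remote core's crash has been reported. The sketch below is illustrative only and not part of the diff: the my_ssr_domain enum and my_map_domain_to_host() helper are hypothetical client-side stand-ins, mirroring how the in-tree caller synx_global_recover() maps its core id in the last hunk of this commit.

/*
 * Illustrative sketch, not part of this commit.  After this change a client
 * that learns of a remote-core crash hands the crashed host to IPCLite with
 * one call: ipclite_recover() releases the hw mutex if the dead core held
 * it, clears and re-arms the shared channel status, resets the FIFOs and
 * notifies the surviving cores over the new "ssr" mailbox signal.
 */
#include "ipclite_client.h"	/* assumed header exporting ipclite_recover() */

enum my_ssr_domain {		/* hypothetical client-side domain ids */
	MY_SSR_CDSP,
	MY_SSR_CVP,
	MY_SSR_VPU,
};

static enum ipcmem_host_type my_map_domain_to_host(enum my_ssr_domain d)
{
	switch (d) {
	case MY_SSR_CDSP:
		return IPCMEM_CDSP;
	case MY_SSR_CVP:
		return IPCMEM_CVP;
	case MY_SSR_VPU:
		return IPCMEM_VPU;
	default:
		return IPCMEM_INVALID_HOST;
	}
}

static void my_handle_remote_crash(enum my_ssr_domain d)
{
	enum ipcmem_host_type host = my_map_domain_to_host(d);

	if (host != IPCMEM_INVALID_HOST)
		ipclite_recover(host);
}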

View file

@@ -33,22 +33,14 @@ static struct ipclite_info *ipclite;
static struct ipclite_client synx_client;
static struct ipclite_client test_client;
struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
struct mutex ssr_mutex;
uint32_t channel_status_info[IPCMEM_NUM_HOSTS];
u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
void ipclite_hwlock_reset(enum ipcmem_host_type core_id)
{
/* verify and reset the hw mutex lock */
if (core_id == ipclite->ipcmem.toc->global_atomic_hwlock_owner) {
ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
hwspin_unlock_raw(ipclite->hwlock);
}
}
EXPORT_SYMBOL(ipclite_hwlock_reset);
static void ipclite_hw_mutex_acquire(void)
{
int32_t ret;
@@ -61,7 +53,7 @@ static void ipclite_hw_mutex_acquire(void)
if (ret)
pr_err("Hw mutex lock acquire failed\n");
ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_APPS;
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS;
pr_debug("Hw mutex lock acquired\n");
}
@@ -72,7 +64,8 @@ static void ipclite_hw_mutex_release(void)
{
if (ipclite != NULL) {
if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner =
IPCMEM_INVALID_HOST;
hwspin_unlock_irqrestore(ipclite->hwlock,
&ipclite->ipclite_hw_mutex->flags);
pr_debug("Hw mutex lock release\n");
@@ -479,6 +472,100 @@ static int ipclite_tx(struct ipclite_channel *channel,
return ret;
}
int ipclite_ssr_update(int32_t proc_id)
{
int ret = 0;
if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
pr_debug("Invalid proc_id %d\n", proc_id);
return -EINVAL;
}
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
} else {
pr_err("Cannot send msg to remote client. Channel inactive\n");
return -IPCLITE_EINCHAN;
}
}
ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan,
NULL);
if (ret < 0) {
pr_debug("Signal sending failed to core : %d ret : %d\n", proc_id, ret);
return ret;
}
pr_debug("SSR update send completed with ret=%d\n", ret);
return ret;
}
void ipclite_recover(enum ipcmem_host_type core_id)
{
int ret, i, host, host0, host1;
pr_debug("IPCLite Recover - Crashed Core : %d\n", core_id);
/* verify and reset the hw mutex lock */
if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) {
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
hwspin_unlock_raw(ipclite->hwlock);
pr_debug("HW Lock Reset\n");
}
mutex_lock(&ssr_mutex);
/* Set the Global Channel Status to 0 to avoid Race condition */
for (i = 0; i < MAX_PARTITION_COUNT; i++) {
host0 = ipcmem_toc_partition_entries[i].host0;
host1 = ipcmem_toc_partition_entries[i].host1;
if (host0 == core_id || host1 == core_id) {
ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0);
ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0);
channel_status_info[core_id] =
ipclite->ipcmem.toc->toc_entry[host0][host1].status;
}
pr_debug("Global Channel Status : [%d][%d] : %d\n", host0, host1,
ipclite->ipcmem.toc->toc_entry[host0][host1].status);
pr_debug("Global Channel Status : [%d][%d] : %d\n", host1, host0,
ipclite->ipcmem.toc->toc_entry[host1][host0].status);
}
/* Resets the TX/RX queue */
*(ipclite->channel[core_id].tx_fifo->head) = 0;
*(ipclite->channel[core_id].rx_fifo->tail) = 0;
pr_debug("TX Fifo Reset : %d\n", *(ipclite->channel[core_id].tx_fifo->head));
pr_debug("RX Fifo Reset : %d\n", *(ipclite->channel[core_id].rx_fifo->tail));
/* Increment the Global Channel Status for APPS and crashed core*/
ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status)));
ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status)));
channel_status_info[core_id] =
ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status;
/* Update other cores about SSR */
for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
if (host != core_id) {
ret = ipclite_ssr_update(host);
if (ret < 0)
pr_debug("Failed to send the SSR update %d\n", host);
else
pr_debug("SSR update sent to host %d\n", host);
}
}
mutex_unlock(&ssr_mutex);
}
EXPORT_SYMBOL(ipclite_recover);
int ipclite_msg_send(int32_t proc_id, uint64_t data)
{
int ret = 0;
@@ -488,9 +575,13 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data)
return -EINVAL;
}
if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) {
pr_err("Cannot send msg to remote client. Channel inactive\n");
return -ENXIO;
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
} else {
pr_err("Cannot send msg to remote client. Channel inactive\n");
return -IPCLITE_EINCHAN;
}
}
ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
@@ -523,10 +614,13 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
return -EINVAL;
}
/* Limit Message Sending without Client Registration */
if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) {
pr_err("Cannot send msg to remote client. Channel inactive\n");
return -ENXIO;
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
} else {
pr_err("Cannot send msg to remote client. Channel inactive\n");
return -IPCLITE_EINCHAN;
}
}
ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
@@ -586,7 +680,7 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
static void ipcmem_init(struct ipclite_mem *ipcmem)
{
int host0, host1;
int host, host0, host1;
int i = 0;
ipcmem->toc = ipcmem->mem.virt_base;
@@ -621,6 +715,28 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i];
ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i];
if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) {
/* Updating the Global Channel Status for APPS Loopback */
ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE;
ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE;
/* Updating Local Channel Status */
channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status;
} else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) {
/* Updating the Global Channel Status */
ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS;
ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS;
/* Updating Local Channel Status */
if (host0 == IPCMEM_APPS)
host = host1;
else if (host1 == IPCMEM_APPS)
host = host0;
channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status;
}
ipcmem->partition[i] = (struct ipcmem_partition *)
((char *)ipcmem->mem.virt_base +
ipcmem_toc_partition_entries[i].base_offset);
@@ -668,7 +784,7 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n
{
int ret = 0;
u32 index;
char strs[4][9] = {"msg", "mem-init", "version", "test"};
char strs[5][9] = {"msg", "mem-init", "version", "test", "ssr"};
struct ipclite_irq_info *irq_info;
struct device *dev;
@@ -887,7 +1003,8 @@ static int ipclite_channel_init(struct device *parent,
goto err_put_dev;
}
}
ipclite->channel[remote_pid].channel_status = ACTIVE_CHANNEL;
ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE;
pr_debug("Channel init completed, ret = %d\n", ret);
return ret;
@@ -941,6 +1058,9 @@ static int ipclite_probe(struct platform_device *pdev)
}
pr_debug("Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock);
/* Initializing Local Mutex Lock for SSR functionality */
mutex_init(&ssr_mutex);
ret = map_ipcmem(ipclite, "memory-region");
if (ret) {
pr_err("failed to map ipcmem\n");
@@ -990,7 +1110,7 @@ static int ipclite_probe(struct platform_device *pdev)
ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
/* initialize hwlock owner to invalid host */
ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
pr_info("IPCLite probe completed successfully\n");
return ret;
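
The send paths no longer rely on the per-channel channel_status field alone: they consult a local cache (channel_status_info[]) and fall back to the shared TOC entry, so a channel torn down by an SSR is picked up again without re-probing. The helper below is an illustrative condensation of the check that ipclite_msg_send() and ipclite_test_msg_send() now open-code; it is not how the driver factors the logic.

/*
 * Illustrative condensation of the check now open-coded in ipclite_msg_send()
 * and ipclite_test_msg_send(); proc_id is assumed to be range-checked by the
 * caller, as in the driver.
 */
static int my_channel_ready(int32_t proc_id)
{
	/* Fast path: the local cache already saw this channel go active. */
	if (channel_status_info[proc_id] == CHANNEL_ACTIVE)
		return 0;

	/*
	 * Slow path: re-read the APPS row of the shared TOC.  ipcmem_init()
	 * and ipclite_recover() leave it at CHANNEL_ACTIVATE_IN_PROGRESS, and
	 * it presumably reaches CHANNEL_ACTIVE once the peer finishes its own
	 * initialisation.
	 */
	if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
		channel_status_info[proc_id] = CHANNEL_ACTIVE;
		return 0;
	}

	pr_err("Cannot send msg to remote client. Channel inactive\n");
	return -IPCLITE_EINCHAN;
}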

View file

@@ -14,7 +14,7 @@
#define ACTIVE_CHANNEL 0x1
#define IPCMEM_TOC_SIZE (4*1024)
#define MAX_CHANNEL_SIGNALS 4
#define MAX_CHANNEL_SIGNALS 5
#define MAX_PARTITION_COUNT 7 /*7 partitions other than global partition*/
@@ -22,6 +22,7 @@
#define IPCLITE_MEM_INIT_SIGNAL 1
#define IPCLITE_VERSION_SIGNAL 2
#define IPCLITE_TEST_SIGNAL 3
#define IPCLITE_SSR_SIGNAL 4
/** Flag definitions for the entries */
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01)
@@ -38,6 +39,12 @@
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
#define CHANNEL_INACTIVE 0
#define CHANNEL_ACTIVATE_IN_PROGRESS 1
#define CHANNEL_ACTIVE 2
#define CONFIGURED_CORE 1
/*IPCMEM Structure Definitions*/
struct ipclite_features {
@@ -45,6 +52,11 @@ struct ipclite_features {
uint32_t version_finalised;
};
struct ipclite_recover {
uint32_t global_atomic_hwlock_owner;
uint32_t configured_core[IPCMEM_NUM_HOSTS];
};
struct ipcmem_partition_header {
uint32_t type; /*partition type*/
uint32_t desc_offset; /*descriptor offset*/
@@ -77,7 +89,7 @@ struct ipcmem_toc {
/* as ipcmem is 4k and if host number increases */
/* it would create problems*/
struct ipclite_features ipclite_features;
uint32_t global_atomic_hwlock_owner;
struct ipclite_recover recovery;
};
struct ipcmem_region {
@@ -202,7 +214,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CDSP,
1,
CHANNEL_INACTIVE,
},
/* APPS<->CVP (EVA) partition. */
{
@@ -211,7 +223,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CVP,
1,
CHANNEL_INACTIVE,
},
/* APPS<->VPU partition. */
{
@@ -220,7 +232,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_VPU,
1,
CHANNEL_INACTIVE,
},
/* CDSP<->CVP (EVA) partition. */
{
@@ -229,7 +241,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_CVP,
1,
CHANNEL_INACTIVE,
},
/* CDSP<->VPU partition. */
{
@@ -238,7 +250,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_VPU,
1,
CHANNEL_INACTIVE,
},
/* VPU<->CVP (EVA) partition. */
{
@@ -247,7 +259,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_VPU,
IPCMEM_CVP,
1,
CHANNEL_INACTIVE,
},
/* APPS<->APPS partition. */
{
@@ -256,7 +268,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_APPS,
1,
CHANNEL_INACTIVE,
}
/* Last entry uses invalid hosts and no protections to signify the end. */
/* {
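
A note on layout: this commit replaces the single global_atomic_hwlock_owner word in struct ipcmem_toc with struct ipclite_recover, which carries IPCMEM_NUM_HOSTS + 1 words, and the TOC must still fit the fixed IPCMEM_TOC_SIZE carve-out of 4 KiB that the comment in the hunk above warns about. The guard below is not part of the patch; it is a hedged suggestion using the kernel's static_assert() from <linux/build_bug.h> to turn a future overflow into a compile error.

#include <linux/build_bug.h>	/* static_assert() */

/*
 * Illustrative guard, not part of this commit: fail the build if the shared
 * TOC ever outgrows its fixed 4 KiB carve-out, e.g. because IPCMEM_NUM_HOSTS
 * grows and struct ipclite_recover grows with it.
 */
static_assert(sizeof(struct ipcmem_toc) <= IPCMEM_TOC_SIZE,
	      "struct ipcmem_toc does not fit in IPCMEM_TOC_SIZE");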

View file

@@ -27,6 +27,11 @@ enum ipcmem_host_type {
IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */
};
/**
* IPCLite return codes
*/
#define IPCLITE_EINCHAN 9 /**< Inactive Channel */
struct global_region_info {
void *virt_base;
uint32_t size;
@@ -84,13 +89,13 @@ int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
int32_t get_global_partition_info(struct global_region_info *global_ipcmem);
/**
* ipclite_hwlock_reset() - Resets the lock if the lock is currently held by core_id
* ipclite_recover() - Recovers the ipclite if any core goes for SSR
*
* core_id : takes the core id of which the lock needs to be resetted.
* core_id : takes the core id of the core which went to SSR.
*
* @return None.
*/
void ipclite_hwlock_reset(enum ipcmem_host_type core_id);
void ipclite_recover(enum ipcmem_host_type core_id);
/**
* ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value.

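The new IPCLITE_EINCHAN code gives callers a distinct, non-fatal failure for "peer channel not active", which the send paths in the first file now return instead of -ENXIO. The wrapper below is a hedged sketch of how a client might react; my_send_or_defer() is illustrative and not an in-tree helper.

/*
 * Illustrative client-side wrapper, not part of this commit.  With this change
 * ipclite_msg_send() returns -IPCLITE_EINCHAN (rather than -ENXIO) while the
 * peer channel is inactive, e.g. before the remote core has come back from
 * SSR, so the caller can defer the message instead of failing hard.
 */
static int my_send_or_defer(int32_t proc_id, uint64_t data)
{
	int ret = ipclite_msg_send(proc_id, data);

	if (ret == -IPCLITE_EINCHAN) {
		/* Peer not (re)activated yet: report "try again later". */
		pr_debug("host %d not ready, deferring message\n", proc_id);
		return -EAGAIN;
	}

	return ret;
}
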
View file

@@ -744,7 +744,7 @@ int synx_global_recover(enum synx_core_id core_id)
if (!clear_idx)
return -SYNX_NOMEM;
ipclite_hwlock_reset(synx_global_map_core_id(core_id));
ipclite_recover(synx_global_map_core_id(core_id));
/* recover synx gmem lock if it was owned by core in ssr */
if (synx_gmem_lock_owner(0) == core_id) {