msm: synx: ipclite: TOC Restructuring

1. Offset-based TOC setup, which is parsed by the
   FWs to build the required structures.

2. Dynamic partitioning support - the enabled hosts
   are parsed from DT and only the required
   partitions are allocated in the global memory.

3. Magic-number-based TOC header data integrity check.

4. Clean-ups
 - Channel status moved to partition header
 - Use only standard kernel return codes

Backward compatibility:

Older APPSS code stores toc.size in place of the
magic number, so the first TOC word reads 4096
(IPCMEM_TOC_SIZE); FWs should detect this and fall
back to the older structures.
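
For illustration only (not part of the change), a
firmware-side check along these lines could
distinguish the two layouts; the helper name is
hypothetical, IPCMEM_TOC_SIZE is 4 KB:

static bool ipcmem_toc_is_legacy(const uint32_t *toc_base)
{
	/* Older APPSS code wrote toc.size (4096) into the first word;
	 * the new layout stores the checksum-based magic number there.
	 */
	return toc_base[0] == IPCMEM_TOC_SIZE;
}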

Change-Id: I776eca4bdd997e983d35ef1e1f068cf73cdb72f7
Signed-off-by: Chelliah Vinu R <quic_chelliah@quicinc.com>

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
@@ -39,11 +39,22 @@ static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem;
static struct mutex ssr_mutex;
static struct kobject *sysfs_kobj;
static uint32_t channel_status_info[IPCMEM_NUM_HOSTS];
static uint32_t enabled_hosts;
static uint32_t partitions;
static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO;
static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump;
static inline bool is_host_enabled(uint32_t host)
{
return (1U & (enabled_hosts >> host));
}
static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1)
{
return (h0 == h1 && h0 != IPCMEM_APPS);
}
static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...)
{
uint32_t local_index = 0;
@@ -83,7 +94,8 @@ static void ipclite_dump_debug_struct(void)
pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n");
for (host = 0; host < IPCMEM_NUM_HOSTS; host++) {
if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
if (!is_host_enabled(host))
continue;
temp_dbg_struct = (struct ipclite_debug_struct *)
(((char *)ipclite_dbg_struct) +
(sizeof(*temp_dbg_struct) * host));
@@ -101,9 +113,9 @@ static void ipclite_dump_debug_struct(void)
temp_dbg_struct->dbg_info_overall.last_sigid_recv);
for (i = 0; i < IPCMEM_NUM_HOSTS; i++) {
if (ipclite->ipcmem.toc->recovery.configured_core[i]) {
pr_info("----------> Host ID : %d Host ID : %d Channel State: %d\n",
host, i, ipclite->ipcmem.toc->toc_entry[host][i].status);
if (!is_host_enabled(i))
continue;
pr_info("----------> Host ID : %d Host ID : %d\n", host, i);
pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n",
temp_dbg_struct->dbg_info_host[i].numsig_sent,
temp_dbg_struct->dbg_info_host[i].numsig_recv);
@@ -129,8 +141,6 @@ static void ipclite_dump_debug_struct(void)
temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]);
}
}
}
}
return;
}
@@ -178,7 +188,7 @@ static void ipclite_hw_mutex_acquire(void)
int32_t ret;
if (ipclite != NULL) {
if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
if (!global_atomic_support) {
ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
HWSPINLOCK_TIMEOUT,
&ipclite->ipclite_hw_mutex->flags);
@@ -187,7 +197,7 @@ static void ipclite_hw_mutex_acquire(void)
return;
}
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS;
ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS;
IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n");
}
@@ -197,9 +207,8 @@ static void ipclite_hw_mutex_acquire(void)
static void ipclite_hw_mutex_release(void)
{
if (ipclite != NULL) {
if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner =
IPCMEM_INVALID_HOST;
if (!global_atomic_support) {
ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
hwspin_unlock_irqrestore(ipclite->hwlock,
&ipclite->ipclite_hw_mutex->flags);
IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n");
@@ -636,6 +645,15 @@ static int ipclite_tx(struct ipclite_channel *channel,
unsigned long flags;
int ret = 0;
if (channel->status != ACTIVE) {
if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
channel->status = ACTIVE;
} else {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Channel not active\n");
return -EOPNOTSUPP;
}
}
spin_lock_irqsave(&channel->tx_lock, flags);
if (ipclite_tx_avail(channel) < dlen) {
spin_unlock_irqrestore(&channel->tx_lock, flags);
@@ -656,102 +674,98 @@ static int ipclite_tx(struct ipclite_channel *channel,
static int ipclite_send_debug_info(int32_t proc_id)
{
int ret = 0;
struct ipclite_channel *channel;
if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
return -EINVAL;
}
channel = &ipclite->channel[proc_id];
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
if (channel->status != ACTIVE) {
if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
channel->status = ACTIVE;
} else {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
return -IPCLITE_EINCHAN;
return -EOPNOTSUPP;
}
}
ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan,
NULL);
if (ret < IPCLITE_SUCCESS) {
ret = mbox_send_message(channel->irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, NULL);
if (ret < 0) {
IPCLITE_OS_LOG(IPCLITE_ERR,
"Debug Signal sending failed to Core : %d Signal : %d ret : %d\n",
proc_id, IPCLITE_DEBUG_SIGNAL, ret);
return -IPCLITE_FAILURE;
return ret;
}
IPCLITE_OS_LOG(IPCLITE_DBG,
"Debug Signal send completed to core : %d signal : %d ret : %d\n",
proc_id, IPCLITE_DEBUG_SIGNAL, ret);
return IPCLITE_SUCCESS;
return 0;
}
int ipclite_ssr_update(int32_t proc_id)
{
int ret = 0;
struct ipclite_channel *channel;
if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
return -EINVAL;
}
channel = &ipclite->channel[proc_id];
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
if (channel->status != ACTIVE) {
if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
channel->status = ACTIVE;
} else {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
return -IPCLITE_EINCHAN;
return -EOPNOTSUPP;
}
}
ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan,
NULL);
if (ret < IPCLITE_SUCCESS) {
ret = mbox_send_message(channel->irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, NULL);
if (ret < 0) {
IPCLITE_OS_LOG(IPCLITE_ERR,
"SSR Signal sending failed to Core : %d Signal : %d ret : %d\n",
proc_id, IPCLITE_SSR_SIGNAL, ret);
return -IPCLITE_FAILURE;
return ret;
}
IPCLITE_OS_LOG(IPCLITE_DBG,
"SSR Signal send completed to core : %d signal : %d ret : %d\n",
proc_id, IPCLITE_SSR_SIGNAL, ret);
return IPCLITE_SUCCESS;
return 0;
}
void ipclite_recover(enum ipcmem_host_type core_id)
{
int ret, i, host, host0, host1;
int ret, host, host0, host1;
uint32_t p;
IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id);
/* verify and reset the hw mutex lock */
if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) {
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) {
ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
hwspin_unlock_raw(ipclite->hwlock);
IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n");
}
mutex_lock(&ssr_mutex);
/* Set the Global Channel Status to 0 to avoid Race condition */
for (i = 0; i < MAX_PARTITION_COUNT; i++) {
host0 = ipcmem_toc_partition_entries[i].host0;
host1 = ipcmem_toc_partition_entries[i].host1;
if (host0 == core_id || host1 == core_id) {
for (p = 0; p < partitions; p++) {
host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0;
host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1;
if (host0 != core_id && host1 != core_id)
continue;
ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0);
ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0);
(&(ipclite->ipcmem.partition[p]->hdr.status)), 0);
channel_status_info[core_id] =
ipclite->ipcmem.toc->toc_entry[host0][host1].status;
}
IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host0, host1,
ipclite->ipcmem.toc->toc_entry[host0][host1].status);
IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host1, host0,
ipclite->ipcmem.toc->toc_entry[host1][host0].status);
IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n",
host0, host1, ipclite->ipcmem.partition[p]->hdr.status);
}
/* Resets the TX/RX queue */
@@ -765,24 +779,20 @@ void ipclite_recover(enum ipcmem_host_type core_id)
/* Increment the Global Channel Status for APPS and crashed core*/
ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status)));
ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
(&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status)));
ipclite->channel[core_id].gstatus_ptr);
channel_status_info[core_id] =
ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status;
ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr;
/* Update other cores about SSR */
for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
if (host != core_id && ipclite->ipcmem.toc->recovery.configured_core[host]) {
if (!is_host_enabled(host) || host == core_id)
continue;
ret = ipclite_ssr_update(host);
if (ret < IPCLITE_SUCCESS)
IPCLITE_OS_LOG(IPCLITE_ERR,
"Failed to send SSR update to core : %d\n", host);
if (ret < 0)
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host);
else
IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host);
}
}
mutex_unlock(&ssr_mutex);
/* Dump the debug information */
@@ -804,15 +814,6 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data)
return -EINVAL;
}
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
} else {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
return -IPCLITE_EINCHAN;
}
}
ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
IPCLITE_MSG_SIGNAL);
@@ -845,15 +846,6 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
return -EINVAL;
}
if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
channel_status_info[proc_id] = CHANNEL_ACTIVE;
} else {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
return -IPCLITE_EINCHAN;
}
}
ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
IPCLITE_TEST_SIGNAL);
@@ -911,25 +903,67 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
return ret;
}
static void ipcmem_init(struct ipclite_mem *ipcmem)
/**
* insert_magic_number() - Inserts the magic number in toc header
*
* Computes a simple checksum of the toc header contents
* and stores the result in the magic_number field of the header
*/
static void insert_magic_number(void)
{
int host, host0, host1;
int i = 0;
uint32_t *block = ipclite->ipcmem.mem.virt_base;
size_t size = sizeof(struct ipcmem_toc_header) / sizeof(uint32_t);
ipcmem->toc = ipcmem->mem.virt_base;
IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);
for (int i = 1; i < size; i++)
block[0] ^= block[i];
ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE;
IPCLITE_OS_LOG(IPCLITE_DBG, "toc->hdr.size = %d\n", ipcmem->toc->hdr.size);
block[0] = ~block[0];
}
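A hedged counterpart (illustrative, not part of this change): a reader could validate the header by recomputing the same checksum, assuming the magic_number slot was zero when insert_magic_number() ran (the shared region is memset during probe).
static bool toc_magic_is_valid(const void *toc_base)
{
	const uint32_t *block = toc_base;
	size_t size = sizeof(struct ipcmem_toc_header) / sizeof(uint32_t);
	uint32_t csum = 0;
	size_t i;
	/* XOR the remaining header words, exactly as insert_magic_number() does */
	for (i = 1; i < size; i++)
		csum ^= block[i];
	/* the stored magic number is the bitwise complement of that XOR */
	return block[0] == (uint32_t)~csum;
}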
static int32_t setup_toc(struct ipclite_mem *ipcmem)
{
size_t offset = 0;
void *virt_base = ipcmem->mem.virt_base;
struct ipcmem_offsets *offsets = &ipcmem->toc->offsets;
struct ipcmem_toc_data *toc_data = &ipcmem->toc_data;
/* Setup Offsets */
offsets->host_info = offset += IPCMEM_TOC_VAR_OFFSET;
offsets->global_entry = offset += sizeof(struct ipcmem_host_info);
offsets->partition_info = offset += sizeof(struct ipcmem_partition_entry);
offsets->partition_entry = offset += sizeof(struct ipcmem_partition_info);
// offsets->debug = virt_base + size - 64K;
/* Offset to be used for any new structure added in toc (after partition_entry) */
// offsets->new_struct = offset += sizeof(struct ipcmem_partition_entry)*IPCMEM_NUM_HOSTS;
IPCLITE_OS_LOG(IPCLITE_DBG, "toc_data offsets:");
IPCLITE_OS_LOG(IPCLITE_DBG, "host_info = 0x%X", offsets->host_info);
IPCLITE_OS_LOG(IPCLITE_DBG, "global_entry = 0x%X", offsets->global_entry);
IPCLITE_OS_LOG(IPCLITE_DBG, "partition_info = 0x%X", offsets->partition_info);
IPCLITE_OS_LOG(IPCLITE_DBG, "partition_entry = 0x%X", offsets->partition_entry);
/* Point structures to the appropriate offset in TOC */
toc_data->host_info = ADD_OFFSET(virt_base, offsets->host_info);
toc_data->global_entry = ADD_OFFSET(virt_base, offsets->global_entry);
toc_data->partition_info = ADD_OFFSET(virt_base, offsets->partition_info);
toc_data->partition_entry = ADD_OFFSET(virt_base, offsets->partition_entry);
return 0;
}
static void setup_global_partition(struct ipclite_mem *ipcmem, uint32_t base_offset)
{
/*Fill in global partition details*/
ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry;
ipcmem->global_partition = (struct ipcmem_global_partition *)
((char *)ipcmem->mem.virt_base +
ipcmem_toc_global_partition_entry.base_offset);
ipcmem->toc_data.global_entry->base_offset = base_offset;
ipcmem->toc_data.global_entry->size = GLOBAL_PARTITION_SIZE;
ipcmem->toc_data.global_entry->flags = GLOBAL_PARTITION_FLAGS;
ipcmem->toc_data.global_entry->host0 = IPCMEM_GLOBAL_HOST;
ipcmem->toc_data.global_entry->host1 = IPCMEM_GLOBAL_HOST;
ipcmem->global_partition = ADD_OFFSET(ipcmem->mem.virt_base, base_offset);
IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n",
ipcmem_toc_global_partition_entry.base_offset,
base_offset,
ipcmem->global_partition);
ipcmem->global_partition->hdr = global_partition_hdr;
@@ -938,55 +972,112 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
ipcmem->global_partition->hdr.partition_type,
ipcmem->global_partition->hdr.region_offset,
ipcmem->global_partition->hdr.region_size);
/* Fill in each IPCMEM TOC entry from ipcmem_toc_partition_entries config*/
for (i = 0; i < MAX_PARTITION_COUNT; i++) {
host0 = ipcmem_toc_partition_entries[i].host0;
host1 = ipcmem_toc_partition_entries[i].host1;
IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);
ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i];
ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i];
if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) {
/* Updating the Global Channel Status for APPS Loopback */
ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE;
ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE;
/* Updating Local Channel Status */
channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status;
} else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) {
/* Updating the Global Channel Status */
ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS;
ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS;
/* Updating Local Channel Status */
if (host0 == IPCMEM_APPS)
host = host1;
else if (host1 == IPCMEM_APPS)
host = host0;
channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status;
}
ipcmem->partition[i] = (struct ipcmem_partition *)
((char *)ipcmem->mem.virt_base +
ipcmem_toc_partition_entries[i].base_offset);
static void update_partition(struct ipclite_mem *ipcmem, uint32_t p)
{
int host0 = ipcmem->toc_data.partition_entry[p].host0;
int host1 = ipcmem->toc_data.partition_entry[p].host1;
IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx\n",
i, ipcmem->partition[i],
i, ipcmem_toc_partition_entries[i].base_offset);
IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);
ipcmem->partition[p] = ADD_OFFSET(ipcmem->mem.virt_base,
ipcmem->toc_data.partition_entry[p].base_offset);
IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx",
p, ipcmem->partition[p],
p, ipcmem->toc_data.partition_entry[p].base_offset);
if (host0 == host1)
ipcmem->partition[i]->hdr = loopback_partition_hdr;
ipcmem->partition[p]->hdr = loopback_partition_hdr;
else
ipcmem->partition[i]->hdr = default_partition_hdr;
ipcmem->partition[p]->hdr = default_partition_hdr;
IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
ipcmem->partition[i]->hdr.type,
ipcmem->partition[i]->hdr.desc_offset,
ipcmem->partition[i]->hdr.desc_size);
IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d",
ipcmem->partition[p]->hdr.type,
ipcmem->partition[p]->hdr.desc_offset,
ipcmem->partition[p]->hdr.desc_size);
}
static int32_t setup_partitions(struct ipclite_mem *ipcmem, uint32_t base_offset)
{
uint32_t p, host0, host1;
uint32_t num_entry = 0;
/*Fill in each valid ipcmem partition table entry*/
for (host0 = 0; host0 < IPCMEM_NUM_HOSTS; host0++) {
if (!is_host_enabled(host0))
continue;
for (host1 = host0; host1 < IPCMEM_NUM_HOSTS; host1++) {
if (!is_host_enabled(host1) || is_loopback_except_apps(host0, host1))
continue;
ipcmem->toc_data.partition_entry[num_entry].base_offset = base_offset;
ipcmem->toc_data.partition_entry[num_entry].size = DEFAULT_PARTITION_SIZE;
ipcmem->toc_data.partition_entry[num_entry].flags = DEFAULT_PARTITION_FLAGS;
ipcmem->toc_data.partition_entry[num_entry].host0 = host0;
ipcmem->toc_data.partition_entry[num_entry].host1 = host1;
base_offset += DEFAULT_PARTITION_SIZE;
num_entry++;
}
}
IPCLITE_OS_LOG(IPCLITE_DBG, "total partitions = %u", num_entry);
ipcmem->partition = kcalloc(num_entry, sizeof(*ipcmem->partition), GFP_KERNEL);
if (!ipcmem->partition) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Partition Allocation failed");
return -ENOMEM;
}
/*Update appropriate partition based on partition entries*/
for (p = 0; p < num_entry; p++)
update_partition(ipcmem, p);
/*Set up info to parse partition entries*/
ipcmem->toc_data.partition_info->num_entries = partitions = num_entry;
ipcmem->toc_data.partition_info->entry_size = sizeof(struct ipcmem_partition_entry);
return 0;
}
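As a worked example of the dynamic sizing (assuming APPS itself is flagged in enabled_hosts): n enabled hosts yield n*(n-1)/2 peer partitions plus the single APPS loopback partition, so 4 enabled hosts produce 7 partitions of DEFAULT_PARTITION_SIZE each, rather than the fixed MAX_PARTITION_COUNT table used previously.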
static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn)
{
int ret;
uint32_t remote_pid;
uint32_t host_count = 0;
uint32_t gmem_offset = 0;
struct device_node *cn;
for_each_available_child_of_node(pn, cn) {
of_property_read_u32(cn, "qcom,remote-pid", &remote_pid);
if (remote_pid < IPCMEM_NUM_HOSTS) {
enabled_hosts |= BIT_MASK(remote_pid);
host_count++;
}
}
IPCLITE_OS_LOG(IPCLITE_DBG, "enabled_hosts = 0x%X", enabled_hosts);
IPCLITE_OS_LOG(IPCLITE_DBG, "host_count = %u", host_count);
ipcmem->toc = ipcmem->mem.virt_base;
IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);
ret = setup_toc(ipcmem);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up toc");
return ret;
}
/*Set up host related info*/
ipcmem->toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
ipcmem->toc_data.host_info->configured_host = enabled_hosts;
gmem_offset += IPCMEM_TOC_SIZE;
setup_global_partition(ipcmem, gmem_offset);
gmem_offset += GLOBAL_PARTITION_SIZE;
ret = setup_partitions(ipcmem, gmem_offset);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up partitions");
return ret;
}
/*Making sure all writes for ipcmem configurations are completed*/
@@ -994,6 +1085,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED;
IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n");
return 0;
}
static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
@@ -1089,8 +1181,22 @@ EXPORT_SYMBOL(get_global_partition_info);
static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid,
int remote_pid)
{
uint32_t p;
uint32_t found = -1;
for (p = 0; p < partitions; p++) {
if (ipcmem.toc_data.partition_entry[p].host0 == local_pid
&& ipcmem.toc_data.partition_entry[p].host1 == remote_pid) {
found = p;
break;
}
}
if (found < partitions)
return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
ipcmem.toc->toc_entry[local_pid][remote_pid].base_offset);
ipcmem.toc_data.partition_entry[found].base_offset);
else
return NULL;
}
static void ipclite_channel_release(struct device *dev)
@@ -1166,9 +1272,13 @@ static int ipclite_channel_init(struct device *parent,
}
IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo);
partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem,
local_pid, remote_pid);
partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, local_pid, remote_pid);
IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr);
if (!partition_hdr) {
ret = -ENOMEM;
goto err_put_dev;
}
descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset);
IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs);
@@ -1216,6 +1326,7 @@ static int ipclite_channel_init(struct device *parent,
ipclite->channel[remote_pid].remote_pid = remote_pid;
ipclite->channel[remote_pid].tx_fifo = tx_fifo;
ipclite->channel[remote_pid].rx_fifo = rx_fifo;
ipclite->channel[remote_pid].gstatus_ptr = &partition_hdr->status;
spin_lock_init(&ipclite->channel[remote_pid].tx_lock);
@@ -1228,12 +1339,19 @@ static int ipclite_channel_init(struct device *parent,
}
}
ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE;
/* Updating Local & Global Channel Status */
if (remote_pid == IPCMEM_APPS) {
*ipclite->channel[remote_pid].gstatus_ptr = ACTIVE;
ipclite->channel[remote_pid].status = ACTIVE;
} else {
*ipclite->channel[remote_pid].gstatus_ptr = IN_PROGRESS;
ipclite->channel[remote_pid].status = IN_PROGRESS;
}
IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret);
return ret;
err_put_dev:
ipclite->channel[remote_pid].channel_status = 0;
ipclite->channel[remote_pid].status = INACTIVE;
device_unregister(dev);
kfree(dev);
return ret;
@@ -1255,9 +1373,9 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
/* Parse the string from Sysfs Interface */
ret = kstrtoint(buf, 0, &ipclite_debug_level);
if (ret < IPCLITE_SUCCESS) {
if (ret < 0) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
return -IPCLITE_FAILURE;
return ret;
}
/* Check if debug structure is initialized */
@@ -1274,15 +1392,14 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
/* Signal other cores for updating the debug information */
for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
if (!is_host_enabled(host))
continue;
ret = ipclite_send_debug_info(host);
if (ret < IPCLITE_SUCCESS)
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n",
host);
if (ret < 0)
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
else
IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
}
}
return count;
}
@@ -1294,9 +1411,9 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
/* Parse the string from Sysfs Interface */
ret = kstrtoint(buf, 0, &ipclite_debug_control);
if (ret < IPCLITE_SUCCESS) {
if (ret < 0) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
return -IPCLITE_FAILURE;
return ret;
}
/* Check if debug structures are initialized */
@@ -1313,15 +1430,14 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
/* Signal other cores for updating the debug information */
for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
if (!is_host_enabled(host))
continue;
ret = ipclite_send_debug_info(host);
if (ret < IPCLITE_SUCCESS)
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n",
host);
if (ret < 0)
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
else
IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
}
}
return count;
}
@@ -1333,9 +1449,9 @@ static ssize_t ipclite_dbg_dump_write(struct kobject *kobj,
/* Parse the string from Sysfs Interface */
ret = kstrtoint(buf, 0, &ipclite_debug_dump);
if (ret < IPCLITE_SUCCESS) {
if (ret < 0) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
return -IPCLITE_FAILURE;
return ret;
}
/* Check if debug structures are initialized */
@@ -1363,37 +1479,42 @@ struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660,
static int ipclite_debug_sysfs_setup(void)
{
int ret = 0;
/* Creating a directory in /sys/kernel/ */
sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj);
if (!sysfs_kobj) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n");
return -IPCLITE_FAILURE;
return -ENOMEM;
}
/* Creating sysfs files/interfaces for debug */
if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr)) {
ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n");
return -IPCLITE_FAILURE;
return ret;
}
if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr)) {
ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n");
return -IPCLITE_FAILURE;
return ret;
}
if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr)) {
ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n");
return -IPCLITE_FAILURE;
return ret;
}
return IPCLITE_SUCCESS;
return ret;
}
static int ipclite_debug_info_setup(void)
{
/* Setting up the Debug Structures */
ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base +
ipclite->ipcmem.mem.size) - IPCLITE_DEBUG_SIZE);
ipclite->ipcmem.mem.size) - DEBUG_PARTITION_SIZE);
if (!ipclite_dbg_info)
return -EADDRNOTAVAIL;
@@ -1411,11 +1532,11 @@ static int ipclite_debug_info_setup(void)
return -EADDRNOTAVAIL;
IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n",
ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, IPCLITE_DEBUG_SIZE);
ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, DEBUG_PARTITION_SIZE);
IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n",
ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem);
return IPCLITE_SUCCESS;
return 0;
}
static int ipclite_probe(struct platform_device *pdev)
@@ -1464,18 +1585,22 @@ static int ipclite_probe(struct platform_device *pdev)
mem = &(ipclite->ipcmem.mem);
memset(mem->virt_base, 0, mem->size);
ipcmem_init(&ipclite->ipcmem);
ret = ipcmem_init(&ipclite->ipcmem, pn);
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up IPCMEM");
goto release;
}
/* Set up sysfs for debug */
ret = ipclite_debug_sysfs_setup();
if (ret != IPCLITE_SUCCESS) {
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n");
goto release;
}
/* Mapping Debug Memory */
ret = ipclite_debug_info_setup();
if (ret != IPCLITE_SUCCESS) {
if (ret) {
IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n");
goto release;
}
@@ -1495,15 +1620,12 @@ static int ipclite_probe(struct platform_device *pdev)
mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0);
if (global_atomic_support) {
ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
GLOBAL_ATOMICS_ENABLED;
} else {
ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
GLOBAL_ATOMICS_DISABLED;
ipclite->ipcmem.toc->hdr.feature_mask |= GLOBAL_ATOMIC_SUPPORT_BMSK;
}
IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n", global_atomic_support);
IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n",
ipclite->ipcmem.toc->ipclite_features.global_atomic_support);
/* Should be called after all Global TOC related init is done */
insert_magic_number();
/* hw mutex callbacks */
ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire;
@@ -1512,9 +1634,6 @@ static int ipclite_probe(struct platform_device *pdev)
/* store to ipclite structure */
ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
/* initialize hwlock owner to invalid host */
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
/* Update the Global Debug variable for FW cores */
ipclite_dbg_info->debug_level = ipclite_debug_level;
ipclite_dbg_info->debug_control = ipclite_debug_control;


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved..
* Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved..
*/
#include <linux/hwspinlock.h>
#include <linux/module.h>
@@ -14,9 +14,10 @@
#define ACTIVE_CHANNEL 0x1
#define IPCMEM_TOC_SIZE (4*1024)
#define IPCMEM_TOC_VAR_OFFSET 0x100
#define MAX_CHANNEL_SIGNALS 6
#define MAX_PARTITION_COUNT 11 /*11 partitions other than global partition*/
#define GLOBAL_ATOMIC_SUPPORT_BMSK 0x1UL
#define IPCLITE_MSG_SIGNAL 0
#define IPCLITE_MEM_INIT_SIGNAL 1
@@ -26,13 +27,13 @@
#define IPCLITE_DEBUG_SIGNAL 5
/** Flag definitions for the entries */
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01)
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION (0x02)
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION \
(IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION | \
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION)
#define IPCMEM_FLAGS_ENABLE_READ_PROTECTION (0x01)
#define IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION (0x02)
#define IPCMEM_FLAGS_ENABLE_RW_PROTECTION \
(IPCMEM_FLAGS_ENABLE_READ_PROTECTION | \
IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION)
#define IPCMEM_TOC_ENTRY_FLAGS_IGNORE_PARTITION (0x00000004)
#define IPCMEM_FLAGS_IGNORE_PARTITION (0x00000004)
/*Hardcoded macro to identify local host on each core*/
#define LOCAL_HOST IPCMEM_APPS
@@ -40,13 +41,6 @@
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
#define CHANNEL_INACTIVE 0
#define CHANNEL_ACTIVATE_IN_PROGRESS 1
#define CHANNEL_ACTIVE 2
#define CONFIGURED_CORE 1
#define IPCLITE_DEBUG_SIZE (64 * 1024)
#define IPCLITE_DEBUG_INFO_SIZE 256
#define IPCLITE_CORE_DBG_LABEL "APSS:"
#define IPCLITE_LOG_MSG_SIZE 100
@@ -55,6 +49,8 @@
#define IPCLITE_SIGNAL_LABEL_SIZE 10
#define PREV_INDEX 2
#define ADD_OFFSET(x, y) ((void *)((size_t)x + y))
#define IPCLITE_OS_LOG(__level, __fmt, arg...) \
do { \
if (ipclite_debug_level & __level) { \
@@ -69,7 +65,18 @@
} \
} while (0)
/*IPCMEM Structure Definitions*/
/**
* enum ipclite_channel_status - channel status
*
* INACTIVE : Channel uninitialized or init failed
* IN_PROGRESS : Channel init passed, awaiting confirmation from remote host
* ACTIVE : Channel init passed in local and remote host, thus active
*/
enum ipclite_channel_status {
INACTIVE = 0,
IN_PROGRESS = 1,
ACTIVE = 2,
};
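Illustrative sketch (not part of the change) of how the send paths use these states: the local status is promoted lazily once the shared status written by the remote host turns ACTIVE.
	if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE)
		channel->status = ACTIVE;	/* remote side finished init */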
enum ipclite_debug_level {
IPCLITE_ERR = 0x0001,
@@ -97,6 +104,11 @@ static const char ipclite_dbg_label[][IPCLITE_DBG_LABEL_SIZE] = {
[IPCLITE_DBG] = "dbg"
};
/**
* IPCMEM Debug Structure Definitions
* - Present in Local Memory
*/
struct ipclite_debug_info_host {
uint32_t numsig_sent; //no. of signals sent from the core
uint32_t numsig_recv; //no. of signals received on the core
@@ -137,60 +149,77 @@ struct ipclite_debug_struct {
struct ipclite_debug_info_host dbg_info_host[IPCMEM_NUM_HOSTS];
};
struct ipclite_features {
uint32_t global_atomic_support;
uint32_t version_finalised;
/**
* IPCMEM TOC Structure Definitions
* - Present in toc in shared memory
*/
struct ipcmem_host_info {
uint32_t hwlock_owner;
uint32_t configured_host;
};
struct ipclite_recover {
uint32_t global_atomic_hwlock_owner;
uint32_t configured_core[IPCMEM_NUM_HOSTS];
};
struct ipcmem_partition_header {
uint32_t type; /*partition type*/
uint32_t desc_offset; /*descriptor offset*/
uint32_t desc_size; /*descriptor size*/
uint32_t fifo0_offset; /*fifo 0 offset*/
uint32_t fifo0_size; /*fifo 0 size*/
uint32_t fifo1_offset; /*fifo 1 offset*/
uint32_t fifo1_size; /*fifo 1 size*/
};
struct ipcmem_toc_entry {
struct ipcmem_partition_entry {
uint32_t base_offset; /*partition offset from IPCMEM base*/
uint32_t size; /*partition size*/
uint32_t flags; /*partition flags if required*/
uint32_t host0; /*subsystem 0 who can access this partition*/
uint32_t host1; /*subsystem 1 who can access this partition*/
uint32_t status; /*partition active status*/
uint32_t reserved; /*legacy partition active status*/
};
struct ipcmem_partition_info {
uint32_t num_entries; /* Number of channel partitions */
uint32_t entry_size; /* Size of partition_entry structure */
};
struct ipcmem_offsets {
uint32_t host_info;
uint32_t global_entry;
uint32_t partition_info;
uint32_t partition_entry;
uint32_t debug;
uint32_t reserved; /*Padded for 64-bit alignment*/
};
/**
* Any change in TOC header size can only be accommodated with a
* major version change, as it is not backward compatible.
*/
struct ipcmem_toc_header {
uint32_t size;
uint32_t init_done;
uint32_t magic_number; /*Checksum of TOC*/
uint32_t init_done; /*TOC initialization status*/
uint32_t major_version;
uint32_t minor_version;
uint64_t feature_mask;
uint32_t reserved[6]; /*Padded for future use and 64-bit alignment*/
};
/**
* struct ipcmem_toc - Table of contents in ipcmem
*
* @hdr : Header to check for toc integrity, version and features
* @offsets : List of offsetted structures and partition entries
* available in the toc data region (ipcmem_toc_data)
*/
struct ipcmem_toc {
struct ipcmem_toc_header hdr;
struct ipcmem_toc_entry toc_entry_global;
struct ipcmem_toc_entry toc_entry[IPCMEM_NUM_HOSTS][IPCMEM_NUM_HOSTS];
/* Need to have a better implementation here */
/* as ipcmem is 4k and if host number increases */
/* it would create problems*/
struct ipclite_features ipclite_features;
struct ipclite_recover recovery;
struct ipcmem_offsets offsets;
/* ---------------------------------------
* ipcmem_toc_data @ 256-byte offset
* struct ipcmem_host_info host_info;
* struct ipcmem_partition_entry global_entry;
* struct ipcmem_partition_info partition_info;
* struct ipcmem_partition_entry partition_entry[num_entries];
* ---------------------------------------
*/
};
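A minimal reader-side sketch (assumed, not part of the change) of how a remote host could rebuild its local pointers from these offsets with the same ADD_OFFSET() helper; struct ipcmem_toc_data is defined further below.
static void parse_toc_offsets(struct ipcmem_toc *toc, struct ipcmem_toc_data *out)
{
	void *base = (void *)toc;	/* the TOC sits at the base of ipcmem */
	out->host_info       = ADD_OFFSET(base, toc->offsets.host_info);
	out->global_entry    = ADD_OFFSET(base, toc->offsets.global_entry);
	out->partition_info  = ADD_OFFSET(base, toc->offsets.partition_info);
	out->partition_entry = ADD_OFFSET(base, toc->offsets.partition_entry);
}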
struct ipcmem_region {
u64 aux_base;
void __iomem *virt_base;
uint32_t size;
};
struct ipcmem_partition {
struct ipcmem_partition_header hdr;
};
/**
* IPCMEM Partition Structure Definitions
* - Present in partitions in shared memory
*/
struct global_partition_header {
uint32_t partition_type;
@@ -202,13 +231,55 @@ struct ipcmem_global_partition {
struct global_partition_header hdr;
};
struct ipcmem_partition_header {
uint32_t type; /*partition type*/
uint32_t desc_offset; /*descriptor offset*/
uint32_t desc_size; /*descriptor size*/
uint32_t fifo0_offset; /*fifo 0 offset*/
uint32_t fifo0_size; /*fifo 0 size*/
uint32_t fifo1_offset; /*fifo 1 offset*/
uint32_t fifo1_size; /*fifo 1 size*/
uint32_t status; /*partition status*/
};
struct ipcmem_partition {
struct ipcmem_partition_header hdr;
};
/**
* IPCMEM Helper Structure Definitions
* - Present in local memory
* - Can have pointers to toc and partitions in shared memory
*/
/*Pointers to offsetted structures in TOC*/
struct ipcmem_toc_data {
struct ipcmem_host_info *host_info;
struct ipcmem_partition_entry *global_entry;
struct ipcmem_partition_info *partition_info;
struct ipcmem_partition_entry *partition_entry;
};
struct ipcmem_region {
u64 aux_base;
void __iomem *virt_base;
uint32_t size;
};
struct ipclite_mem {
struct ipcmem_toc *toc;
struct ipcmem_toc_data toc_data;
struct ipcmem_region mem;
struct ipcmem_global_partition *global_partition;
struct ipcmem_partition *partition[MAX_PARTITION_COUNT];
struct ipcmem_partition **partition;
};
/**
* IPCLite Structure Definitions
* - Present in local memory
* - Can have pointers to partitions in shared memory
*/
struct ipclite_fifo {
uint32_t length;
@@ -265,7 +336,8 @@ struct ipclite_channel {
uint32_t channel_version;
uint32_t version_finalised;
uint32_t channel_status;
uint32_t *gstatus_ptr;
uint32_t status;
};
/*Single structure that defines everything about IPCLite*/
@@ -277,138 +349,9 @@ struct ipclite_info {
struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
};
const struct ipcmem_toc_entry ipcmem_toc_global_partition_entry = {
/* Global partition. */
4 * 1024,
128 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_GLOBAL_HOST,
IPCMEM_GLOBAL_HOST,
};
const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
/* Global partition. */
/* {
* 4 * 1024,
* 128 * 1024,
* IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
* IPCMEM_GLOBAL_HOST,
* IPCMEM_GLOBAL_HOST,
* },
*/
/* APPS<->CDSP partition. */
{
132 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CDSP,
CHANNEL_INACTIVE,
},
/* APPS<->CVP (EVA) partition. */
{
164 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CVP,
CHANNEL_INACTIVE,
},
/* APPS<->CAM (ICP) partition. */
{
196 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CAM,
CHANNEL_INACTIVE,
},
/* APPS<->VPU (IRIS) partition. */
{
228 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_VPU,
CHANNEL_INACTIVE,
},
/* CDSP<->CVP (EVA) partition. */
{
260 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_CVP,
CHANNEL_INACTIVE,
},
/* CDSP<->CAM (ICP) partition. */
{
292 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_CAM,
CHANNEL_INACTIVE,
},
/* CDSP<->VPU (IRIS) partition. */
{
324 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_VPU,
CHANNEL_INACTIVE,
},
/* CVP<->CAM (ICP) partition. */
{
356 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CVP,
IPCMEM_CAM,
CHANNEL_INACTIVE,
},
/* CVP<->VPU (IRIS) partition. */
{
388 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CVP,
IPCMEM_VPU,
CHANNEL_INACTIVE,
},
/* CAM<->VPU (IRIS) partition. */
{
420 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CAM,
IPCMEM_VPU,
CHANNEL_INACTIVE,
},
/* APPS<->APPS partition. */
{
454 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_APPS,
CHANNEL_INACTIVE,
}
/* Last entry uses invalid hosts and no protections to signify the end. */
/* {
* 0,
* 0,
* IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
* IPCMEM_INVALID_HOST,
* IPCMEM_INVALID_HOST,
* }
*/
};
/*Default partition parameters*/
#define DEFAULT_PARTITION_TYPE 0x0
#define DEFAULT_PARTITION_STATUS INACTIVE
#define DEFAULT_PARTITION_HDR_SIZE 1024
#define DEFAULT_DESCRIPTOR_OFFSET 1024
@@ -418,6 +361,9 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
#define DEFAULT_FIFO1_OFFSET (12*1024)
#define DEFAULT_FIFO1_SIZE (8*1024)
#define DEFAULT_PARTITION_SIZE (32*1024)
#define DEFAULT_PARTITION_FLAGS IPCMEM_FLAGS_ENABLE_RW_PROTECTION
/*Loopback partition parameters*/
#define LOOPBACK_PARTITION_TYPE 0x1
@@ -428,6 +374,11 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
#define GLOBAL_REGION_OFFSET (4*1024)
#define GLOBAL_REGION_SIZE (124*1024)
#define GLOBAL_PARTITION_SIZE (128*1024)
#define GLOBAL_PARTITION_FLAGS IPCMEM_FLAGS_ENABLE_RW_PROTECTION
/*Debug partition parameters*/
#define DEBUG_PARTITION_SIZE (64*1024)
const struct ipcmem_partition_header default_partition_hdr = {
DEFAULT_PARTITION_TYPE,
@@ -437,6 +388,7 @@ const struct ipcmem_partition_header default_partition_hdr = {
DEFAULT_FIFO0_SIZE,
DEFAULT_FIFO1_OFFSET,
DEFAULT_FIFO1_SIZE,
DEFAULT_PARTITION_STATUS,
};
/* TX and RX FIFO point to same location for such loopback partition type
@@ -450,6 +402,7 @@ const struct ipcmem_partition_header loopback_partition_hdr = {
DEFAULT_FIFO0_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
DEFAULT_PARTITION_STATUS,
};
const struct global_partition_header global_partition_hdr = {


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __IPCLITE_CLIENT_H__
#define __IPCLITE_CLIENT_H__
@@ -27,13 +27,6 @@ enum ipcmem_host_type {
IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */
};
/**
* IPCLite return codes
*/
#define IPCLITE_SUCCESS 0 /*< Success > */
#define IPCLITE_FAILURE 1 /*< Failure > */
#define IPCLITE_EINCHAN 9 /*< Inactive Channel */
struct global_region_info {
void *virt_base;
uint32_t size;