video: driver: add support to send aon region via HFI_MMAP_ADDR
- add support to initialise device region by reading data from platform to resources.
- add support for iommu_map and iommu_unmap APIs.
- allocate a 4K page and send its address through the HFI_MMAP_ADDR register.
- map the AON region and send its virtual address and size as payload.

Change-Id: I5aa26593309a220c5de62836e432c1bd5a63ba1d
Signed-off-by: Darshana Patil <quic_darshana@quicinc.com>
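For orientation, below is a minimal standalone sketch (not part of the diff) of the 4K HFI_MMAP_ADDR buffer layout that this change populates; it mirrors the payload[] comment in venus_hfi_queue_init() later in the diff. The helper name mmap_buf_fill() and the MMAP_BUF_WORDS constant are illustrative only, not part of the driver.

/*
 * Illustrative sketch of the HFI_MMAP_ADDR payload: a version word
 * followed by <device address, size> pairs; in version 1 the AON
 * register region occupies words 11 and 12.
 */
#include <stdint.h>
#include <string.h>

#define MMAP_BUF_WORDS	(4096 / sizeof(uint32_t))	/* one 4K page of u32 words */

static void mmap_buf_fill(uint32_t *payload, uint32_t aon_dev_addr, uint32_t aon_size)
{
	/* zero the whole page before publishing it to firmware */
	memset(payload, 0, MMAP_BUF_WORDS * sizeof(uint32_t));

	payload[0]  = 1;		/* payload version */
	/* payload[1..10]: SFR, IPCC lite, AOSS timers, HW mutex, IPCC registers */
	payload[11] = aon_dev_addr;	/* mapped device (IOVA) address of AON registers */
	payload[12] = aon_size;		/* size of the AON register region */
}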
@@ -89,6 +89,14 @@ struct reg_preset_table {
	u32 mask;
};

struct device_region_table {
	const char *name;
	phys_addr_t phy_addr;
	u32 size;
	u32 dev_addr;
	u32 region;
};

struct msm_vidc_ubwc_config_data {
	u32 max_channels;
	u32 mal_length;
@@ -199,6 +207,8 @@ struct msm_vidc_platform_data {
	unsigned int freq_tbl_size;
	const struct reg_preset_table *reg_prst_tbl;
	unsigned int reg_prst_tbl_size;
	const struct device_region_table *dev_reg_tbl;
	unsigned int dev_reg_tbl_size;
	struct msm_vidc_ubwc_config_data *ubwc_config;
	const char *fwname;
	u32 pas_id;
@@ -2583,6 +2583,11 @@ static const struct reg_preset_table pineapple_reg_preset_table[] = {
	{ 0xB0088, 0x0, 0x11 },
};

/* name, phys_addr, size, device_addr, device region type */
static const struct device_region_table pineapple_device_region_table[] = {
	{ "aon-registers", 0x0AAE0000, 0x1000, 0xFFAE0000, MSM_VIDC_AON_REGISTERS },
};

static const struct msm_vidc_platform_data pineapple_data = {
	/* resources dependent on other module */
	.bw_tbl = pineapple_bw_table,
@@ -2605,6 +2610,8 @@ static const struct msm_vidc_platform_data pineapple_data = {
	.freq_tbl_size = ARRAY_SIZE(pineapple_freq_table),
	.reg_prst_tbl = pineapple_reg_preset_table,
	.reg_prst_tbl_size = ARRAY_SIZE(pineapple_reg_preset_table),
	.dev_reg_tbl = pineapple_device_region_table,
	.dev_reg_tbl_size = ARRAY_SIZE(pineapple_device_region_table),
	.fwname = "vpu33_4v",
	.pas_id = 9,
	.supports_mmrm = 1,
@@ -143,9 +143,48 @@ static int __interrupt_init_iris33(struct msm_vidc_core *vidc_core)
	return 0;
}

static int __setup_ucregion_memory_map_iris33(struct msm_vidc_core *vidc_core)
static int __get_device_region_info(struct msm_vidc_core *core,
	u32 *min_dev_addr, u32 *dev_reg_size)
{
	struct device_region_set *dev_set;
	u32 min_addr, max_addr, count = 0;
	int rc = 0;

	if (!core || !core->resource) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	dev_set = &core->resource->device_region_set;

	if (!dev_set->count) {
		d_vpr_h("%s: device region not available\n", __func__);
		return 0;
	}

	min_addr = 0xFFFFFFFF;
	max_addr = 0x0;
	for (count = 0; count < dev_set->count; count++) {
		if (dev_set->device_region_tbl[count].dev_addr > max_addr)
			max_addr = dev_set->device_region_tbl[count].dev_addr +
				dev_set->device_region_tbl[count].size;
		if (dev_set->device_region_tbl[count].dev_addr < min_addr)
			min_addr = dev_set->device_region_tbl[count].dev_addr;
	}
	if (min_addr == 0xFFFFFFFF || max_addr == 0x0) {
		d_vpr_e("%s: invalid device region\n", __func__);
		return -EINVAL;
	}

	*min_dev_addr = min_addr;
	*dev_reg_size = max_addr - min_addr;

	return rc;
}

static int __program_bootup_registers_iris33(struct msm_vidc_core *vidc_core)
{
	struct msm_vidc_core *core = vidc_core;
	u32 min_dev_reg_addr = 0, dev_reg_size = 0;
	u32 value;
	int rc = 0;
@@ -173,17 +212,35 @@ static int __setup_ucregion_memory_map_iris33(struct msm_vidc_core *vidc_core)
	if (rc)
		return rc;

	/* update queues vaddr for debug purpose */
	value = (u32)((u64)core->iface_q_table.align_virtual_addr);
	rc = __write_register(core, HFI_DEVICE_REGION_ADDR_IRIS33, value);
	if (core->mmap_buf.align_device_addr) {
		value = (u32)core->mmap_buf.align_device_addr;
		rc = __write_register(core, HFI_MMAP_ADDR_IRIS33, value);
		if (rc)
			return rc;
	} else {
		d_vpr_e("%s: skip mmap buffer programming\n", __func__);
		/* ignore the error for now for backward compatibility */
		/* return -EINVAL; */
	}

	rc = __get_device_region_info(core, &min_dev_reg_addr, &dev_reg_size);
	if (rc)
		return rc;

	value = (u32)((u64)core->iface_q_table.align_virtual_addr >> 32);
	rc = __write_register(core, HFI_DEVICE_REGION_SIZE_IRIS33, value);
	if (min_dev_reg_addr && dev_reg_size) {
		rc = __write_register(core, HFI_DEVICE_REGION_ADDR_IRIS33, min_dev_reg_addr);
		if (rc)
			return rc;

		rc = __write_register(core, HFI_DEVICE_REGION_SIZE_IRIS33, dev_reg_size);
		if (rc)
			return rc;
	} else {
		d_vpr_e("%s: skip device region programming\n", __func__);
		/* ignore the error for now for backward compatibility */
		/* return -EINVAL; */
	}

	if (core->sfr.align_device_addr) {
		value = (u32)core->sfr.align_device_addr + VIDEO_ARCH_LX;
		rc = __write_register(core, HFI_SFR_ADDR_IRIS33, value);
@@ -878,7 +935,7 @@ static int __boot_firmware_iris33(struct msm_vidc_core *vidc_core)
	return 0;
}

	rc = __setup_ucregion_memory_map_iris33(core);
	rc = __program_bootup_registers_iris33(core);
	if (rc)
		return rc;
@@ -84,6 +84,8 @@ struct msm_vidc_core {
	struct msm_vidc_inst_capability *inst_caps;
	struct msm_vidc_mem_addr sfr;
	struct msm_vidc_mem_addr iface_q_table;
	struct msm_vidc_mem_addr mmap_buf;
	struct msm_vidc_mem_addr aon;
	struct msm_vidc_iface_q_info iface_queues[VIDC_IFACEQ_NUMQ];
	struct delayed_work pm_work;
	struct workqueue_struct *pm_workq;
@@ -456,6 +456,12 @@ enum msm_vidc_buffer_region {
	MSM_VIDC_REGION_MAX,
};

enum msm_vidc_device_region {
	MSM_VIDC_DEVICE_REGION_NONE = 0,
	MSM_VIDC_AON_REGISTERS,
	MSM_VIDC_DEVICE_REGION_MAX,
};

enum msm_vidc_port_type {
	INPUT_PORT = 0,
	OUTPUT_PORT,
@@ -870,6 +876,7 @@ struct msm_vidc_mem {
	u32 refcount;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	phys_addr_t phys_addr;
};

struct msm_vidc_mem_list {
@@ -78,6 +78,10 @@ struct msm_vidc_memory_ops {
		struct msm_vidc_mem *mem);
	u32 (*buffer_region)(struct msm_vidc_inst *inst,
		enum msm_vidc_buffer_type buffer_type);
	int (*iommu_map)(struct msm_vidc_core *core,
		struct msm_vidc_mem *mem);
	int (*iommu_unmap)(struct msm_vidc_core *core,
		struct msm_vidc_mem *mem);
};

struct msm_vidc_memory_ops *get_mem_ops(void);
@@ -95,6 +95,10 @@ struct msm_vidc_core;
#define venus_hfi_for_each_context_bank_reverse(__device, __sinfo) \
	venus_hfi_for_each_thing_reverse(__device, __sinfo, context_bank)

/* Device region set helper */
#define venus_hfi_for_each_device_region(__device, __sinfo) \
	venus_hfi_for_each_thing(__device, __sinfo, device_region)

struct bus_info {
	struct icc_path *icc;
	const char *name;
@@ -196,6 +200,19 @@ struct freq_set {
	u32 count;
};

struct device_region_info {
	const char *name;
	phys_addr_t phy_addr;
	u32 size;
	u32 dev_addr;
	u32 region;
};

struct device_region_set {
	struct device_region_info *device_region_tbl;
	u32 count;
};

struct msm_vidc_resource {
	void *core;
	u8 __iomem *register_base_addr;
@@ -207,6 +224,7 @@ struct msm_vidc_resource {
	struct subcache_set subcache_set;
	struct context_bank_set context_bank_set;
	struct freq_set freq_set;
	struct device_region_set device_region_set;
	int fw_cookie;
};
@@ -72,5 +72,7 @@ void venus_hfi_pm_work_handler(struct work_struct *work);
irqreturn_t venus_hfi_isr(int irq, void *data);
irqreturn_t venus_hfi_isr_handler(int irq, void *data);
int __prepare_pc(struct msm_vidc_core *core);
struct device_region_info *venus_hfi_get_device_region_info(
	struct msm_vidc_core *core, enum msm_vidc_device_region region);

#endif // _VENUS_HFI_H_
@@ -68,16 +68,19 @@ struct hfi_queue_header {

#define QDSS_SIZE 4096
#define SFR_SIZE 4096
#define MMAP_BUF_SIZE 4096

#define QUEUE_SIZE (VIDC_IFACEQ_TABLE_SIZE + \
	(VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ))

#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_MMAP_BUF_SIZE ALIGN(MMAP_BUF_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
	ALIGNED_QDSS_SIZE, SZ_1M)
#define TOTAL_QSIZE (SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE)
	ALIGNED_QDSS_SIZE + ALIGNED_MMAP_BUF_SIZE, SZ_1M)
#define TOTAL_QSIZE (SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE - \
	ALIGNED_MMAP_BUF_SIZE)

struct msm_vidc_core;
@@ -482,6 +482,64 @@ static u32 msm_vidc_buffer_region(struct msm_vidc_inst *inst,
	return MSM_VIDC_NON_SECURE;
}

static int msm_vidc_iommu_map(struct msm_vidc_core *core, struct msm_vidc_mem *mem)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;

	if (!core || !mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n", __func__);
		return -EIO;
	}

	rc = iommu_map(cb->domain, mem->device_addr, mem->phys_addr,
		mem->size, IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
	if (rc) {
		d_vpr_e("iommu_map failed for device_addr 0x%x, size %d, rc:%d\n",
			mem->device_addr, mem->size, rc);
		return rc;
	}

	d_vpr_h("%s: phys_addr %#x size %#x device_addr %#x, mem_region %d\n",
		__func__, mem->phys_addr, mem->size, mem->device_addr, mem->region);

	return rc;
}

static int msm_vidc_iommu_unmap(struct msm_vidc_core *core, struct msm_vidc_mem *mem)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;

	if (!core || !mem) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	cb = msm_vidc_get_context_bank_for_region(core, mem->region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n",
			__func__);
		return -EIO;
	}

	d_vpr_h("%s: phys_addr %#x size %#x device_addr %#x, mem_region %d\n",
		__func__, mem->phys_addr, mem->size, mem->device_addr, mem->region);

	iommu_unmap(cb->domain, mem->device_addr, mem->size);
	mem->device_addr = 0x0;
	mem->phys_addr = 0x0;
	mem->size = 0;

	return rc;
}

static struct msm_vidc_memory_ops msm_mem_ops = {
	.dma_buf_get = msm_vidc_dma_buf_get,
	.dma_buf_put = msm_vidc_dma_buf_put,
@@ -493,6 +551,8 @@ static struct msm_vidc_memory_ops msm_mem_ops = {
	.memory_alloc_map = msm_vidc_memory_alloc_map,
	.memory_unmap_free = msm_vidc_memory_unmap_free,
	.buffer_region = msm_vidc_buffer_region,
	.iommu_map = msm_vidc_iommu_map,
	.iommu_unmap = msm_vidc_iommu_unmap,
};

struct msm_vidc_memory_ops *get_mem_ops(void)
@@ -637,6 +637,56 @@ static int __init_context_banks(struct msm_vidc_core *core)
	return rc;
}

static int __init_device_region(struct msm_vidc_core *core)
{
	const struct device_region_table *dev_reg_tbl;
	struct device_region_set *dev_set;
	struct device_region_info *dev_reg_info;
	u32 dev_reg_count = 0, cnt = 0;
	int rc = 0;

	if (!core || !core->resource || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	dev_set = &core->resource->device_region_set;

	dev_reg_tbl = core->platform->data.dev_reg_tbl;
	dev_reg_count = core->platform->data.dev_reg_tbl_size;

	if (!dev_reg_tbl || !dev_reg_count) {
		d_vpr_h("%s: device regions not available\n", __func__);
		return 0;
	}

	/* allocate device region table */
	dev_set->device_region_tbl = devm_kzalloc(&core->pdev->dev,
		sizeof(*dev_set->device_region_tbl) * dev_reg_count, GFP_KERNEL);
	if (!dev_set->device_region_tbl) {
		d_vpr_e("%s: failed to alloc memory for device region table\n", __func__);
		return -ENOMEM;
	}
	dev_set->count = dev_reg_count;

	/* populate device region fields from platform data */
	for (cnt = 0; cnt < dev_set->count; cnt++) {
		dev_set->device_region_tbl[cnt].name = dev_reg_tbl[cnt].name;
		dev_set->device_region_tbl[cnt].phy_addr = dev_reg_tbl[cnt].phy_addr;
		dev_set->device_region_tbl[cnt].size = dev_reg_tbl[cnt].size;
		dev_set->device_region_tbl[cnt].dev_addr = dev_reg_tbl[cnt].dev_addr;
		dev_set->device_region_tbl[cnt].region = dev_reg_tbl[cnt].region;
	}

	/* print device region fields */
	venus_hfi_for_each_device_region(core, dev_reg_info) {
		d_vpr_h("%s: name %s phy_addr %#x size %#x dev_addr %#x dev_region %d\n",
			__func__, dev_reg_info->name, dev_reg_info->phy_addr, dev_reg_info->size,
			dev_reg_info->dev_addr, dev_reg_info->region);
	}

	return rc;
}

#ifdef CONFIG_MSM_MMRM
static int __register_mmrm(struct msm_vidc_core *core)
{
@@ -1501,6 +1551,10 @@ static int __init_resources(struct msm_vidc_core *core)
	if (rc)
		return rc;

	rc = __init_device_region(core);
	if (rc)
		return rc;

	rc = __register_mmrm(core);
	if (rc)
		return rc;
@@ -2151,3 +2151,25 @@ exit:

	return rc;
}

struct device_region_info *venus_hfi_get_device_region_info(
	struct msm_vidc_core *core, enum msm_vidc_device_region region)
{
	struct device_region_info *dev_reg = NULL, *match = NULL;

	if (!region || region >= MSM_VIDC_DEVICE_REGION_MAX) {
		d_vpr_e("%s: invalid region %#x\n", __func__, region);
		return NULL;
	}

	venus_hfi_for_each_device_region(core, dev_reg) {
		if (dev_reg->region == region) {
			match = dev_reg;
			break;
		}
	}
	if (!match)
		d_vpr_e("%s: device region %d not found\n", __func__, region);

	return match;
}
@@ -9,6 +9,7 @@
#include "msm_vidc_debug.h"
#include "msm_vidc_memory.h"
#include "msm_vidc_platform.h"
#include "venus_hfi.h"

static int __strict_check(struct msm_vidc_core *core, const char *function)
{
@@ -426,6 +427,8 @@ void venus_hfi_queue_deinit(struct msm_vidc_core *core)

	call_mem_op(core, memory_unmap_free, core, &core->iface_q_table.mem);
	call_mem_op(core, memory_unmap_free, core, &core->sfr.mem);
	call_mem_op(core, iommu_unmap, core, &core->aon.mem);
	call_mem_op(core, memory_unmap_free, core, &core->mmap_buf.mem);

	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
		core->iface_queues[i].q_hdr = NULL;
@@ -438,6 +441,12 @@ void venus_hfi_queue_deinit(struct msm_vidc_core *core)

	core->sfr.align_virtual_addr = NULL;
	core->sfr.align_device_addr = 0;

	core->aon.align_virtual_addr = NULL;
	core->aon.align_device_addr = 0;

	core->mmap_buf.align_virtual_addr = NULL;
	core->mmap_buf.align_device_addr = 0;
}

int venus_hfi_reset_queue_header(struct msm_vidc_core *core)
@@ -485,8 +494,10 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
	struct hfi_queue_table_header *q_tbl_hdr;
	struct hfi_queue_header *q_hdr;
	struct msm_vidc_iface_q_info *iface_q;
	struct device_region_info *dev_reg;
	struct msm_vidc_mem mem;
	int offset = 0;
	u32 *payload;
	u32 i;

	d_vpr_h("%s()\n", __func__);
@@ -577,6 +588,60 @@ int venus_hfi_queue_init(struct msm_vidc_core *core)
	/* write sfr buffer size in first word */
	*((u32 *)core->sfr.align_virtual_addr) = core->sfr.mem_size;

	/* map aon registers */
	memset(&mem, 0, sizeof(mem));
	dev_reg = venus_hfi_get_device_region_info(core, MSM_VIDC_AON_REGISTERS);
	if (!dev_reg) {
		d_vpr_h("%s: aon device region not available\n", __func__);
		goto skip_mmap_buffer;
	}
	mem.region = MSM_VIDC_NON_SECURE;
	mem.phys_addr = dev_reg->phy_addr;
	mem.size = dev_reg->size;
	mem.device_addr = dev_reg->dev_addr;
	rc = call_mem_op(core, iommu_map, core, &mem);
	if (rc) {
		d_vpr_e("%s: aon map failed\n", __func__);
		goto fail_alloc_queue;
	}
	core->aon.align_virtual_addr = mem.kvaddr;
	core->aon.align_device_addr = mem.device_addr;
	core->aon.mem = mem;

	/* allocate 4k buffer for HFI_MMAP_ADDR */
	memset(&mem, 0, sizeof(mem));
	mem.type = MSM_VIDC_BUF_QUEUE;
	mem.region = MSM_VIDC_NON_SECURE;
	mem.size = ALIGNED_MMAP_BUF_SIZE;
	mem.secure = false;
	mem.map_kernel = true;
	rc = call_mem_op(core, memory_alloc_map, core, &mem);
	if (rc) {
		d_vpr_e("%s: mmap buffer alloc and map failed\n", __func__);
		goto fail_alloc_queue;
	}
	core->mmap_buf.align_virtual_addr = mem.kvaddr;
	core->mmap_buf.align_device_addr = mem.device_addr;
	core->mmap_buf.mem_size = ALIGNED_MMAP_BUF_SIZE;
	core->mmap_buf.mem = mem;
	/* initialize mmap buffer */
	/* payload of HFI_MMAP_ADDR:
	 * payload[0] : version
	 * ___________________ payloads in version 1 ________________
	 * payload[1-2] : address and size of SFR
	 * payload[3-4] : address and size of IPCC lite memory
	 * payload[5-6] : address and size of AOSS global timers
	 * payload[7-8] : address and size of HW mutex registers
	 * payload[9-10] : address and size of IPCC registers
	 * payload[11-12] : address and size of AON registers
	 */
	memset(core->mmap_buf.align_virtual_addr, 0, ALIGNED_MMAP_BUF_SIZE);
	payload = ((u32 *)core->mmap_buf.align_virtual_addr);
	payload[0] = 1;
	payload[11] = core->aon.mem.device_addr;
	payload[12] = core->aon.mem.size;

skip_mmap_buffer:
	return 0;
fail_alloc_queue:
	return -ENOMEM;