@@ -125,25 +125,14 @@ static int ipa3_nat_ipv6ct_mmap(
 		(struct ipa3_nat_ipv6ct_common_mem *)filp->private_data;
 	unsigned long vsize = vma->vm_end - vma->vm_start;
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
-
-	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+	struct ipa3_nat_mem *nm_ptr;
 	struct ipa3_nat_mem_loc_data *mld_ptr;
 	enum ipa3_nat_mem_in nmi;
 
 	int result = 0;
 
-	nmi = nm_ptr->last_alloc_loc;
-
 	IPADBG("In\n");
 
-	if (!IPA_VALID_NAT_MEM_IN(nmi)) {
-		IPAERR_RL("Bad ipa3_nat_mem_in type\n");
-		result = -EPERM;
-		goto bail;
-	}
-
-	mld_ptr = &nm_ptr->mem_loc[nmi];
-
 	if (!dev->is_dev_init) {
 		IPAERR("Attempt to mmap %s before dev init\n",
 			dev->name);
@@ -153,29 +142,6 @@ static int ipa3_nat_ipv6ct_mmap(
 
 	mutex_lock(&dev->lock);
 
-	if (!mld_ptr->vaddr) {
-		IPAERR_RL("Attempt to mmap %s before the memory allocation\n",
-			dev->name);
-		result = -EPERM;
-		goto unlock;
-	}
-
-	if (mld_ptr->is_mapped) {
-		IPAERR("%s already mapped, only 1 mapping supported\n",
-			dev->name);
-		result = -EINVAL;
-		goto unlock;
-	}
-
-	if (nmi == IPA_NAT_MEM_IN_SRAM) {
-		if (dev->phys_mem_size == 0 || dev->phys_mem_size > vsize) {
-			IPAERR_RL("%s err vsize(0x%X) phys_mem_size(0x%X)\n",
-				dev->name, vsize, dev->phys_mem_size);
-			result = -EINVAL;
-			goto unlock;
-		}
-	}
-
 	/*
 	 * Check if no smmu or non dma coherent
 	 */
@@ -188,32 +154,74 @@ static int ipa3_nat_ipv6ct_mmap(
 			pgprot_noncached(vma->vm_page_prot);
 	}
 
-	mld_ptr->base_address = NULL;
+	if (dev->is_nat_mem) {
 
-	IPADBG("Mapping %s\n", ipa3_nat_mem_in_as_str(nmi));
+		nm_ptr = (struct ipa3_nat_mem *) dev;
+		nmi = nm_ptr->last_alloc_loc;
 
-	if (nmi == IPA_NAT_MEM_IN_DDR) {
+		if (!IPA_VALID_NAT_MEM_IN(nmi)) {
+			IPAERR_RL("Bad ipa3_nat_mem_in type\n");
+			result = -EPERM;
+			goto unlock;
+		}
 
-		IPADBG("map sz=0x%zx into vma size=0x%08x\n",
-			mld_ptr->table_alloc_size,
-			vsize);
+		mld_ptr = &nm_ptr->mem_loc[nmi];
 
-		result =
-			dma_mmap_coherent(
-				ipa3_ctx->pdev,
-				vma,
-				mld_ptr->vaddr,
-				mld_ptr->dma_handle,
-				mld_ptr->table_alloc_size);
+		if (!mld_ptr->vaddr) {
+			IPAERR_RL(
+				"Attempt to mmap %s before the memory allocation\n",
+				dev->name);
+			result = -EPERM;
+			goto unlock;
+		}
 
-		if (result) {
-			IPAERR("dma_mmap_coherent failed. Err:%d\n", result);
+		if (mld_ptr->is_mapped) {
+			IPAERR("%s already mapped, only 1 mapping supported\n",
+				dev->name);
+			result = -EINVAL;
 			goto unlock;
 		}
 
-		mld_ptr->base_address = mld_ptr->vaddr;
-	} else {
 		if (nmi == IPA_NAT_MEM_IN_SRAM) {
+			if (dev->phys_mem_size == 0 ||
+				dev->phys_mem_size > vsize) {
+				IPAERR_RL(
+					"%s err vsize(0x%X) phys_mem_size(0x%X)\n",
+					dev->name, vsize, dev->phys_mem_size);
+				result = -EINVAL;
+				goto unlock;
+			}
+		}
+
+		mld_ptr->base_address = NULL;
+
+		IPADBG("Mapping V4 NAT: %s\n",
+			ipa3_nat_mem_in_as_str(nmi));
+
+		if (nmi == IPA_NAT_MEM_IN_DDR) {
+
+			IPADBG("map sz=0x%zx -> vma size=0x%08x\n",
+				mld_ptr->table_alloc_size,
+				vsize);
+
+			result =
+				dma_mmap_coherent(
+					ipa3_ctx->pdev,
+					vma,
+					mld_ptr->vaddr,
+					mld_ptr->dma_handle,
+					mld_ptr->table_alloc_size);
+
+			if (result) {
+				IPAERR(
+					"dma_mmap_coherent failed. Err:%d\n",
+					result);
+				goto unlock;
+			}
+
+			mld_ptr->base_address = mld_ptr->vaddr;
+
+		} else { /* nmi == IPA_NAT_MEM_IN_SRAM */
 
 			IPADBG("map phys_mem_size(0x%08X) -> vma sz(0x%08X)\n",
 				dev->phys_mem_size, vsize);
@@ -231,9 +239,52 @@ static int ipa3_nat_ipv6ct_mmap(
 
 			mld_ptr->base_address = mld_ptr->vaddr;
 		}
-	}
 
-	mld_ptr->is_mapped = true;
+		mld_ptr->is_mapped = true;
+
+	} else { /* dev->is_ipv6ct_mem */
+
+		if (!dev->vaddr) {
+			IPAERR_RL(
+				"Attempt to mmap %s before the memory allocation\n",
+				dev->name);
+			result = -EPERM;
+			goto unlock;
+		}
+
+		if (dev->is_mapped) {
+			IPAERR("%s already mapped, only 1 mapping supported\n",
+				dev->name);
+			result = -EINVAL;
+			goto unlock;
+		}
+
+		dev->base_address = NULL;
+
+		IPADBG("Mapping V6 CT: %s\n",
+			ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
+
+		IPADBG("map sz=0x%zx -> vma size=0x%08x\n",
+			dev->table_alloc_size,
+			vsize);
+
+		result =
+			dma_mmap_coherent(
+				ipa3_ctx->pdev,
+				vma,
+				dev->vaddr,
+				dev->dma_handle,
+				dev->table_alloc_size);
+
+		if (result) {
+			IPAERR("dma_mmap_coherent failed. Err:%d\n", result);
+			goto unlock;
+		}
+
+		dev->base_address = dev->vaddr;
+
+		dev->is_mapped = true;
+	}
 
 	vma->vm_ops = &ipa3_nat_ipv6ct_remap_vm_ops;
 
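Taken together, the hunks above restructure ipa3_nat_ipv6ct_mmap() around a single device-type branch: the NAT-only state (nm_ptr, nmi, mld_ptr) is now touched only when dev->is_nat_mem is set, so an mmap on the IPv6CT device no longer casts the common device to struct ipa3_nat_mem and reads fields that were never initialized for it. Below is a condensed sketch of the resulting control flow; it uses the field and macro names from the diff, but the helper name and the collapsed error codes are illustrative, and locking, logging, and the SRAM remap path are omitted.

/* Sketch only: error paths collapsed, locking/logging omitted. */
static int mmap_flow_sketch(struct ipa3_nat_ipv6ct_common_mem *dev)
{
	if (dev->is_nat_mem) {
		/* V4 NAT: table may live in SRAM or DDR */
		struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *)dev;
		enum ipa3_nat_mem_in nmi = nm_ptr->last_alloc_loc;
		struct ipa3_nat_mem_loc_data *mld_ptr;

		if (!IPA_VALID_NAT_MEM_IN(nmi))
			return -EPERM;

		mld_ptr = &nm_ptr->mem_loc[nmi];
		if (!mld_ptr->vaddr || mld_ptr->is_mapped)
			return -EPERM;

		/* DDR -> dma_mmap_coherent(); SRAM -> phys remap */
		mld_ptr->is_mapped = true;
	} else {
		/* V6 CT: always a DDR dma_alloc_coherent() buffer */
		if (!dev->vaddr || dev->is_mapped)
			return -EPERM;

		/* dma_mmap_coherent(..., dev->vaddr, dev->dma_handle, ...) */
		dev->is_mapped = true;
	}

	return 0;
}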
@@ -533,11 +584,13 @@ static int ipa3_nat_ipv6ct_allocate_mem(
 
 		nm_ptr = (struct ipa3_nat_mem *) dev;
 
-		if (table_alloc->size <= IPA_NAT_PHYS_MEM_SIZE) {
+		if (sram_compatible && table_alloc->size <= IPA_NAT_PHYS_MEM_SIZE) {
 			/*
 			 * CAN fit in SRAM, hence we'll use SRAM...
+			 * And SRAM allowed
 			 */
-			IPADBG("V4 NAT will reside in: %s\n",
+			IPADBG("V4 NAT with size 0x%08X will reside in: %s\n",
+				table_alloc->size,
 				ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_SRAM));
 
 			if (nm_ptr->sram_in_use) {
@@ -577,9 +630,10 @@ static int ipa3_nat_ipv6ct_allocate_mem(
 		} else {
 
 			/*
-			 * CAN NOT fit in SRAM, hence we'll allocate DDR...
+			 * CAN NOT fit in SRAM OR SRAM not allowed, hence we'll allocate DDR...
 			 */
-			IPADBG("V4 NAT will reside in: %s\n",
+			IPADBG("V4 NAT with size 0x%08X will reside in: %s\n",
+				table_alloc->size,
 				ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
 
 			if (nm_ptr->ddr_in_use) {
@@ -611,11 +665,12 @@ static int ipa3_nat_ipv6ct_allocate_mem(
 	} else {
 		if (nat_type == IPAHAL_NAT_IPV6CT) {
 
-			dev->table_alloc_size = table_alloc->size;
-
-			IPADBG("V6 NAT will reside in: %s\n",
+			IPADBG("V6 CT with size 0x%08X will reside in: %s\n",
+				table_alloc->size,
 				ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
 
+			dev->table_alloc_size = table_alloc->size;
+
 			dev->vaddr =
 				dma_alloc_coherent(
 					ipa3_ctx->pdev,
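The allocate-side hunks change the placement policy to match: SRAM is chosen only when the caller marks the request SRAM-compatible and the table actually fits, while V6 CT allocation is unchanged apart from the more detailed debug print (table_alloc_size is simply recorded after it instead of before). A minimal sketch of the decision, assuming the surrounding names from the diff (nat_type, table_alloc, sram_compatible, IPA_NAT_PHYS_MEM_SIZE); the helper itself is illustrative and not part of the patch.

/*
 * Placement decision only; allocation, locking, and error handling
 * remain as in the full function above.
 */
static enum ipa3_nat_mem_in pick_location_sketch(
	enum ipahal_nat_type nat_type,
	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc,
	bool sram_compatible)
{
	if (nat_type == IPAHAL_NAT_IPV4 &&
	    sram_compatible &&
	    table_alloc->size <= IPA_NAT_PHYS_MEM_SIZE)
		return IPA_NAT_MEM_IN_SRAM; /* fits and SRAM allowed */

	/* V4 too big or SRAM disallowed, and all V6 CT tables: DDR */
	return IPA_NAT_MEM_IN_DDR;
}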