ipa_nat.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/init.h>
  8. #include <linux/kernel.h>
  9. #include <linux/mm.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/dma-noncoherent.h>
  12. #include "ipa_i.h"
  13. #include "ipahal/ipahal.h"
  14. #include "ipahal/ipahal_nat.h"
  15. /*
  16. * The following for adding code (ie. for EMULATION) not found on x86.
  17. */
  18. #if defined(CONFIG_IPA_EMULATION)
  19. # include "ipa_emulation_stubs.h"
  20. #endif
  21. #define IPA_NAT_PHYS_MEM_OFFSET IPA_MEM_PART(nat_tbl_ofst)
  22. #define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
  23. #define IPA_IPV6CT_PHYS_MEM_OFFSET 0
  24. #define IPA_IPV6CT_PHYS_MEM_SIZE IPA_RAM_IPV6CT_SIZE
  25. #define IPA_NAT_IPV6CT_TEMP_MEM_SIZE 128
  26. #define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 4
  27. #define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 3
  28. #define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 5
  29. /*
  30. * The base table max entries is limited by index into table 13 bits number.
  31. * Limit the memory size required by user to prevent kernel memory starvation
  32. */
  33. #define IPA_TABLE_MAX_ENTRIES 8192
  34. #define MAX_ALLOC_NAT_SIZE(size) (IPA_TABLE_MAX_ENTRIES * size)
  35. #define IPA_VALID_TBL_INDEX(ti) \
  36. ((ti) == 0)
/*
 * Identifies which NAT/IPv6CT table a hardware command targets.
 * NOTE(review): the explicit values appear to be the identifiers the
 * IPA HW expects in table-DMA commands — confirm against ipahal docs
 * before renumbering.
 */
enum ipa_nat_ipv6ct_table_type {
	IPA_NAT_BASE_TBL = 0,		/* IPv4 NAT base (rule) table */
	IPA_NAT_EXPN_TBL = 1,		/* IPv4 NAT expansion table */
	IPA_NAT_INDX_TBL = 2,		/* IPv4 NAT index table */
	IPA_NAT_INDEX_EXPN_TBL = 3,	/* IPv4 NAT index expansion table */
	IPA_IPV6CT_BASE_TBL = 4,	/* IPv6 conn-tracking base table */
	IPA_IPV6CT_EXPN_TBL = 5		/* IPv6 conn-tracking expansion table */
};
  45. static bool sram_compatible;
  46. static vm_fault_t ipa3_nat_ipv6ct_vma_fault_remap(struct vm_fault *vmf)
  47. {
  48. vmf->page = NULL;
  49. IPADBG("\n");
  50. return VM_FAULT_SIGBUS;
  51. }
/*
 * VMA operations installed on mmap()ed NAT/IPv6CT regions; only a
 * fault handler is needed (see ipa3_nat_ipv6ct_vma_fault_remap).
 */
static const struct vm_operations_struct ipa3_nat_ipv6ct_remap_vm_ops = {
	.fault = ipa3_nat_ipv6ct_vma_fault_remap,
};
  56. static inline const char *ipa3_nat_mem_in_as_str(
  57. enum ipa3_nat_mem_in nmi)
  58. {
  59. switch (nmi) {
  60. case IPA_NAT_MEM_IN_DDR:
  61. return "IPA_NAT_MEM_IN_DDR";
  62. case IPA_NAT_MEM_IN_SRAM:
  63. return "IPA_NAT_MEM_IN_SRAM";
  64. default:
  65. break;
  66. }
  67. return "INVALID_MEM_TYPE";
  68. }
  69. static inline char *ipa_ioc_v4_nat_init_as_str(
  70. struct ipa_ioc_v4_nat_init *ptr,
  71. char *buf,
  72. uint32_t buf_sz)
  73. {
  74. if (ptr && buf && buf_sz) {
  75. snprintf(
  76. buf, buf_sz,
  77. "V4 NAT INIT: tbl_index(0x%02X) ipv4_rules_offset(0x%08X) expn_rules_offset(0x%08X) index_offset(0x%08X) index_expn_offset(0x%08X) table_entries(0x%04X) expn_table_entries(0x%04X) ip_addr(0x%08X)",
  78. ptr->tbl_index,
  79. ptr->ipv4_rules_offset,
  80. ptr->expn_rules_offset,
  81. ptr->index_offset,
  82. ptr->index_expn_offset,
  83. ptr->table_entries,
  84. ptr->expn_table_entries,
  85. ptr->ip_addr);
  86. }
  87. return buf;
  88. }
/*
 * Character-device open handler: recover the containing device state
 * from the inode's cdev and stash it for later file operations (mmap).
 */
static int ipa3_nat_ipv6ct_open(struct inode *inode, struct file *filp)
{
	struct ipa3_nat_ipv6ct_common_mem *dev;

	IPADBG("\n");
	/* cdev is embedded in the common-mem struct; walk back to it */
	dev = container_of(inode->i_cdev,
		struct ipa3_nat_ipv6ct_common_mem, cdev);
	filp->private_data = dev;
	IPADBG("return\n");

	return 0;
}
  99. static int ipa3_nat_ipv6ct_mmap(
  100. struct file *filp,
  101. struct vm_area_struct *vma)
  102. {
  103. struct ipa3_nat_ipv6ct_common_mem *dev =
  104. (struct ipa3_nat_ipv6ct_common_mem *)filp->private_data;
  105. unsigned long vsize = vma->vm_end - vma->vm_start;
  106. struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
  107. struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
  108. struct ipa3_nat_mem_loc_data *mld_ptr;
  109. enum ipa3_nat_mem_in nmi;
  110. int result = 0;
  111. nmi = nm_ptr->last_alloc_loc;
  112. IPADBG("In\n");
  113. if (!IPA_VALID_NAT_MEM_IN(nmi)) {
  114. IPAERR_RL("Bad ipa3_nat_mem_in type\n");
  115. result = -EPERM;
  116. goto bail;
  117. }
  118. mld_ptr = &nm_ptr->mem_loc[nmi];
  119. if (!dev->is_dev_init) {
  120. IPAERR("Attempt to mmap %s before dev init\n",
  121. dev->name);
  122. result = -EPERM;
  123. goto bail;
  124. }
  125. mutex_lock(&dev->lock);
  126. if (!mld_ptr->vaddr) {
  127. IPAERR_RL("Attempt to mmap %s before the memory allocation\n",
  128. dev->name);
  129. result = -EPERM;
  130. goto unlock;
  131. }
  132. if (mld_ptr->is_mapped) {
  133. IPAERR("%s already mapped, only 1 mapping supported\n",
  134. dev->name);
  135. result = -EINVAL;
  136. goto unlock;
  137. }
  138. if (nmi == IPA_NAT_MEM_IN_SRAM) {
  139. if (dev->phys_mem_size == 0 || dev->phys_mem_size > vsize) {
  140. IPAERR_RL("%s err vsize(0x%X) phys_mem_size(0x%X)\n",
  141. dev->name, vsize, dev->phys_mem_size);
  142. result = -EINVAL;
  143. goto unlock;
  144. }
  145. }
  146. /*
  147. * Check if no smmu or non dma coherent
  148. */
  149. if (!cb->valid || !dev_is_dma_coherent(cb->dev)) {
  150. IPADBG("Either smmu valid=%u and/or DMA coherent=%u false\n",
  151. cb->valid, !dev_is_dma_coherent(cb->dev));
  152. vma->vm_page_prot =
  153. pgprot_noncached(vma->vm_page_prot);
  154. }
  155. mld_ptr->base_address = NULL;
  156. IPADBG("Mapping %s\n", ipa3_nat_mem_in_as_str(nmi));
  157. if (nmi == IPA_NAT_MEM_IN_DDR) {
  158. IPADBG("map sz=0x%zx into vma size=0x%08x\n",
  159. mld_ptr->table_alloc_size,
  160. vsize);
  161. result =
  162. dma_mmap_coherent(
  163. ipa3_ctx->pdev,
  164. vma,
  165. mld_ptr->vaddr,
  166. mld_ptr->dma_handle,
  167. mld_ptr->table_alloc_size);
  168. if (result) {
  169. IPAERR("dma_mmap_coherent failed. Err:%d\n", result);
  170. goto unlock;
  171. }
  172. mld_ptr->base_address = mld_ptr->vaddr;
  173. } else {
  174. if (nmi == IPA_NAT_MEM_IN_SRAM) {
  175. IPADBG("map phys_mem_size(0x%08X) -> vma sz(0x%08X)\n",
  176. dev->phys_mem_size, vsize);
  177. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  178. result = vm_iomap_memory(
  179. vma, mld_ptr->phys_addr, dev->phys_mem_size);
  180. if (result) {
  181. IPAERR("vm_iomap_memory failed. Err:%d\n",
  182. result);
  183. goto unlock;
  184. }
  185. mld_ptr->base_address = mld_ptr->vaddr;
  186. }
  187. }
  188. mld_ptr->is_mapped = true;
  189. vma->vm_ops = &ipa3_nat_ipv6ct_remap_vm_ops;
  190. unlock:
  191. mutex_unlock(&dev->lock);
  192. bail:
  193. IPADBG("Out\n");
  194. return result;
  195. }
/* File operations for the NAT/IPv6CT character devices */
static const struct file_operations ipa3_nat_ipv6ct_fops = {
	.owner = THIS_MODULE,
	.open = ipa3_nat_ipv6ct_open,
	.mmap = ipa3_nat_ipv6ct_mmap
};
  201. /**
  202. * ipa3_allocate_nat_ipv6ct_tmp_memory() - Allocates the NAT\IPv6CT temp memory
  203. */
  204. static struct ipa3_nat_ipv6ct_tmp_mem *ipa3_nat_ipv6ct_allocate_tmp_memory(void)
  205. {
  206. struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
  207. gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
  208. IPADBG("\n");
  209. tmp_mem = kzalloc(sizeof(*tmp_mem), GFP_KERNEL);
  210. if (tmp_mem == NULL)
  211. return NULL;
  212. tmp_mem->vaddr =
  213. dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
  214. &tmp_mem->dma_handle, gfp_flags);
  215. if (tmp_mem->vaddr == NULL)
  216. goto bail_tmp_mem;
  217. IPADBG("IPA successfully allocated temp memory\n");
  218. return tmp_mem;
  219. bail_tmp_mem:
  220. kfree(tmp_mem);
  221. return NULL;
  222. }
/*
 * Create and register one NAT or IPv6CT character device.
 *
 * Sets up the device's mutex, class, chrdev region, device node and
 * cdev, then records the temp-memory handle and SRAM geometry. On any
 * failure the already-acquired resources are released via the cascading
 * labels at the bottom (each label undoes one step and falls through to
 * the next). Returns 0 on success, -ENODEV on failure.
 */
static int ipa3_nat_ipv6ct_init_device(
	struct ipa3_nat_ipv6ct_common_mem *dev,
	const char *name,
	u32 phys_mem_size,
	u32 phys_mem_ofst,
	struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem)
{
	int result = 0;

	IPADBG("In: Init of %s\n", name);

	mutex_init(&dev->lock);

	/* Tag which flavor of device this is (used by allocate_mem) */
	dev->is_nat_mem = IS_NAT_MEM_DEV(dev);
	dev->is_ipv6ct_mem = IS_IPV6CT_MEM_DEV(dev);

	if (strnlen(name, IPA_DEV_NAME_MAX_LEN) == IPA_DEV_NAME_MAX_LEN) {
		IPAERR("device name is too long\n");
		result = -ENODEV;
		goto bail;
	}

	strlcpy(dev->name, name, IPA_DEV_NAME_MAX_LEN);

	dev->class = class_create(THIS_MODULE, name);
	if (IS_ERR(dev->class)) {
		IPAERR("unable to create the class for %s\n", name);
		result = -ENODEV;
		goto bail;
	}

	result = alloc_chrdev_region(&dev->dev_num, 0, 1, name);
	if (result) {
		IPAERR("alloc_chrdev_region err. for %s\n", name);
		result = -ENODEV;
		goto alloc_chrdev_region_fail;
	}

	dev->dev = device_create(dev->class, NULL, dev->dev_num, NULL, name);
	if (IS_ERR(dev->dev)) {
		IPAERR("device_create err:%ld\n", PTR_ERR(dev->dev));
		result = -ENODEV;
		goto device_create_fail;
	}

	cdev_init(&dev->cdev, &ipa3_nat_ipv6ct_fops);
	dev->cdev.owner = THIS_MODULE;

	mutex_lock(&dev->lock);

	result = cdev_add(&dev->cdev, dev->dev_num, 1);
	if (result) {
		/* NOTE(review): cdev_add returns a negative errno, so
		 * -result logs it as a positive number — confirm intent.
		 */
		IPAERR("cdev_add err=%d\n", -result);
		goto cdev_add_fail;
	}

	dev->tmp_mem = tmp_mem;
	dev->phys_mem_size = phys_mem_size;
	dev->phys_mem_ofst = phys_mem_ofst;
	dev->is_dev_init = true;

	mutex_unlock(&dev->lock);

	IPADBG("ipa dev %s added successfully. major:%d minor:%d\n", name,
	       MAJOR(dev->dev_num), MINOR(dev->dev_num));
	result = 0;
	goto bail;

	/* Cleanup cascade: each label undoes one step, then falls through */
cdev_add_fail:
	mutex_unlock(&dev->lock);
	device_destroy(dev->class, dev->dev_num);
device_create_fail:
	unregister_chrdev_region(dev->dev_num, 1);
alloc_chrdev_region_fail:
	class_destroy(dev->class);
bail:
	IPADBG("Out\n");
	return result;
}
/*
 * Tear down one NAT/IPv6CT character device created by
 * ipa3_nat_ipv6ct_init_device().
 *
 * Frees the shared temp memory exactly once (guarded by the global
 * is_tmp_mem_allocated flag, since NAT and IPv6CT devices share the
 * same tmp_mem), then unregisters the device node, chrdev region and
 * class, and marks the device uninitialized.
 */
static void ipa3_nat_ipv6ct_destroy_device(
	struct ipa3_nat_ipv6ct_common_mem *dev)
{
	IPADBG("In\n");

	mutex_lock(&dev->lock);

	if (dev->tmp_mem) {
		if (ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
			dma_free_coherent(
				ipa3_ctx->pdev,
				IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
				dev->tmp_mem->vaddr,
				dev->tmp_mem->dma_handle);
			kfree(dev->tmp_mem);
			dev->tmp_mem = NULL;
			ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
		}
		/* Drop the (possibly already-freed-elsewhere) reference */
		dev->tmp_mem = NULL;
	}
	device_destroy(dev->class, dev->dev_num);
	unregister_chrdev_region(dev->dev_num, 1);
	class_destroy(dev->class);
	dev->is_dev_init = false;

	mutex_unlock(&dev->lock);

	IPADBG("Out\n");
}
/**
 * ipa3_nat_ipv6ct_init_devices() - Initialize the NAT and IPv6CT devices
 *
 * Called during IPA init to create the memory devices. The IPv6CT
 * device is only created on IPA HW v4.0 and later. Both devices share
 * one temp-memory buffer; on failure it is freed here unless a
 * successful destroy already released it (tracked via
 * is_tmp_mem_allocated).
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_nat_ipv6ct_init_devices(void)
{
	struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
	int result;

	IPADBG("\n");

	/*
	 * Allocate NAT/IPv6CT temporary memory. The memory is never deleted,
	 * because provided to HW once NAT or IPv6CT table is deleted.
	 */
	tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory();

	if (tmp_mem == NULL) {
		IPAERR("unable to allocate tmp_mem\n");
		return -ENOMEM;
	}
	ipa3_ctx->nat_mem.is_tmp_mem_allocated = true;

	if (ipa3_nat_ipv6ct_init_device(
		    &ipa3_ctx->nat_mem.dev,
		    IPA_NAT_DEV_NAME,
		    IPA_NAT_PHYS_MEM_SIZE,
		    IPA_NAT_PHYS_MEM_OFFSET,
		    tmp_mem)) {
		IPAERR("unable to create nat device\n");
		result = -ENODEV;
		goto fail_init_nat_dev;
	}

	/* IPv6 connection tracking exists only on IPA HW >= v4.0 */
	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) &&
	    ipa3_nat_ipv6ct_init_device(
		    &ipa3_ctx->ipv6ct_mem.dev,
		    IPA_IPV6CT_DEV_NAME,
		    IPA_IPV6CT_PHYS_MEM_SIZE,
		    IPA_IPV6CT_PHYS_MEM_OFFSET,
		    tmp_mem)) {
		IPAERR("unable to create IPv6CT device\n");
		result = -ENODEV;
		goto fail_init_ipv6ct_dev;
	}

	return 0;

fail_init_ipv6ct_dev:
	/* destroy_device may free tmp_mem and clear is_tmp_mem_allocated */
	ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
fail_init_nat_dev:
	if (tmp_mem != NULL && ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
		dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
				  tmp_mem->vaddr, tmp_mem->dma_handle);
		kfree(tmp_mem);
		ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
	}
	return result;
}
  367. /**
  368. * ipa3_nat_ipv6ct_destroy_devices() - destroy the NAT and IPv6CT devices
  369. *
  370. * Called during IPA init to destroy nat device
  371. */
  372. void ipa3_nat_ipv6ct_destroy_devices(void)
  373. {
  374. ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
  375. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
  376. ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->ipv6ct_mem.dev);
  377. }
/*
 * Allocate backing memory for a NAT or IPv6CT table.
 *
 * For IPv4 NAT: requests that fit in the dedicated SRAM region
 * (IPA_NAT_PHYS_MEM_SIZE) are placed in SRAM via ioremap(); larger
 * requests get DMA-coherent DDR. Only one allocation per location is
 * allowed at a time (sram_in_use / ddr_in_use). The chosen location is
 * recorded in last_alloc_loc for the later mmap().
 *
 * For IPv6CT: always DDR, tracked directly on the common dev struct.
 *
 * Caller must hold dev->lock. Returns 0 on success, negative errno
 * otherwise.
 */
static int ipa3_nat_ipv6ct_allocate_mem(
	struct ipa3_nat_ipv6ct_common_mem *dev,
	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc,
	enum ipahal_nat_type nat_type)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
	size_t nat_entry_size;
	struct ipa3_nat_mem *nm_ptr;
	struct ipa3_nat_mem_loc_data *mld_ptr;
	uintptr_t tmp_ptr;
	int result = 0;

	IPADBG("In: Requested alloc size %zu for %s\n",
	       table_alloc->size, dev->name);

	if (!table_alloc->size) {
		IPAERR_RL("Invalid Parameters\n");
		result = -EPERM;
		goto bail;
	}

	if (!dev->is_dev_init) {
		IPAERR("%s hasn't been initialized\n", dev->name);
		result = -EPERM;
		goto bail;
	}

	/* The device flavor must match the requested NAT type */
	if ((dev->is_nat_mem && nat_type != IPAHAL_NAT_IPV4) ||
	    (dev->is_ipv6ct_mem && nat_type != IPAHAL_NAT_IPV6CT)) {
		IPAERR("%s dev type(%s) and nat_type(%s) mismatch\n",
		       dev->name,
		       (dev->is_nat_mem) ? "V4" : "V6",
		       ipahal_nat_type_str(nat_type));
		result = -EPERM;
		goto bail;
	}

	/* Cap the request to IPA_TABLE_MAX_ENTRIES worth of entries */
	ipahal_nat_entry_size(nat_type, &nat_entry_size);
	if (table_alloc->size > MAX_ALLOC_NAT_SIZE(nat_entry_size)) {
		IPAERR("Trying allocate more size = %zu, Max allowed = %zu\n",
		       table_alloc->size,
		       MAX_ALLOC_NAT_SIZE(nat_entry_size));
		result = -EPERM;
		goto bail;
	}

	if (nat_type == IPAHAL_NAT_IPV4) {
		nm_ptr = (struct ipa3_nat_mem *) dev;

		if (table_alloc->size <= IPA_NAT_PHYS_MEM_SIZE) {
			/*
			 * CAN fit in SRAM, hence we'll use SRAM...
			 */
			IPADBG("V4 NAT will reside in: %s\n",
			       ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_SRAM));

			if (nm_ptr->sram_in_use) {
				IPAERR("Memory already allocated\n");
				result = -EPERM;
				goto bail;
			}

			mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_SRAM];

			mld_ptr->table_alloc_size = table_alloc->size;

			/* Physical address of the SRAM direct-access window */
			mld_ptr->phys_addr =
				ipa3_ctx->ipa_wrapper_base +
				ipa3_ctx->ctrl->ipa_reg_base_ofst +
				ipahal_get_reg_n_ofst(
					IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
					0) +
				IPA_NAT_PHYS_MEM_OFFSET;

			mld_ptr->io_vaddr = ioremap(
				mld_ptr->phys_addr, IPA_NAT_PHYS_MEM_SIZE);
			if (mld_ptr->io_vaddr == NULL) {
				IPAERR("ioremap failed\n");
				result = -ENOMEM;
				goto bail;
			}

			/* Keep a plain void* alias of the iomapped region */
			tmp_ptr = (uintptr_t) mld_ptr->io_vaddr;
			mld_ptr->vaddr = (void *) tmp_ptr;

			nm_ptr->sram_in_use = true;
			nm_ptr->last_alloc_loc = IPA_NAT_MEM_IN_SRAM;
		} else {
			/*
			 * CAN NOT fit in SRAM, hence we'll allocate DDR...
			 */
			IPADBG("V4 NAT will reside in: %s\n",
			       ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));

			if (nm_ptr->ddr_in_use) {
				IPAERR("Memory already allocated\n");
				result = -EPERM;
				goto bail;
			}

			mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_DDR];

			mld_ptr->table_alloc_size = table_alloc->size;

			mld_ptr->vaddr =
				dma_alloc_coherent(
					ipa3_ctx->pdev,
					mld_ptr->table_alloc_size,
					&mld_ptr->dma_handle,
					gfp_flags);
			if (mld_ptr->vaddr == NULL) {
				IPAERR("memory alloc failed\n");
				result = -ENOMEM;
				goto bail;
			}

			nm_ptr->ddr_in_use = true;
			nm_ptr->last_alloc_loc = IPA_NAT_MEM_IN_DDR;
		}
	} else {
		if (nat_type == IPAHAL_NAT_IPV6CT) {
			/* IPv6CT tables always live in DDR */
			dev->table_alloc_size = table_alloc->size;

			IPADBG("V6 NAT will reside in: %s\n",
			       ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));

			dev->vaddr =
				dma_alloc_coherent(
					ipa3_ctx->pdev,
					dev->table_alloc_size,
					&dev->dma_handle,
					gfp_flags);
			if (dev->vaddr == NULL) {
				IPAERR("memory alloc failed\n");
				result = -ENOMEM;
				goto bail;
			}
		}
	}

bail:
	IPADBG("Out\n");
	return result;
}
  500. /**
  501. * ipa3_allocate_nat_device() - Allocates memory for the NAT device
  502. * @mem: [in/out] memory parameters
  503. *
  504. * Called by NAT client driver to allocate memory for the NAT entries. Based on
  505. * the request size either shared or system memory will be used.
  506. *
  507. * Returns: 0 on success, negative on failure
  508. */
  509. int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
  510. {
  511. int result;
  512. struct ipa_ioc_nat_ipv6ct_table_alloc tmp;
  513. tmp.size = mem->size;
  514. tmp.offset = 0;
  515. result = ipa3_allocate_nat_table(&tmp);
  516. if (result)
  517. goto bail;
  518. mem->offset = tmp.offset;
  519. bail:
  520. return result;
  521. }
/**
 * ipa3_allocate_nat_table() - Allocates memory for the NAT table
 * @table_alloc: [in/out] memory parameters
 *
 * Called by NAT client to allocate memory for the table entries.
 * Based on the request size either shared or system memory will be used.
 *
 * On IPA HW >= v4.0 this also lazily allocates the PDN config table
 * the first time through; if that fails, the just-allocated NAT table
 * (DDR or SRAM) is released again.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_allocate_nat_table(
	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
{
	struct ipa3_nat_mem *nm_ptr = &(ipa3_ctx->nat_mem);
	struct ipa3_nat_mem_loc_data *mld_ptr;
	int result;

	IPADBG("table size:%u offset:%u\n",
	       table_alloc->size, table_alloc->offset);

	mutex_lock(&nm_ptr->dev.lock);

	result = ipa3_nat_ipv6ct_allocate_mem(
		&nm_ptr->dev,
		table_alloc,
		IPAHAL_NAT_IPV4);
	if (result)
		goto bail;

	/* First-time PDN table allocation (IPA v4.0+ only) */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0
	    &&
	    nm_ptr->pdn_mem.base == NULL) {

		gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
		size_t pdn_entry_size;
		struct ipa_mem_buffer *pdn_mem_ptr = &nm_ptr->pdn_mem;

		ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);

		pdn_mem_ptr->size = pdn_entry_size * IPA_MAX_PDN_NUM;

		/* PDN config must fit in the SRAM partition reserved for it */
		if (IPA_MEM_PART(pdn_config_size) < pdn_mem_ptr->size) {
			IPAERR(
			    "number of PDN entries exceeds SRAM available space\n");
			result = -ENOMEM;
			goto fail_alloc_pdn;
		}

		pdn_mem_ptr->base =
			dma_alloc_coherent(
				ipa3_ctx->pdev,
				pdn_mem_ptr->size,
				&pdn_mem_ptr->phys_base,
				gfp_flags);
		if (pdn_mem_ptr->base == NULL) {
			IPAERR("fail to allocate PDN memory\n");
			result = -ENOMEM;
			goto fail_alloc_pdn;
		}

		IPADBG("IPA NAT dev allocated PDN memory successfully\n");
	}

	IPADBG("IPA NAT dev init successfully\n");

	mutex_unlock(&nm_ptr->dev.lock);

	IPADBG("return\n");

	return 0;

fail_alloc_pdn:
	/*
	 * Undo the NAT table allocation made above; last_alloc_loc was
	 * just set by ipa3_nat_ipv6ct_allocate_mem().
	 * NOTE(review): sram_in_use/ddr_in_use are not cleared here —
	 * confirm whether a retry after this failure is expected to work.
	 */
	mld_ptr = &nm_ptr->mem_loc[nm_ptr->last_alloc_loc];

	if (nm_ptr->last_alloc_loc == IPA_NAT_MEM_IN_DDR) {
		if (mld_ptr->vaddr) {
			dma_free_coherent(
				ipa3_ctx->pdev,
				mld_ptr->table_alloc_size,
				mld_ptr->vaddr,
				mld_ptr->dma_handle);
			mld_ptr->vaddr = NULL;
		}
	}

	if (nm_ptr->last_alloc_loc == IPA_NAT_MEM_IN_SRAM) {
		if (mld_ptr->io_vaddr) {
			iounmap(mld_ptr->io_vaddr);
			mld_ptr->io_vaddr = NULL;
			mld_ptr->vaddr = NULL;
		}
	}

bail:
	mutex_unlock(&nm_ptr->dev.lock);

	return result;
}
  600. /**
  601. * ipa3_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table
  602. * @table_alloc: [in/out] memory parameters
  603. *
  604. * Called by IPv6CT client to allocate memory for the table entries.
  605. * Based on the request size either shared or system memory will be used.
  606. *
  607. * Returns: 0 on success, negative on failure
  608. */
  609. int ipa3_allocate_ipv6ct_table(
  610. struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
  611. {
  612. int result;
  613. IPADBG("\n");
  614. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
  615. IPAERR_RL("IPv6 connection tracking isn't supported\n");
  616. return -EPERM;
  617. }
  618. mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
  619. result = ipa3_nat_ipv6ct_allocate_mem(
  620. &ipa3_ctx->ipv6ct_mem.dev,
  621. table_alloc,
  622. IPAHAL_NAT_IPV6CT);
  623. if (result)
  624. goto bail;
  625. IPADBG("IPA IPv6CT dev init successfully\n");
  626. bail:
  627. mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
  628. return result;
  629. }
/*
 * Validate the placement of a (sub-)table inside a previously allocated
 * NAT/IPv6CT region.
 *
 * Checks, in order: that offset + dma_handle cannot overflow (DDR-backed
 * regions only), that the per-entry size is known for nat_type, that
 * offset + entries_num * entry_size does not overflow, and that the
 * resulting span stays inside the original allocation.
 *
 * Returns 0 if the parameters are acceptable, -EPERM otherwise.
 */
static int ipa3_nat_ipv6ct_check_table_params(
	struct ipa3_nat_ipv6ct_common_mem *dev,
	enum ipa3_nat_mem_in nmi,
	uint32_t offset,
	uint16_t entries_num,
	enum ipahal_nat_type nat_type)
{
	size_t entry_size, table_size, orig_alloc_size;
	struct ipa3_nat_mem *nm_ptr;
	struct ipa3_nat_mem_loc_data *mld_ptr;
	int ret = 0;

	IPADBG("In\n");

	IPADBG(
	    "v4(%u) v6(%u) nmi(%s) ofst(%u) ents(%u) nt(%s)\n",
	    dev->is_nat_mem,
	    dev->is_ipv6ct_mem,
	    ipa3_nat_mem_in_as_str(nmi),
	    offset,
	    entries_num,
	    ipahal_nat_type_str(nat_type));

	if (dev->is_ipv6ct_mem) {
		/* IPv6CT allocation state lives directly on dev */
		orig_alloc_size = dev->table_alloc_size;

		if (offset > UINT_MAX - dev->dma_handle) {
			IPAERR_RL("Failed due to integer overflow\n");
			IPAERR_RL("%s dma_handle: 0x%pa offset: 0x%x\n",
				  dev->name, &dev->dma_handle, offset);
			ret = -EPERM;
			goto bail;
		}
	} else { /* dev->is_nat_mem */
		/* NAT state is per memory location (SRAM or DDR) */
		nm_ptr = (struct ipa3_nat_mem *) dev;
		mld_ptr = &nm_ptr->mem_loc[nmi];
		orig_alloc_size = mld_ptr->table_alloc_size;

		/* dma_handle is only meaningful for DDR-backed tables */
		if (nmi == IPA_NAT_MEM_IN_DDR) {
			if (offset > UINT_MAX - mld_ptr->dma_handle) {
				IPAERR_RL("Failed due to integer overflow\n");
				IPAERR_RL("%s dma_handle: 0x%pa offset: 0x%x\n",
					  dev->name, &mld_ptr->dma_handle, offset);
				ret = -EPERM;
				goto bail;
			}
		}
	}

	ret = ipahal_nat_entry_size(nat_type, &entry_size);
	if (ret) {
		IPAERR("Failed to retrieve size of entry for %s\n",
		       ipahal_nat_type_str(nat_type));
		goto bail;
	}

	table_size = entry_size * entries_num;

	/* check for integer overflow */
	if (offset > UINT_MAX - table_size) {
		IPAERR_RL("Detected overflow\n");
		ret = -EPERM;
		goto bail;
	}

	/* Check offset is not beyond allocated size */
	if (offset + table_size > orig_alloc_size) {
		IPAERR_RL("Table offset not valid\n");
		IPAERR_RL("offset:%d entries:%d table_size:%zu mem_size:%zu\n",
			  offset, entries_num, table_size, orig_alloc_size);
		ret = -EPERM;
		goto bail;
	}

bail:
	IPADBG("Out\n");
	return ret;
}
/*
 * ipa3_nat_ipv6ct_create_init_cmd() - populate the table-init immediate
 * command fields that are common to NAT and IPv6CT.
 * @table_init_cmd: [out] common command fields being filled
 * @is_shared: copied into both the base- and expansion-table
 *	"addr_shared" flags (true when the tables reside in shared memory)
 * @base_addr: address that the two table offsets are added to
 * @tbl_index: HW table index programmed into the command
 * @base_table_offset: offset of the base table relative to @base_addr
 * @expn_table_offset: offset of the expansion table relative to @base_addr
 * @table_entries: size value programmed for the base table
 * @expn_table_entries: size value programmed for the expansion table
 * @table_name: label used only for the debug prints
 */
static inline void ipa3_nat_ipv6ct_create_init_cmd(
	struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd,
	bool is_shared,
	dma_addr_t base_addr,
	uint8_t tbl_index,
	uint32_t base_table_offset,
	uint32_t expn_table_offset,
	uint16_t table_entries,
	uint16_t expn_table_entries,
	const char *table_name)
{
	/* Both tables share the same memory-location flag */
	table_init_cmd->base_table_addr_shared = is_shared;
	table_init_cmd->expansion_table_addr_shared = is_shared;

	/* Table addresses are the caller's base plus per-table offsets */
	table_init_cmd->base_table_addr = base_addr + base_table_offset;
	IPADBG("%s base table offset:0x%x\n", table_name, base_table_offset);
	table_init_cmd->expansion_table_addr = base_addr + expn_table_offset;
	IPADBG("%s expn table offset:0x%x\n", table_name, expn_table_offset);
	table_init_cmd->table_index = tbl_index;
	IPADBG("%s table index:0x%x\n", table_name, tbl_index);
	table_init_cmd->size_base_table = table_entries;
	IPADBG("%s base table size:0x%x\n", table_name, table_entries);
	table_init_cmd->size_expansion_table = expn_table_entries;
	IPADBG("%s expansion table size:0x%x\n",
		table_name, expn_table_entries);
}
  723. static inline bool chk_sram_offset_alignment(
  724. uintptr_t addr,
  725. u32 mask)
  726. {
  727. if (addr & (uintptr_t) mask) {
  728. IPAERR("sram addr(%pK) is not properly aligned\n", addr);
  729. return false;
  730. }
  731. return true;
  732. }
/*
 * ipa3_nat_ipv6ct_init_device_structure() - record the table addresses,
 * entry counts and (for v4 NAT) index-table addresses in the device
 * structure after a successful table init.
 * @dev: common NAT/IPv6CT device structure
 * @nmi: memory location (SRAM/DDR) the v4 tables were installed in;
 *	used only on the NAT path
 * @base_table_offset: offset of the base table from the base address
 * @expn_table_offset: offset of the expansion table from the base address
 * @table_entries: number of base-table entries to record
 * @expn_table_entries: number of expansion-table entries to record
 * @index_offset: offset of the index table (v4 NAT only)
 * @index_expn_offset: offset of the index expansion table (v4 NAT only)
 * @focus_change: when non-zero, bumps the SRAM<->DDR switch counters
 *
 * Return: 0 on success, -ENODEV when an SRAM table address fails the
 * alignment checks.
 */
static inline int ipa3_nat_ipv6ct_init_device_structure(
	struct ipa3_nat_ipv6ct_common_mem *dev,
	enum ipa3_nat_mem_in nmi,
	uint32_t base_table_offset,
	uint32_t expn_table_offset,
	uint16_t table_entries,
	uint16_t expn_table_entries,
	uint32_t index_offset,
	uint32_t index_expn_offset,
	uint8_t focus_change)
{
	int ret = 0;
	IPADBG("In\n");
	IPADBG(
		"v4(%u) v6(%u) nmi(%s) bto(%u) eto(%u) t_ents(%u) et_ents(%u) io(%u) ieo(%u)\n",
		dev->is_nat_mem,
		dev->is_ipv6ct_mem,
		ipa3_nat_mem_in_as_str(nmi),
		base_table_offset,
		expn_table_offset,
		table_entries,
		expn_table_entries,
		index_offset,
		index_expn_offset);
	if (dev->is_ipv6ct_mem) {
		/* IPv6CT: single memory location, no index tables */
		IPADBG("v6\n");
		dev->base_table_addr =
			(char *) dev->base_address + base_table_offset;
		IPADBG("%s base_table_addr: 0x%pK\n",
			dev->name, dev->base_table_addr);
		dev->expansion_table_addr =
			(char *) dev->base_address + expn_table_offset;
		IPADBG("%s expansion_table_addr: 0x%pK\n",
			dev->name, dev->expansion_table_addr);
		IPADBG("%s table_entries: %d\n",
			dev->name, table_entries);
		dev->table_entries = table_entries;
		IPADBG("%s expn_table_entries: %d\n",
			dev->name, expn_table_entries);
		dev->expn_table_entries = expn_table_entries;
	} else if (dev->is_nat_mem) {
		/* v4 NAT: per-location (SRAM/DDR) bookkeeping */
		struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
		struct ipa3_nat_mem_loc_data *mld_p =
			&nm_ptr->mem_loc[nmi];
		IPADBG("v4\n");
		/* The location just initialized becomes the active one */
		nm_ptr->active_table = nmi;
		mld_p->base_table_addr =
			(char *) mld_p->base_address + base_table_offset;
		IPADBG("%s base_table_addr: 0x%pK\n",
			dev->name, mld_p->base_table_addr);
		mld_p->expansion_table_addr =
			(char *) mld_p->base_address + expn_table_offset;
		IPADBG("%s expansion_table_addr: 0x%pK\n",
			dev->name, mld_p->expansion_table_addr);
		IPADBG("%s table_entries: %d\n",
			dev->name, table_entries);
		mld_p->table_entries = table_entries;
		IPADBG("%s expn_table_entries: %d\n",
			dev->name, expn_table_entries);
		mld_p->expn_table_entries = expn_table_entries;
		mld_p->index_table_addr =
			(char *) mld_p->base_address + index_offset;
		IPADBG("index_table_addr: 0x%pK\n",
			mld_p->index_table_addr);
		mld_p->index_table_expansion_addr =
			(char *) mld_p->base_address + index_expn_offset;
		IPADBG("index_table_expansion_addr: 0x%pK\n",
			mld_p->index_table_expansion_addr);
		if (nmi == IPA_NAT_MEM_IN_DDR) {
			if (focus_change)
				nm_ptr->switch2ddr_cnt++;
		} else {
			/*
			 * The IPA wants certain SRAM addresses
			 * to have particular low order bits to
			 * be zero. We test here to ensure...
			 *
			 * Base/expansion tables: 32-byte alignment
			 * (mask 31); index tables: 4-byte alignment
			 * (mask 3).
			 */
			if (!chk_sram_offset_alignment(
					(uintptr_t) mld_p->base_table_addr,
					31) ||
				!chk_sram_offset_alignment(
					(uintptr_t) mld_p->expansion_table_addr,
					31) ||
				!chk_sram_offset_alignment(
					(uintptr_t) mld_p->index_table_addr,
					3) ||
				!chk_sram_offset_alignment(
					(uintptr_t) mld_p->index_table_expansion_addr,
					3)) {
				ret = -ENODEV;
				goto done;
			}
			if (focus_change)
				nm_ptr->switch2sram_cnt++;
		}
	}
done:
	IPADBG("Out\n");
	return ret;
}
/*
 * ipa3_nat_create_init_cmd() - build an IP_V4_NAT_INIT immediate command
 * from the ioctl parameters.
 * @init: [in] NAT init ioctl attributes (offsets, entry counts, public IP)
 * @is_shared: true when the tables live in shared memory (SRAM path)
 * @base_addr: base address the table offsets are added to
 * @cmd: [out] command structure being populated
 */
static void ipa3_nat_create_init_cmd(
	struct ipa_ioc_v4_nat_init *init,
	bool is_shared,
	dma_addr_t base_addr,
	struct ipahal_imm_cmd_ip_v4_nat_init *cmd)
{
	IPADBG("\n");
	/* Fill the base/expansion table fields shared with IPv6CT */
	ipa3_nat_ipv6ct_create_init_cmd(
		&cmd->table_init,
		is_shared,
		base_addr,
		init->tbl_index,
		init->ipv4_rules_offset,
		init->expn_rules_offset,
		init->table_entries,
		init->expn_table_entries,
		ipa3_ctx->nat_mem.dev.name);
	/* NAT-specific index tables */
	cmd->index_table_addr_shared = is_shared;
	cmd->index_table_expansion_addr_shared = is_shared;
	cmd->index_table_addr =
		base_addr + init->index_offset;
	IPADBG("index_offset:0x%x\n", init->index_offset);
	cmd->index_table_expansion_addr =
		base_addr + init->index_expn_offset;
	IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		/*
		 * starting IPAv4.0 public ip field changed to store the
		 * PDN config table offset in SMEM
		 */
		cmd->public_addr_info = IPA_MEM_PART(pdn_config_ofst);
		IPADBG("pdn config base:0x%x\n", cmd->public_addr_info);
	} else {
		/* Pre-v4.0: field carries the public IP address itself */
		cmd->public_addr_info = init->ip_addr;
		IPADBG("Public IP address:%pI4h\n", &cmd->public_addr_info);
	}
	IPADBG("return\n");
}
/*
 * ipa3_nat_create_modify_pdn_cmd() - build a DMA_SHARED_MEM command that
 * copies the cached PDN config table into IPA SRAM.
 * @mem_cmd: [out] DMA command being populated
 * @zero_mem: when true (and the PDN cache is allocated), clear the cached
 *	table before copying it to SRAM
 */
static void ipa3_nat_create_modify_pdn_cmd(
	struct ipahal_imm_cmd_dma_shared_mem *mem_cmd, bool zero_mem)
{
	size_t pdn_entry_size, mem_size;
	IPADBG("\n");
	/*
	 * NOTE(review): the return value of ipahal_nat_entry_size() is
	 * ignored here; if it were to fail, pdn_entry_size would be used
	 * uninitialized. Presumably IPAHAL_NAT_IPV4_PDN can never fail on
	 * HW that reaches this path — confirm.
	 */
	ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
	mem_size = pdn_entry_size * IPA_MAX_PDN_NUM;
	if (zero_mem && ipa3_ctx->nat_mem.pdn_mem.base)
		memset(ipa3_ctx->nat_mem.pdn_mem.base, 0, mem_size);
	/* Copy the PDN config table to SRAM */
	mem_cmd->is_read = false;
	mem_cmd->skip_pipeline_clear = false;
	mem_cmd->pipeline_clear_options = IPAHAL_HPS_CLEAR;
	mem_cmd->size = mem_size;
	mem_cmd->system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
	mem_cmd->local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(pdn_config_ofst);
	IPADBG("return\n");
}
/*
 * ipa3_nat_send_init_cmd() - send the NAT init command sequence to HW.
 * @cmd: [in] prepared IP_V4_NAT_INIT immediate command
 * @zero_pdn_table: passed through to the PDN copy command; when true the
 *	cached PDN table is cleared before being copied to SRAM
 *
 * Sequence: optional coal-frame close (if the WAN coalescing endpoint is
 * mapped), a NO-OP to drain the pipeline, the NAT init command itself,
 * and on IPA >= v4.0 a DMA command copying the PDN config table to SRAM.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd,
	bool zero_pdn_table)
{
	struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
	int i, num_cmd = 0, result;
	struct ipahal_reg_valmask valmask;
	struct ipahal_imm_cmd_register_write reg_write_coal_close;
	IPADBG("\n");
	memset(desc, 0, sizeof(desc));
	memset(cmd_pyld, 0, sizeof(cmd_pyld));
	/* IC to close the coal frame before HPS Clear if coal is enabled */
	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
		reg_write_coal_close.skip_pipeline_clear = false;
		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_coal_close.offset = ipahal_get_reg_ofst(
			IPA_AGGR_FORCE_CLOSE);
		ipahal_get_aggr_force_close_valmask(i, &valmask);
		reg_write_coal_close.value = valmask.val;
		reg_write_coal_close.value_mask = valmask.mask;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE,
			&reg_write_coal_close, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("failed to construct coal close IC\n");
			result = -ENOMEM;
			goto destroy_imm_cmd;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}
	/* NO-OP IC for ensuring that IPA pipeline is empty */
	cmd_pyld[num_cmd] =
		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
	if (!cmd_pyld[num_cmd]) {
		IPAERR("failed to construct NOP imm cmd\n");
		result = -ENOMEM;
		goto destroy_imm_cmd;
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
	++num_cmd;
	/* The NAT init command proper */
	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V4_NAT_INIT, cmd, false);
	if (!cmd_pyld[num_cmd]) {
		IPAERR_RL("fail to construct NAT init imm cmd\n");
		result = -EPERM;
		goto destroy_imm_cmd;
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
	++num_cmd;
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
		/* Guard against overrunning the descriptor arrays */
		if (num_cmd >= IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC) {
			IPAERR("number of commands is out of range\n");
			result = -ENOBUFS;
			goto destroy_imm_cmd;
		}
		/* Copy the PDN config table to SRAM */
		ipa3_nat_create_modify_pdn_cmd(&mem_cmd, zero_pdn_table);
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR(
			"fail construct dma_shared_mem cmd: for pdn table");
			result = -ENOMEM;
			goto destroy_imm_cmd;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
		IPADBG("added PDN table copy cmd\n");
	}
	result = ipa3_send_cmd(num_cmd, desc);
	if (result) {
		IPAERR("fail to send NAT init immediate command\n");
		goto destroy_imm_cmd;
	}
	IPADBG("return\n");
	/* Success intentionally falls through: payloads are freed either way */
destroy_imm_cmd:
	for (i = 0; i < num_cmd; ++i)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);
	return result;
}
/*
 * ipa3_ipv6ct_send_init_cmd() - send the IPv6CT init command sequence.
 * @cmd: [in] prepared IP_V6_CT_INIT immediate command
 *
 * Sequence: optional coal-frame close (if the WAN coalescing endpoint is
 * mapped), a NO-OP to drain the pipeline, then the IPv6CT init command.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd)
{
	struct ipa3_desc desc[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
	struct ipahal_imm_cmd_pyld
		*cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
	int i, num_cmd = 0, result;
	struct ipahal_reg_valmask valmask;
	struct ipahal_imm_cmd_register_write reg_write_coal_close;
	IPADBG("\n");
	memset(desc, 0, sizeof(desc));
	memset(cmd_pyld, 0, sizeof(cmd_pyld));
	/* IC to close the coal frame before HPS Clear if coal is enabled */
	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
		reg_write_coal_close.skip_pipeline_clear = false;
		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_coal_close.offset = ipahal_get_reg_ofst(
			IPA_AGGR_FORCE_CLOSE);
		ipahal_get_aggr_force_close_valmask(i, &valmask);
		reg_write_coal_close.value = valmask.val;
		reg_write_coal_close.value_mask = valmask.mask;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE,
			&reg_write_coal_close, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("failed to construct coal close IC\n");
			result = -ENOMEM;
			goto destroy_imm_cmd;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}
	/* NO-OP IC for ensuring that IPA pipeline is empty */
	cmd_pyld[num_cmd] =
		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
	if (!cmd_pyld[num_cmd]) {
		IPAERR("failed to construct NOP imm cmd\n");
		result = -ENOMEM;
		goto destroy_imm_cmd;
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
	++num_cmd;
	/* Guard against overrunning the descriptor arrays */
	if (num_cmd >= IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC) {
		IPAERR("number of commands is out of range\n");
		result = -ENOBUFS;
		goto destroy_imm_cmd;
	}
	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V6_CT_INIT, cmd, false);
	if (!cmd_pyld[num_cmd]) {
		IPAERR_RL("fail to construct IPv6CT init imm cmd\n");
		result = -EPERM;
		goto destroy_imm_cmd;
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
	++num_cmd;
	result = ipa3_send_cmd(num_cmd, desc);
	if (result) {
		IPAERR("Fail to send IPv6CT init immediate command\n");
		goto destroy_imm_cmd;
	}
	IPADBG("return\n");
	/* Success intentionally falls through: payloads are freed either way */
destroy_imm_cmd:
	for (i = 0; i < num_cmd; ++i)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);
	return result;
}
  1040. /* IOCTL function handlers */
  1041. /**
  1042. * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
  1043. * @init: [in] initialization command attributes
  1044. *
  1045. * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
  1046. *
  1047. * Returns: 0 on success, negative on failure
  1048. */
int ipa3_nat_init_cmd(
	struct ipa_ioc_v4_nat_init *init)
{
	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
	enum ipa3_nat_mem_in nmi;
	struct ipa3_nat_mem_loc_data *mld_ptr;
	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
	int result;
	IPADBG("In\n");
	/* Without SRAM support, force DDR (mem_type 0) and no focus change */
	if (!sram_compatible) {
		init->mem_type = 0;
		init->focus_change = 0;
	}
	nmi = init->mem_type;
	IPADBG("tbl_index(%d) table_entries(%u)\n",
		init->tbl_index,
		init->table_entries);
	memset(&cmd, 0, sizeof(cmd));
	/* Validate the ioctl parameters before touching HW */
	if (!IPA_VALID_TBL_INDEX(init->tbl_index)) {
		IPAERR_RL("Unsupported table index %d\n",
			init->tbl_index);
		result = -EPERM;
		goto bail;
	}
	if (init->table_entries == 0) {
		IPAERR_RL("Table entries is zero\n");
		result = -EPERM;
		goto bail;
	}
	if (!IPA_VALID_NAT_MEM_IN(nmi)) {
		IPAERR_RL("Bad ipa3_nat_mem_in type\n");
		result = -EPERM;
		goto bail;
	}
	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
	mld_ptr = &nm_ptr->mem_loc[nmi];
	/* The selected memory location must already be mmap'd */
	if (!mld_ptr->is_mapped) {
		IPAERR_RL("Attempt to init %s before mmap\n", dev->name);
		result = -EPERM;
		goto bail;
	}
	/*
	 * Range-check all four tables against the allocation.
	 * NOTE(review): the "+ 1" on the base/index entry counts appears
	 * to account for one implicit extra entry — confirm against the
	 * table layout.
	 */
	result = ipa3_nat_ipv6ct_check_table_params(
		dev, nmi,
		init->ipv4_rules_offset,
		init->table_entries + 1,
		IPAHAL_NAT_IPV4);
	if (result) {
		IPAERR_RL("Bad params for NAT base table\n");
		goto bail;
	}
	result = ipa3_nat_ipv6ct_check_table_params(
		dev, nmi,
		init->expn_rules_offset,
		init->expn_table_entries,
		IPAHAL_NAT_IPV4);
	if (result) {
		IPAERR_RL("Bad params for NAT expansion table\n");
		goto bail;
	}
	result = ipa3_nat_ipv6ct_check_table_params(
		dev, nmi,
		init->index_offset,
		init->table_entries + 1,
		IPAHAL_NAT_IPV4_INDEX);
	if (result) {
		IPAERR_RL("Bad params for index table\n");
		goto bail;
	}
	result = ipa3_nat_ipv6ct_check_table_params(
		dev, nmi,
		init->index_expn_offset,
		init->expn_table_entries,
		IPAHAL_NAT_IPV4_INDEX);
	if (result) {
		IPAERR_RL("Bad params for index expansion table\n");
		goto bail;
	}
	IPADBG("Table memory becoming active: %s\n",
		ipa3_nat_mem_in_as_str(nmi));
	/* DDR uses the DMA handle; SRAM is shared memory at a fixed offset */
	if (nmi == IPA_NAT_MEM_IN_DDR) {
		ipa3_nat_create_init_cmd(
			init,
			false,
			mld_ptr->dma_handle,
			&cmd);
	} else {
		ipa3_nat_create_init_cmd(
			init,
			true,
			IPA_RAM_NAT_OFST,
			&cmd);
	}
	/*
	 * On IPA >= v4.0 with a PDN cache allocated, seed PDN entry 0 with
	 * the public IP — but only on a fresh init, not a focus change.
	 */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0
		&&
		nm_ptr->pdn_mem.base
		&&
		!init->focus_change) {
		struct ipahal_nat_pdn_entry pdn_entry;
		/* store ip in pdn entry cache array */
		pdn_entry.public_ip = init->ip_addr;
		pdn_entry.src_metadata = 0;
		pdn_entry.dst_metadata = 0;
		result = ipahal_nat_construct_entry(
			IPAHAL_NAT_IPV4_PDN,
			&pdn_entry,
			nm_ptr->pdn_mem.base);
		if (result) {
			IPAERR("Fail to construct NAT pdn entry\n");
			goto bail;
		}
	}
	IPADBG("Posting NAT init command\n");
	result = ipa3_nat_send_init_cmd(&cmd, false);
	if (result) {
		IPAERR("Fail to send NAT init immediate command\n");
		goto bail;
	}
	/* HW accepted the tables; record them in the device structure */
	result = ipa3_nat_ipv6ct_init_device_structure(
		dev, nmi,
		init->ipv4_rules_offset,
		init->expn_rules_offset,
		init->table_entries,
		init->expn_table_entries,
		init->index_offset,
		init->index_expn_offset,
		init->focus_change);
	if (result) {
		IPAERR("Table offset initialization failure\n");
		goto bail;
	}
	nm_ptr->public_ip_addr = init->ip_addr;
	IPADBG("Public IP address:%pI4h\n", &nm_ptr->public_ip_addr);
	dev->is_hw_init = true;
bail:
	IPADBG("Out\n");
	return result;
}
  1187. /**
  1188. * ipa3_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW
  1189. * @init: [in] initialization command attributes
  1190. *
  1191. * Called by NAT client driver to post IP_V6_CONN_TRACK_INIT command to IPA HW
  1192. *
  1193. * Returns: 0 on success, negative on failure
  1194. */
int ipa3_ipv6ct_init_cmd(
	struct ipa_ioc_ipv6ct_init *init)
{
	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->ipv6ct_mem.dev;
	struct ipahal_imm_cmd_ip_v6_ct_init cmd;
	int result;
	IPADBG("In\n");
	memset(&cmd, 0, sizeof(cmd));
	/* IPv6CT requires IPA HW v4.0 or later */
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		IPAERR_RL("IPv6 connection tracking isn't supported\n");
		return -EPERM;
	}
	if (!IPA_VALID_TBL_INDEX(init->tbl_index)) {
		IPAERR_RL("Unsupported table index %d\n", init->tbl_index);
		return -EPERM;
	}
	if (init->table_entries == 0) {
		IPAERR_RL("Table entries is zero\n");
		return -EPERM;
	}
	/* Table memory must already be mmap'd by the client */
	if (!dev->is_mapped) {
		IPAERR_RL("attempt to init %s before mmap\n",
			dev->name);
		return -EPERM;
	}
	/* Range-check both tables against the allocation (DDR only) */
	result = ipa3_nat_ipv6ct_check_table_params(
		dev, IPA_NAT_MEM_IN_DDR,
		init->base_table_offset,
		init->table_entries + 1,
		IPAHAL_NAT_IPV6CT);
	if (result) {
		IPAERR_RL("Bad params for IPv6CT base table\n");
		return result;
	}
	result = ipa3_nat_ipv6ct_check_table_params(
		dev, IPA_NAT_MEM_IN_DDR,
		init->expn_table_offset,
		init->expn_table_entries,
		IPAHAL_NAT_IPV6CT);
	if (result) {
		IPAERR_RL("Bad params for IPv6CT expansion table\n");
		return result;
	}
	IPADBG("Will install v6 NAT in: %s\n",
		ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
	ipa3_nat_ipv6ct_create_init_cmd(
		&cmd.table_init,
		false,
		dev->dma_handle,
		init->tbl_index,
		init->base_table_offset,
		init->expn_table_offset,
		init->table_entries,
		init->expn_table_entries,
		dev->name);
	IPADBG("posting ip_v6_ct_init imm command\n");
	result = ipa3_ipv6ct_send_init_cmd(&cmd);
	if (result) {
		IPAERR("fail to send IPv6CT init immediate command\n");
		return result;
	}
	/*
	 * Return value deliberately ignored: on the is_ipv6ct_mem path
	 * this helper only records addresses/counts and always returns 0
	 * (the alignment checks apply to the v4 SRAM path only).
	 */
	ipa3_nat_ipv6ct_init_device_structure(
		dev,
		IPA_NAT_MEM_IN_DDR,
		init->base_table_offset,
		init->expn_table_offset,
		init->table_entries,
		init->expn_table_entries,
		0, 0, 0);
	dev->is_hw_init = true;
	IPADBG("Out\n");
	return 0;
}
  1268. /**
  1269. * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
  1270. * @mdfy_pdn: [in] PDN info to be written to SRAM
  1271. *
  1272. * Called by NAT client driver to modify an entry in the PDN config table
  1273. *
  1274. * Returns: 0 on success, negative on failure
  1275. */
int ipa3_nat_mdfy_pdn(
	struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
{
	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
	struct ipa_mem_buffer *pdn_mem_ptr = &nm_ptr->pdn_mem;
	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
	struct ipahal_nat_pdn_entry pdn_fields = { 0 };
	struct ipa3_desc desc = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	size_t entry_size;
	int result = 0;
	IPADBG("In\n");
	/* Serialize against other NAT operations on this device */
	mutex_lock(&dev->lock);
	/* Multi-PDN requires IPA HW v4.0 or later */
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		IPAERR_RL("IPA HW does not support multi PDN\n");
		result = -EPERM;
		goto bail;
	}
	if (pdn_mem_ptr->base == NULL) {
		IPAERR_RL(
		"Attempt to modify a PDN entry before the PDN table memory allocation\n");
		result = -EPERM;
		goto bail;
	}
	if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) {
		IPAERR_RL("pdn index out of range %d\n", mdfy_pdn->pdn_index);
		result = -EPERM;
		goto bail;
	}
	/*
	 * Store ip in pdn entry cache array
	 */
	pdn_fields.public_ip = mdfy_pdn->public_ip;
	pdn_fields.dst_metadata = mdfy_pdn->dst_metadata;
	pdn_fields.src_metadata = mdfy_pdn->src_metadata;
	/*
	 * Mark tethering bit for remote modem
	 */
	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_1) {
		pdn_fields.src_metadata |= IPA_QMAP_TETH_BIT;
	}
	/*
	 * Get size of the entry
	 */
	result = ipahal_nat_entry_size(
		IPAHAL_NAT_IPV4_PDN,
		&entry_size);
	if (result) {
		IPAERR("Failed to retrieve pdn entry size\n");
		goto bail;
	}
	/* Write the entry into the cached PDN table at pdn_index */
	result = ipahal_nat_construct_entry(
		IPAHAL_NAT_IPV4_PDN,
		&pdn_fields,
		(pdn_mem_ptr->base + (mdfy_pdn->pdn_index)*(entry_size)));
	if (result) {
		IPAERR("Fail to construct NAT pdn entry\n");
		goto bail;
	}
	IPADBG("Modify PDN in index: %d Public ip address:%pI4h\n",
		mdfy_pdn->pdn_index,
		&pdn_fields.public_ip);
	IPADBG("Modify PDN dst metadata: 0x%x src metadata: 0x%x\n",
		pdn_fields.dst_metadata,
		pdn_fields.src_metadata);
	/*
	 * Copy the PDN config table to SRAM
	 */
	ipa3_nat_create_modify_pdn_cmd(&mem_cmd, false);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
	if (!cmd_pyld) {
		IPAERR(
		"fail construct dma_shared_mem cmd: for pdn table\n");
		result = -ENOMEM;
		goto bail;
	}
	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPADBG("sending PDN table copy cmd\n");
	result = ipa3_send_cmd(1, &desc);
	if (result)
		IPAERR("Fail to send PDN table copy immediate command\n");
	ipahal_destroy_imm_cmd(cmd_pyld);
bail:
	mutex_unlock(&dev->lock);
	IPADBG("Out\n");
	return result;
}
  1365. static uint32_t ipa3_nat_ipv6ct_calculate_table_size(
  1366. enum ipa3_nat_mem_in nmi,
  1367. uint8_t base_addr)
  1368. {
  1369. size_t entry_size;
  1370. u32 num_entries;
  1371. enum ipahal_nat_type nat_type;
  1372. struct ipa3_nat_mem_loc_data *mld_ptr = &ipa3_ctx->nat_mem.mem_loc[nmi];
  1373. switch (base_addr) {
  1374. case IPA_NAT_BASE_TBL:
  1375. num_entries = mld_ptr->table_entries + 1;
  1376. nat_type = IPAHAL_NAT_IPV4;
  1377. break;
  1378. case IPA_NAT_EXPN_TBL:
  1379. num_entries = mld_ptr->expn_table_entries;
  1380. nat_type = IPAHAL_NAT_IPV4;
  1381. break;
  1382. case IPA_NAT_INDX_TBL:
  1383. num_entries = mld_ptr->table_entries + 1;
  1384. nat_type = IPAHAL_NAT_IPV4_INDEX;
  1385. break;
  1386. case IPA_NAT_INDEX_EXPN_TBL:
  1387. num_entries = mld_ptr->expn_table_entries;
  1388. nat_type = IPAHAL_NAT_IPV4_INDEX;
  1389. break;
  1390. case IPA_IPV6CT_BASE_TBL:
  1391. num_entries = ipa3_ctx->ipv6ct_mem.dev.table_entries + 1;
  1392. nat_type = IPAHAL_NAT_IPV6CT;
  1393. break;
  1394. case IPA_IPV6CT_EXPN_TBL:
  1395. num_entries = ipa3_ctx->ipv6ct_mem.dev.expn_table_entries;
  1396. nat_type = IPAHAL_NAT_IPV6CT;
  1397. break;
  1398. default:
  1399. IPAERR_RL("Invalid base_addr %d for table DMA command\n",
  1400. base_addr);
  1401. return 0;
  1402. }
  1403. ipahal_nat_entry_size(nat_type, &entry_size);
  1404. return entry_size * num_entries;
  1405. }
  1406. static int ipa3_table_validate_table_dma_one(
  1407. enum ipa3_nat_mem_in nmi,
  1408. struct ipa_ioc_nat_dma_one *param)
  1409. {
  1410. uint32_t table_size;
  1411. if (param->table_index >= 1) {
  1412. IPAERR_RL("Unsupported table index %u\n", param->table_index);
  1413. return -EPERM;
  1414. }
  1415. switch (param->base_addr) {
  1416. case IPA_NAT_BASE_TBL:
  1417. case IPA_NAT_EXPN_TBL:
  1418. case IPA_NAT_INDX_TBL:
  1419. case IPA_NAT_INDEX_EXPN_TBL:
  1420. if (!ipa3_ctx->nat_mem.dev.is_hw_init) {
  1421. IPAERR_RL("attempt to write to %s before HW int\n",
  1422. ipa3_ctx->nat_mem.dev.name);
  1423. return -EPERM;
  1424. }
  1425. IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
  1426. break;
  1427. case IPA_IPV6CT_BASE_TBL:
  1428. case IPA_IPV6CT_EXPN_TBL:
  1429. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
  1430. IPAERR_RL("IPv6 connection tracking isn't supported\n");
  1431. return -EPERM;
  1432. }
  1433. if (!ipa3_ctx->ipv6ct_mem.dev.is_hw_init) {
  1434. IPAERR_RL("attempt to write to %s before HW int\n",
  1435. ipa3_ctx->ipv6ct_mem.dev.name);
  1436. return -EPERM;
  1437. }
  1438. break;
  1439. default:
  1440. IPAERR_RL("Invalid base_addr %d for table DMA command\n",
  1441. param->base_addr);
  1442. return -EPERM;
  1443. }
  1444. table_size = ipa3_nat_ipv6ct_calculate_table_size(
  1445. nmi,
  1446. param->base_addr);
  1447. if (!table_size) {
  1448. IPAERR_RL("Failed to calculate table size for base_addr %d\n",
  1449. param->base_addr);
  1450. return -EPERM;
  1451. }
  1452. if (param->offset >= table_size) {
  1453. IPAERR_RL("Invalid offset %d for table DMA command\n",
  1454. param->offset);
  1455. IPAERR_RL("table_index %d base addr %d size %d\n",
  1456. param->table_index, param->base_addr, table_size);
  1457. return -EPERM;
  1458. }
  1459. return 0;
  1460. }
  1461. /**
  1462. * ipa3_table_dma_cmd() - Post TABLE_DMA command to IPA HW
  1463. * @dma: [in] initialization command attributes
  1464. *
  1465. * Called by NAT/IPv6CT clients to post TABLE_DMA command to IPA HW
  1466. *
  1467. * Returns: 0 on success, negative on failure
  1468. */
int ipa3_table_dma_cmd(
	struct ipa_ioc_nat_dma_cmd *dma)
{
	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
	enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA;
	struct ipahal_imm_cmd_table_dma cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
	struct ipa3_desc desc[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
	uint8_t cnt, num_cmd = 0;
	int result = 0;
	int i;
	struct ipahal_reg_valmask valmask;
	struct ipahal_imm_cmd_register_write reg_write_coal_close;
	int max_dma_table_cmds = IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC;
	IPADBG("In\n");
	/* Without SRAM support, force DDR (mem_type 0) */
	if (!sram_compatible)
		dma->mem_type = 0;
	if (!dev->is_dev_init) {
		IPAERR_RL("NAT hasn't been initialized\n");
		result = -EPERM;
		goto bail;
	}
	if (!IPA_VALID_NAT_MEM_IN(dma->mem_type)) {
		IPAERR_RL("Invalid ipa3_nat_mem_in type (%u)\n",
			dma->mem_type);
		result = -EPERM;
		goto bail;
	}
	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(dma->mem_type));
	memset(&cmd, 0, sizeof(cmd));
	memset(cmd_pyld, 0, sizeof(cmd_pyld));
	memset(desc, 0, sizeof(desc));
	/*
	 * We use a descriptor for closing the coalescing endpoint
	 * by immediate command. So, DMA entries should be less than
	 * IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC - 1 to overcome
	 * buffer overflow of ipa3_desc array.
	 */
	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
		max_dma_table_cmds -= 1;
	/* One slot is reserved for the NO-OP, hence "- 1" */
	if (!dma->entries || dma->entries > (max_dma_table_cmds - 1)) {
		IPAERR_RL("Invalid number of entries %d\n",
			dma->entries);
		result = -EPERM;
		goto bail;
	}
	/* Validate every DMA entry before constructing any command */
	for (cnt = 0; cnt < dma->entries; ++cnt) {
		result = ipa3_table_validate_table_dma_one(
			dma->mem_type, &dma->dma[cnt]);
		if (result) {
			IPAERR_RL("Table DMA command parameter %d is invalid\n",
				cnt);
			goto bail;
		}
	}
	/* IC to close the coal frame before HPS Clear if coal is enabled */
	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
		reg_write_coal_close.skip_pipeline_clear = false;
		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_coal_close.offset = ipahal_get_reg_ofst(
			IPA_AGGR_FORCE_CLOSE);
		ipahal_get_aggr_force_close_valmask(i, &valmask);
		reg_write_coal_close.value = valmask.val;
		reg_write_coal_close.value_mask = valmask.mask;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE,
			&reg_write_coal_close, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("failed to construct coal close IC\n");
			result = -ENOMEM;
			goto destroy_imm_cmd;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}
	/*
	 * NO-OP IC for ensuring that IPA pipeline is empty
	 */
	cmd_pyld[num_cmd] =
		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
	if (!cmd_pyld[num_cmd]) {
		IPAERR("Failed to construct NOP imm cmd\n");
		result = -ENOMEM;
		goto destroy_imm_cmd;
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
	++num_cmd;
	/*
	 * NAT_DMA was renamed to TABLE_DMA starting from IPAv4
	 */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
		cmd_name = IPA_IMM_CMD_TABLE_DMA;
	/* One immediate command per validated DMA entry */
	for (cnt = 0; cnt < dma->entries; ++cnt) {
		cmd.table_index = dma->dma[cnt].table_index;
		cmd.base_addr = dma->dma[cnt].base_addr;
		cmd.offset = dma->dma[cnt].offset;
		cmd.data = dma->dma[cnt].data;
		cmd_pyld[num_cmd] =
			ipahal_construct_imm_cmd(cmd_name, &cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR_RL("Fail to construct table_dma imm cmd\n");
			result = -ENOMEM;
			goto destroy_imm_cmd;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}
	result = ipa3_send_cmd(num_cmd, desc);
	if (result)
		IPAERR("Fail to send table_dma immediate command\n");
	/* Success intentionally falls through: payloads are freed either way */
destroy_imm_cmd:
	for (cnt = 0; cnt < num_cmd; ++cnt)
		ipahal_destroy_imm_cmd(cmd_pyld[cnt]);
bail:
	IPADBG("Out\n");
	return result;
}
  1587. /**
  1588. * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
  1589. * @dma: [in] initialization command attributes
  1590. *
  1591. * Called by NAT client driver to post NAT_DMA command to IPA HW
  1592. *
  1593. * Returns: 0 on success, negative on failure
  1594. */
  1595. int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
  1596. {
  1597. return ipa3_table_dma_cmd(dma);
  1598. }
  1599. static void ipa3_nat_ipv6ct_free_mem(
  1600. struct ipa3_nat_ipv6ct_common_mem *dev)
  1601. {
  1602. struct ipa3_nat_mem *nm_ptr;
  1603. struct ipa3_nat_mem_loc_data *mld_ptr;
  1604. if (dev->is_ipv6ct_mem) {
  1605. IPADBG("In: v6\n");
  1606. if (dev->vaddr) {
  1607. IPADBG("Freeing dma memory for %s\n", dev->name);
  1608. dma_free_coherent(
  1609. ipa3_ctx->pdev,
  1610. dev->table_alloc_size,
  1611. dev->vaddr,
  1612. dev->dma_handle);
  1613. }
  1614. dev->vaddr = NULL;
  1615. dev->dma_handle = 0;
  1616. dev->table_alloc_size = 0;
  1617. dev->base_table_addr = NULL;
  1618. dev->expansion_table_addr = NULL;
  1619. dev->table_entries = 0;
  1620. dev->expn_table_entries = 0;
  1621. dev->is_hw_init = false;
  1622. dev->is_mapped = false;
  1623. } else {
  1624. if (dev->is_nat_mem) {
  1625. IPADBG("In: v4\n");
  1626. nm_ptr = (struct ipa3_nat_mem *) dev;
  1627. if (nm_ptr->ddr_in_use) {
  1628. nm_ptr->ddr_in_use = false;
  1629. mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_DDR];
  1630. if (mld_ptr->vaddr) {
  1631. IPADBG("Freeing dma memory for %s\n",
  1632. dev->name);
  1633. dma_free_coherent(
  1634. ipa3_ctx->pdev,
  1635. mld_ptr->table_alloc_size,
  1636. mld_ptr->vaddr,
  1637. mld_ptr->dma_handle);
  1638. }
  1639. mld_ptr->vaddr = NULL;
  1640. mld_ptr->dma_handle = 0;
  1641. mld_ptr->table_alloc_size = 0;
  1642. mld_ptr->table_entries = 0;
  1643. mld_ptr->expn_table_entries = 0;
  1644. mld_ptr->base_table_addr = NULL;
  1645. mld_ptr->expansion_table_addr = NULL;
  1646. mld_ptr->index_table_addr = NULL;
  1647. mld_ptr->index_table_expansion_addr = NULL;
  1648. }
  1649. if (nm_ptr->sram_in_use) {
  1650. nm_ptr->sram_in_use = false;
  1651. mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_SRAM];
  1652. if (mld_ptr->io_vaddr) {
  1653. IPADBG("Unmappung sram memory for %s\n",
  1654. dev->name);
  1655. iounmap(mld_ptr->io_vaddr);
  1656. }
  1657. mld_ptr->io_vaddr = NULL;
  1658. mld_ptr->vaddr = NULL;
  1659. mld_ptr->dma_handle = 0;
  1660. mld_ptr->table_alloc_size = 0;
  1661. mld_ptr->table_entries = 0;
  1662. mld_ptr->expn_table_entries = 0;
  1663. mld_ptr->base_table_addr = NULL;
  1664. mld_ptr->expansion_table_addr = NULL;
  1665. mld_ptr->index_table_addr = NULL;
  1666. mld_ptr->index_table_expansion_addr = NULL;
  1667. }
  1668. memset(nm_ptr->mem_loc, 0, sizeof(nm_ptr->mem_loc));
  1669. }
  1670. }
  1671. IPADBG("Out\n");
  1672. }
  1673. static int ipa3_nat_ipv6ct_create_del_table_cmd(
  1674. uint8_t tbl_index,
  1675. u32 base_addr,
  1676. struct ipa3_nat_ipv6ct_common_mem *dev,
  1677. struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd)
  1678. {
  1679. bool mem_type_shared = true;
  1680. IPADBG("In: tbl_index(%u) base_addr(%u) v4(%u) v6(%u)\n",
  1681. tbl_index,
  1682. base_addr,
  1683. dev->is_nat_mem,
  1684. dev->is_ipv6ct_mem);
  1685. if (!IPA_VALID_TBL_INDEX(tbl_index)) {
  1686. IPAERR_RL("Unsupported table index %d\n", tbl_index);
  1687. return -EPERM;
  1688. }
  1689. if (dev->tmp_mem) {
  1690. IPADBG("using temp memory during %s del\n", dev->name);
  1691. mem_type_shared = false;
  1692. base_addr = dev->tmp_mem->dma_handle;
  1693. }
  1694. table_init_cmd->table_index = tbl_index;
  1695. table_init_cmd->base_table_addr = base_addr;
  1696. table_init_cmd->base_table_addr_shared = mem_type_shared;
  1697. table_init_cmd->expansion_table_addr = base_addr;
  1698. table_init_cmd->expansion_table_addr_shared = mem_type_shared;
  1699. table_init_cmd->size_base_table = 0;
  1700. table_init_cmd->size_expansion_table = 0;
  1701. IPADBG("Out\n");
  1702. return 0;
  1703. }
  1704. static int ipa3_nat_send_del_table_cmd(
  1705. uint8_t tbl_index)
  1706. {
  1707. struct ipahal_imm_cmd_ip_v4_nat_init cmd;
  1708. int result = 0;
  1709. IPADBG("In\n");
  1710. result =
  1711. ipa3_nat_ipv6ct_create_del_table_cmd(
  1712. tbl_index,
  1713. IPA_NAT_PHYS_MEM_OFFSET,
  1714. &ipa3_ctx->nat_mem.dev,
  1715. &cmd.table_init);
  1716. if (result) {
  1717. IPAERR(
  1718. "Fail to create immediate command to delete NAT table\n");
  1719. goto bail;
  1720. }
  1721. cmd.index_table_addr =
  1722. cmd.table_init.base_table_addr;
  1723. cmd.index_table_addr_shared =
  1724. cmd.table_init.base_table_addr_shared;
  1725. cmd.index_table_expansion_addr =
  1726. cmd.index_table_addr;
  1727. cmd.index_table_expansion_addr_shared =
  1728. cmd.index_table_addr_shared;
  1729. cmd.public_addr_info = 0;
  1730. IPADBG("Posting NAT delete command\n");
  1731. result = ipa3_nat_send_init_cmd(&cmd, true);
  1732. if (result) {
  1733. IPAERR("Fail to send NAT delete immediate command\n");
  1734. goto bail;
  1735. }
  1736. bail:
  1737. IPADBG("Out\n");
  1738. return result;
  1739. }
  1740. static int ipa3_ipv6ct_send_del_table_cmd(uint8_t tbl_index)
  1741. {
  1742. struct ipahal_imm_cmd_ip_v6_ct_init cmd;
  1743. int result;
  1744. IPADBG("\n");
  1745. result = ipa3_nat_ipv6ct_create_del_table_cmd(
  1746. tbl_index,
  1747. IPA_IPV6CT_PHYS_MEM_OFFSET,
  1748. &ipa3_ctx->ipv6ct_mem.dev,
  1749. &cmd.table_init);
  1750. if (result) {
  1751. IPAERR(
  1752. "Fail to create immediate command to delete IPv6CT table\n");
  1753. return result;
  1754. }
  1755. IPADBG("posting IPv6CT delete command\n");
  1756. result = ipa3_ipv6ct_send_init_cmd(&cmd);
  1757. if (result) {
  1758. IPAERR("Fail to send IPv6CT delete immediate command\n");
  1759. return result;
  1760. }
  1761. IPADBG("return\n");
  1762. return 0;
  1763. }
  1764. /**
  1765. * ipa3_nat_del_cmd() - Delete a NAT table
  1766. * @del: [in] delete table table table parameters
  1767. *
  1768. * Called by NAT client driver to delete the nat table
  1769. *
  1770. * Returns: 0 on success, negative on failure
  1771. */
  1772. int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
  1773. {
  1774. struct ipa_ioc_nat_ipv6ct_table_del tmp;
  1775. tmp.table_index = del->table_index;
  1776. return ipa3_del_nat_table(&tmp);
  1777. }
  1778. /**
  1779. * ipa3_del_nat_table() - Delete the NAT table
  1780. * @del: [in] delete table parameters
  1781. *
  1782. * Called by NAT client to delete the table
  1783. *
  1784. * Returns: 0 on success, negative on failure
  1785. */
  1786. int ipa3_del_nat_table(
  1787. struct ipa_ioc_nat_ipv6ct_table_del *del)
  1788. {
  1789. struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
  1790. struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
  1791. struct ipa3_nat_mem_loc_data *mld_ptr;
  1792. enum ipa3_nat_mem_in nmi;
  1793. int result = 0;
  1794. IPADBG("In\n");
  1795. if (!sram_compatible)
  1796. del->mem_type = 0;
  1797. nmi = del->mem_type;
  1798. if (!dev->is_dev_init) {
  1799. IPAERR("NAT hasn't been initialized\n");
  1800. result = -EPERM;
  1801. goto bail;
  1802. }
  1803. if (!IPA_VALID_TBL_INDEX(del->table_index)) {
  1804. IPAERR_RL("Unsupported table index %d\n",
  1805. del->table_index);
  1806. result = -EPERM;
  1807. goto bail;
  1808. }
  1809. if (!IPA_VALID_NAT_MEM_IN(nmi)) {
  1810. IPAERR_RL("Bad ipa3_nat_mem_in type\n");
  1811. result = -EPERM;
  1812. goto bail;
  1813. }
  1814. IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
  1815. mld_ptr = &nm_ptr->mem_loc[nmi];
  1816. mutex_lock(&dev->lock);
  1817. if (dev->is_hw_init) {
  1818. result = ipa3_nat_send_del_table_cmd(del->table_index);
  1819. if (result) {
  1820. IPAERR(
  1821. "Fail to send immediate command to delete NAT table\n");
  1822. goto unlock;
  1823. }
  1824. }
  1825. nm_ptr->public_ip_addr = 0;
  1826. mld_ptr->index_table_addr = NULL;
  1827. mld_ptr->index_table_expansion_addr = NULL;
  1828. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0
  1829. &&
  1830. nm_ptr->pdn_mem.base) {
  1831. struct ipa_mem_buffer *pdn_mem_ptr = &nm_ptr->pdn_mem;
  1832. IPADBG("Freeing the PDN memory\n");
  1833. dma_free_coherent(
  1834. ipa3_ctx->pdev,
  1835. pdn_mem_ptr->size,
  1836. pdn_mem_ptr->base,
  1837. pdn_mem_ptr->phys_base);
  1838. pdn_mem_ptr->base = NULL;
  1839. }
  1840. ipa3_nat_ipv6ct_free_mem(dev);
  1841. unlock:
  1842. mutex_unlock(&dev->lock);
  1843. bail:
  1844. IPADBG("Out\n");
  1845. return result;
  1846. }
  1847. /**
  1848. * ipa3_del_ipv6ct_table() - Delete the IPv6CT table
  1849. * @del: [in] delete table parameters
  1850. *
  1851. * Called by IPv6CT client to delete the table
  1852. *
  1853. * Returns: 0 on success, negative on failure
  1854. */
  1855. int ipa3_del_ipv6ct_table(
  1856. struct ipa_ioc_nat_ipv6ct_table_del *del)
  1857. {
  1858. struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->ipv6ct_mem.dev;
  1859. int result = 0;
  1860. IPADBG("In\n");
  1861. if (!sram_compatible)
  1862. del->mem_type = 0;
  1863. if (!dev->is_dev_init) {
  1864. IPAERR("IPv6 connection tracking hasn't been initialized\n");
  1865. result = -EPERM;
  1866. goto bail;
  1867. }
  1868. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
  1869. IPAERR_RL("IPv6 connection tracking isn't supported\n");
  1870. result = -EPERM;
  1871. goto bail;
  1872. }
  1873. mutex_lock(&dev->lock);
  1874. if (dev->is_hw_init) {
  1875. result = ipa3_ipv6ct_send_del_table_cmd(del->table_index);
  1876. if (result) {
  1877. IPAERR("ipa3_ipv6ct_send_del_table_cmd() fail\n");
  1878. goto unlock;
  1879. }
  1880. }
  1881. ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->ipv6ct_mem.dev);
  1882. unlock:
  1883. mutex_unlock(&dev->lock);
  1884. bail:
  1885. IPADBG("Out\n");
  1886. return result;
  1887. }
  1888. int ipa3_nat_get_sram_info(
  1889. struct ipa_nat_in_sram_info *info_ptr)
  1890. {
  1891. struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
  1892. int ret = 0;
  1893. IPADBG("In\n");
  1894. if (!info_ptr) {
  1895. IPAERR("Bad argument passed\n");
  1896. ret = -EINVAL;
  1897. goto bail;
  1898. }
  1899. if (!dev->is_dev_init) {
  1900. IPAERR_RL("NAT hasn't been initialized\n");
  1901. ret = -EPERM;
  1902. goto bail;
  1903. }
  1904. sram_compatible = true;
  1905. memset(info_ptr,
  1906. 0,
  1907. sizeof(struct ipa_nat_in_sram_info));
  1908. /*
  1909. * Size of SRAM set aside for the NAT table.
  1910. */
  1911. info_ptr->sram_mem_available_for_nat = IPA_RAM_NAT_SIZE;
  1912. /*
  1913. * If table's phys addr in SRAM is not page aligned, it will be
  1914. * offset into the mmap'd VM by the amount calculated below. This
  1915. * value can be used by the app, so that it can know where the
  1916. * table actually lives in the mmap'd VM...
  1917. */
  1918. info_ptr->nat_table_offset_into_mmap =
  1919. (ipa3_ctx->ipa_wrapper_base +
  1920. ipa3_ctx->ctrl->ipa_reg_base_ofst +
  1921. ipahal_get_reg_n_ofst(
  1922. IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
  1923. 0) +
  1924. IPA_RAM_NAT_OFST) & ~PAGE_MASK;
  1925. /*
  1926. * If the offset above plus the size of the NAT table causes the
  1927. * table to extend beyond the next page boundary, the app needs to
  1928. * know it, so that it can increase the size used in the mmap
  1929. * request...
  1930. */
  1931. info_ptr->best_nat_in_sram_size_rqst =
  1932. roundup(
  1933. info_ptr->nat_table_offset_into_mmap +
  1934. IPA_RAM_NAT_SIZE,
  1935. PAGE_SIZE);
  1936. bail:
  1937. IPADBG("Out\n");
  1938. return ret;
  1939. }