ipahal.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include "ipahal.h"
  7. #include "ipahal_i.h"
  8. #include "ipahal_reg_i.h"
  9. #include "ipahal_fltrt_i.h"
  10. #include "ipahal_hw_stats_i.h"
  11. #include "ipahal_nat_i.h"
/* Global IPA HAL context shared across this module */
struct ipahal_context *ipahal_ctx;
  13. static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
  14. __stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
  15. __stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
  16. __stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
  17. __stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
  18. __stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
  19. __stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
  20. __stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
  21. __stringify(IPA_IMM_CMD_REGISTER_WRITE),
  22. __stringify(IPA_IMM_CMD_NAT_DMA),
  23. __stringify(IPA_IMM_CMD_IP_PACKET_INIT),
  24. __stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
  25. __stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
  26. __stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
  27. __stringify(IPA_IMM_CMD_TABLE_DMA),
  28. __stringify(IPA_IMM_CMD_IP_V6_CT_INIT)
  29. };
  30. static const char *ipahal_pkt_status_exception_to_str
  31. [IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
  32. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
  33. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
  34. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
  35. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
  36. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
  37. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
  38. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
  39. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
  40. __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT),
  41. };
/* Maps an abstracted immediate command name to its H/W opcode */
static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
  43. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
  44. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  45. {
  46. struct ipahal_imm_cmd_pyld *pyld;
  47. struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
  48. struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
  49. (struct ipahal_imm_cmd_dma_task_32b_addr *)params;
  50. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  51. if (unlikely(!pyld))
  52. return pyld;
  53. /* Currently supports only one packet */
  54. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd) + (1 << 8);
  55. pyld->len = sizeof(*data);
  56. data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
  57. if (unlikely(dma_params->size1 & ~0xFFFF)) {
  58. WARN(1, "Size1 is bigger than 16bit width 0x%x\n",
  59. dma_params->size1);
  60. }
  61. if (unlikely(dma_params->packet_size & ~0xFFFF)) {
  62. WARN(1, "Pkt size is bigger than 16bit width 0x%x\n",
  63. dma_params->packet_size);
  64. }
  65. data->cmplt = dma_params->cmplt ? 1 : 0;
  66. data->eof = dma_params->eof ? 1 : 0;
  67. data->flsh = dma_params->flsh ? 1 : 0;
  68. data->lock = dma_params->lock ? 1 : 0;
  69. data->unlock = dma_params->unlock ? 1 : 0;
  70. data->size1 = dma_params->size1;
  71. data->addr1 = dma_params->addr1;
  72. data->packet_size = dma_params->packet_size;
  73. return pyld;
  74. }
  75. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
  76. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  77. {
  78. struct ipahal_imm_cmd_pyld *pyld;
  79. struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
  80. struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
  81. (struct ipahal_imm_cmd_ip_packet_tag_status *)params;
  82. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  83. if (unlikely(!pyld)) {
  84. IPAHAL_ERR("kzalloc err\n");
  85. return pyld;
  86. }
  87. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  88. pyld->len = sizeof(*data);
  89. data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
  90. if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
  91. IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
  92. tag_params->tag);
  93. WARN_ON(1);
  94. }
  95. data->tag = tag_params->tag;
  96. return pyld;
  97. }
  98. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
  99. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  100. {
  101. struct ipahal_imm_cmd_pyld *pyld;
  102. struct ipa_imm_cmd_hw_dma_shared_mem *data;
  103. struct ipahal_imm_cmd_dma_shared_mem *mem_params =
  104. (struct ipahal_imm_cmd_dma_shared_mem *)params;
  105. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  106. if (unlikely(!pyld))
  107. return pyld;
  108. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  109. pyld->len = sizeof(*data);
  110. data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
  111. if (unlikely(mem_params->size & ~0xFFFF)) {
  112. WARN(1, "Size is bigger than 16bit width 0x%x\n",
  113. mem_params->size);
  114. }
  115. if (unlikely(mem_params->local_addr & ~0xFFFF)) {
  116. WARN(1, "Local addr is bigger than 16bit width 0x%x\n",
  117. mem_params->local_addr);
  118. }
  119. data->direction = mem_params->is_read ? 1 : 0;
  120. data->size = mem_params->size;
  121. data->local_addr = mem_params->local_addr;
  122. data->system_addr = mem_params->system_addr;
  123. data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
  124. switch (mem_params->pipeline_clear_options) {
  125. case IPAHAL_HPS_CLEAR:
  126. data->pipeline_clear_options = 0;
  127. break;
  128. case IPAHAL_SRC_GRP_CLEAR:
  129. data->pipeline_clear_options = 1;
  130. break;
  131. case IPAHAL_FULL_PIPELINE_CLEAR:
  132. data->pipeline_clear_options = 2;
  133. break;
  134. default:
  135. IPAHAL_ERR("unsupported pipline clear option %d\n",
  136. mem_params->pipeline_clear_options);
  137. WARN_ON(1);
  138. }
  139. return pyld;
  140. }
/*
 * ipa_imm_cmd_construct_dma_shared_mem_v_4_0() - Build the DMA_SHARED_MEM
 * immediate command payload for IPAv4.0+, where the pipeline-clear controls
 * moved from the payload into opcode bits 8-10.
 * @cmd: immediate command name (used to fetch the H/W opcode)
 * @params: points to a struct ipahal_imm_cmd_dma_shared_mem
 * @is_atomic_ctx: true when called from atomic context (allocation flags)
 *
 * Return: the constructed payload, or NULL on bad params / alloc failure.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem_v_4_0(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *data;
	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
		(struct ipahal_imm_cmd_dma_shared_mem *)params;

	/* unlike the pre-v4.0 variant, out-of-range params fail the call */
	if (unlikely(mem_params->size & ~0xFFFF)) {
		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
			mem_params->size);
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
			mem_params->local_addr);
		WARN_ON(1);
		return NULL;
	}
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		WARN_ON(1);
		return pyld;
	}
	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *)pyld->data;
	data->direction = mem_params->is_read ? 1 : 0;
	data->clear_after_read = mem_params->clear_after_read;
	data->size = mem_params->size;
	data->local_addr = mem_params->local_addr;
	data->system_addr = mem_params->system_addr;
	/* v4.0+: skip-pipeline-clear flag is opcode bit 8 */
	pyld->opcode |= (mem_params->skip_pipeline_clear ? 1 : 0) << 8;
	/* v4.0+: pipeline-clear option is encoded in opcode bits 9-10 */
	switch (mem_params->pipeline_clear_options) {
	case IPAHAL_HPS_CLEAR:
		break;
	case IPAHAL_SRC_GRP_CLEAR:
		pyld->opcode |= (1 << 9);
		break;
	case IPAHAL_FULL_PIPELINE_CLEAR:
		pyld->opcode |= (2 << 9);
		break;
	default:
		IPAHAL_ERR("unsupported pipline clear option %d\n",
			mem_params->pipeline_clear_options);
		WARN_ON(1);
	}
	return pyld;
}
  190. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
  191. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  192. {
  193. struct ipahal_imm_cmd_pyld *pyld;
  194. struct ipa_imm_cmd_hw_register_write *data;
  195. struct ipahal_imm_cmd_register_write *regwrt_params =
  196. (struct ipahal_imm_cmd_register_write *)params;
  197. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  198. if (unlikely(!pyld)) {
  199. IPAHAL_ERR("kzalloc err\n");
  200. return pyld;
  201. }
  202. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  203. pyld->len = sizeof(*data);
  204. data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
  205. if (unlikely(regwrt_params->offset & ~0xFFFF)) {
  206. IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
  207. regwrt_params->offset);
  208. WARN_ON(1);
  209. }
  210. data->offset = regwrt_params->offset;
  211. data->value = regwrt_params->value;
  212. data->value_mask = regwrt_params->value_mask;
  213. data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
  214. switch (regwrt_params->pipeline_clear_options) {
  215. case IPAHAL_HPS_CLEAR:
  216. data->pipeline_clear_options = 0;
  217. break;
  218. case IPAHAL_SRC_GRP_CLEAR:
  219. data->pipeline_clear_options = 1;
  220. break;
  221. case IPAHAL_FULL_PIPELINE_CLEAR:
  222. data->pipeline_clear_options = 2;
  223. break;
  224. default:
  225. IPAHAL_ERR("unsupported pipline clear option %d\n",
  226. regwrt_params->pipeline_clear_options);
  227. WARN_ON(1);
  228. }
  229. return pyld;
  230. }
/*
 * ipa_imm_cmd_construct_register_write_v_4_0() - Build the REGISTER_WRITE
 * immediate command payload for IPAv4.0+, where the pipeline-clear controls
 * moved from the payload into opcode bits 8-10 and the offset gained a
 * high part.
 * @cmd: immediate command name (used to fetch the H/W opcode)
 * @params: points to a struct ipahal_imm_cmd_register_write
 * @is_atomic_ctx: true when called from atomic context (allocation flags)
 *
 * Return: the constructed payload, or NULL on bad params / alloc failure.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write_v_4_0(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_register_write_v_4_0 *data;
	struct ipahal_imm_cmd_register_write *regwrt_params =
		(struct ipahal_imm_cmd_register_write *)params;

	/* unlike the pre-v4.0 variant, an out-of-range offset fails the call */
	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
			regwrt_params->offset);
		WARN_ON(1);
		return NULL;
	}
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		WARN_ON(1);
		return pyld;
	}
	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_register_write_v_4_0 *)pyld->data;
	data->offset = regwrt_params->offset;
	/* upper offset bits go into the separate offset_high field */
	data->offset_high = regwrt_params->offset >> 16;
	data->value = regwrt_params->value;
	data->value_mask = regwrt_params->value_mask;
	/* v4.0+: skip-pipeline-clear flag is opcode bit 8 */
	pyld->opcode |= (regwrt_params->skip_pipeline_clear ? 1 : 0) << 8;
	/* v4.0+: pipeline-clear option is encoded in opcode bits 9-10 */
	switch (regwrt_params->pipeline_clear_options) {
	case IPAHAL_HPS_CLEAR:
		break;
	case IPAHAL_SRC_GRP_CLEAR:
		pyld->opcode |= (1 << 9);
		break;
	case IPAHAL_FULL_PIPELINE_CLEAR:
		pyld->opcode |= (2 << 9);
		break;
	default:
		IPAHAL_ERR("unsupported pipline clear option %d\n",
			regwrt_params->pipeline_clear_options);
		WARN_ON(1);
	}
	return pyld;
}
  273. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
  274. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  275. {
  276. struct ipahal_imm_cmd_pyld *pyld;
  277. struct ipa_imm_cmd_hw_ip_packet_init *data;
  278. struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
  279. (struct ipahal_imm_cmd_ip_packet_init *)params;
  280. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  281. if (unlikely(!pyld)) {
  282. IPAHAL_ERR("kzalloc err\n");
  283. return pyld;
  284. }
  285. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  286. pyld->len = sizeof(*data);
  287. data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
  288. if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
  289. IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
  290. pktinit_params->destination_pipe_index);
  291. WARN_ON(1);
  292. }
  293. data->destination_pipe_index = pktinit_params->destination_pipe_index;
  294. return pyld;
  295. }
  296. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
  297. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  298. {
  299. struct ipahal_imm_cmd_pyld *pyld;
  300. struct ipa_imm_cmd_hw_nat_dma *data;
  301. struct ipahal_imm_cmd_table_dma *nat_params =
  302. (struct ipahal_imm_cmd_table_dma *)params;
  303. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  304. if (unlikely(!pyld)) {
  305. IPAHAL_ERR("kzalloc err\n");
  306. return pyld;
  307. }
  308. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  309. pyld->len = sizeof(*data);
  310. data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
  311. data->table_index = nat_params->table_index;
  312. data->base_addr = nat_params->base_addr;
  313. data->offset = nat_params->offset;
  314. data->data = nat_params->data;
  315. return pyld;
  316. }
  317. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_table_dma_ipav4(
  318. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  319. {
  320. struct ipahal_imm_cmd_pyld *pyld;
  321. struct ipa_imm_cmd_hw_table_dma_ipav4 *data;
  322. struct ipahal_imm_cmd_table_dma *nat_params =
  323. (struct ipahal_imm_cmd_table_dma *)params;
  324. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  325. if (unlikely(!pyld)) {
  326. IPAHAL_ERR("kzalloc err\n");
  327. return pyld;
  328. }
  329. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  330. pyld->len = sizeof(*data);
  331. data = (struct ipa_imm_cmd_hw_table_dma_ipav4 *)pyld->data;
  332. data->table_index = nat_params->table_index;
  333. data->base_addr = nat_params->base_addr;
  334. data->offset = nat_params->offset;
  335. data->data = nat_params->data;
  336. return pyld;
  337. }
  338. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
  339. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  340. {
  341. struct ipahal_imm_cmd_pyld *pyld;
  342. struct ipa_imm_cmd_hw_hdr_init_system *data;
  343. struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
  344. (struct ipahal_imm_cmd_hdr_init_system *)params;
  345. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  346. if (unlikely(!pyld)) {
  347. IPAHAL_ERR("kzalloc err\n");
  348. return pyld;
  349. }
  350. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  351. pyld->len = sizeof(*data);
  352. data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
  353. data->hdr_table_addr = syshdr_params->hdr_table_addr;
  354. return pyld;
  355. }
  356. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
  357. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  358. {
  359. struct ipahal_imm_cmd_pyld *pyld;
  360. struct ipa_imm_cmd_hw_hdr_init_local *data;
  361. struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
  362. (struct ipahal_imm_cmd_hdr_init_local *)params;
  363. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  364. if (unlikely(!pyld)) {
  365. IPAHAL_ERR("kzalloc err\n");
  366. return pyld;
  367. }
  368. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  369. pyld->len = sizeof(*data);
  370. data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
  371. if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
  372. IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
  373. lclhdr_params->size_hdr_table);
  374. WARN_ON(1);
  375. }
  376. data->hdr_table_addr = lclhdr_params->hdr_table_addr;
  377. data->size_hdr_table = lclhdr_params->size_hdr_table;
  378. data->hdr_addr = lclhdr_params->hdr_addr;
  379. return pyld;
  380. }
  381. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
  382. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  383. {
  384. struct ipahal_imm_cmd_pyld *pyld;
  385. struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
  386. struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
  387. (struct ipahal_imm_cmd_ip_v6_routing_init *)params;
  388. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  389. if (unlikely(!pyld)) {
  390. IPAHAL_ERR("kzalloc err\n");
  391. return pyld;
  392. }
  393. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  394. pyld->len = sizeof(*data);
  395. data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
  396. data->hash_rules_addr = rt6_params->hash_rules_addr;
  397. data->hash_rules_size = rt6_params->hash_rules_size;
  398. data->hash_local_addr = rt6_params->hash_local_addr;
  399. data->nhash_rules_addr = rt6_params->nhash_rules_addr;
  400. data->nhash_rules_size = rt6_params->nhash_rules_size;
  401. data->nhash_local_addr = rt6_params->nhash_local_addr;
  402. return pyld;
  403. }
  404. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
  405. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  406. {
  407. struct ipahal_imm_cmd_pyld *pyld;
  408. struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
  409. struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
  410. (struct ipahal_imm_cmd_ip_v4_routing_init *)params;
  411. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  412. if (unlikely(!pyld)) {
  413. IPAHAL_ERR("kzalloc err\n");
  414. return pyld;
  415. }
  416. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  417. pyld->len = sizeof(*data);
  418. data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
  419. data->hash_rules_addr = rt4_params->hash_rules_addr;
  420. data->hash_rules_size = rt4_params->hash_rules_size;
  421. data->hash_local_addr = rt4_params->hash_local_addr;
  422. data->nhash_rules_addr = rt4_params->nhash_rules_addr;
  423. data->nhash_rules_size = rt4_params->nhash_rules_size;
  424. data->nhash_local_addr = rt4_params->nhash_local_addr;
  425. return pyld;
  426. }
/*
 * ipa_imm_cmd_construct_ip_v4_nat_init() - Build the IP_V4_NAT_INIT
 * immediate command payload.
 * @cmd: immediate command name (used to fetch the H/W opcode)
 * @params: points to a struct ipahal_imm_cmd_ip_v4_nat_init
 * @is_atomic_ctx: true when called from atomic context (allocation flags)
 *
 * Return: the constructed payload, or NULL on allocation failure.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
	struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
		(struct ipahal_imm_cmd_ip_v4_nat_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;

	/* base and expansion rule/index table addresses */
	data->ipv4_rules_addr = nat4_params->table_init.base_table_addr;
	data->ipv4_expansion_rules_addr =
		nat4_params->table_init.expansion_table_addr;
	data->index_table_addr = nat4_params->index_table_addr;
	data->index_table_expansion_addr =
		nat4_params->index_table_expansion_addr;
	data->table_index = nat4_params->table_init.table_index;
	/* *_addr_type: 1 = shared memory address, 0 = otherwise */
	data->ipv4_rules_addr_type =
		nat4_params->table_init.base_table_addr_shared ? 1 : 0;
	data->ipv4_expansion_rules_addr_type =
		nat4_params->table_init.expansion_table_addr_shared ? 1 : 0;
	data->index_table_addr_type =
		nat4_params->index_table_addr_shared ? 1 : 0;
	data->index_table_expansion_addr_type =
		nat4_params->index_table_expansion_addr_shared ? 1 : 0;
	data->size_base_tables = nat4_params->table_init.size_base_table;
	data->size_expansion_tables =
		nat4_params->table_init.size_expansion_table;
	data->public_addr_info = nat4_params->public_addr_info;
	return pyld;
}
  463. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_ct_init(
  464. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  465. {
  466. struct ipahal_imm_cmd_pyld *pyld;
  467. struct ipa_imm_cmd_hw_ip_v6_ct_init *data;
  468. struct ipahal_imm_cmd_ip_v6_ct_init *ipv6ct_params =
  469. (struct ipahal_imm_cmd_ip_v6_ct_init *)params;
  470. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  471. if (unlikely(!pyld))
  472. return pyld;
  473. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  474. pyld->len = sizeof(*data);
  475. data = (struct ipa_imm_cmd_hw_ip_v6_ct_init *)pyld->data;
  476. data->table_addr = ipv6ct_params->table_init.base_table_addr;
  477. data->expansion_table_addr =
  478. ipv6ct_params->table_init.expansion_table_addr;
  479. data->table_index = ipv6ct_params->table_init.table_index;
  480. data->table_addr_type =
  481. ipv6ct_params->table_init.base_table_addr_shared ? 1 : 0;
  482. data->expansion_table_addr_type =
  483. ipv6ct_params->table_init.expansion_table_addr_shared ? 1 : 0;
  484. data->size_base_table = ipv6ct_params->table_init.size_base_table;
  485. data->size_expansion_table =
  486. ipv6ct_params->table_init.size_expansion_table;
  487. return pyld;
  488. }
  489. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
  490. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  491. {
  492. struct ipahal_imm_cmd_pyld *pyld;
  493. struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
  494. struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
  495. (struct ipahal_imm_cmd_ip_v6_filter_init *)params;
  496. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  497. if (unlikely(!pyld)) {
  498. IPAHAL_ERR("kzalloc err\n");
  499. return pyld;
  500. }
  501. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  502. pyld->len = sizeof(*data);
  503. data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
  504. data->hash_rules_addr = flt6_params->hash_rules_addr;
  505. data->hash_rules_size = flt6_params->hash_rules_size;
  506. data->hash_local_addr = flt6_params->hash_local_addr;
  507. data->nhash_rules_addr = flt6_params->nhash_rules_addr;
  508. data->nhash_rules_size = flt6_params->nhash_rules_size;
  509. data->nhash_local_addr = flt6_params->nhash_local_addr;
  510. return pyld;
  511. }
  512. static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
  513. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  514. {
  515. struct ipahal_imm_cmd_pyld *pyld;
  516. struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
  517. struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
  518. (struct ipahal_imm_cmd_ip_v4_filter_init *)params;
  519. pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
  520. if (unlikely(!pyld)) {
  521. IPAHAL_ERR("kzalloc err\n");
  522. return pyld;
  523. }
  524. pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
  525. pyld->len = sizeof(*data);
  526. data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
  527. data->hash_rules_addr = flt4_params->hash_rules_addr;
  528. data->hash_rules_size = flt4_params->hash_rules_size;
  529. data->hash_local_addr = flt4_params->hash_local_addr;
  530. data->nhash_rules_addr = flt4_params->nhash_rules_addr;
  531. data->nhash_rules_size = flt4_params->nhash_rules_size;
  532. data->nhash_local_addr = flt4_params->nhash_local_addr;
  533. return pyld;
  534. }
/*
 * ipa_imm_cmd_construct_dummy() - Placeholder constructor for immediate
 * commands that have no implementation on the running IPA version
 * (e.g. commands removed or renamed on a later version).
 * Always logs, warns, and returns NULL.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dummy(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	IPAHAL_ERR("no construct function for IMM_CMD=%s, IPA ver %d\n",
		ipahal_imm_cmd_name_str(cmd), ipahal_ctx->hw_type);
	WARN_ON(1);
	return NULL;
}
/*
 * struct ipahal_imm_cmd_obj - immediate command H/W information for
 * specific IPA version
 * @construct - CB to construct imm command payload from abstracted structure
 * @opcode - Immediate command OpCode (-1 marks a command removed on that
 *           version; see the ipahal_imm_cmd_objs[][] table comment)
 */
struct ipahal_imm_cmd_obj {
	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
		const void *params, bool is_atomic_ctx);
	u16 opcode;
};
/*
 * This table contains the info regard each immediate command for IPAv3
 * and later.
 * Information like: opcode and construct functions.
 * All the information on the IMM on IPAv3 are statically defined below.
 * If information is missing regard some IMM on some IPA version,
 * the init function will fill it with the information from the previous
 * IPA version.
 * Information is considered missing if all of the fields are 0
 * If opcode is -1, this means that the IMM is removed on the
 * specific version
 */
static struct ipahal_imm_cmd_obj
	ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
	/* IPAv3 */
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
		ipa_imm_cmd_construct_ip_v4_filter_init,
		3},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
		ipa_imm_cmd_construct_ip_v6_filter_init,
		4},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
		ipa_imm_cmd_construct_ip_v4_nat_init,
		5},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
		ipa_imm_cmd_construct_ip_v4_routing_init,
		7},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
		ipa_imm_cmd_construct_ip_v6_routing_init,
		8},
	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
		ipa_imm_cmd_construct_hdr_init_local,
		9},
	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
		ipa_imm_cmd_construct_hdr_init_system,
		10},
	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
		ipa_imm_cmd_construct_register_write,
		12},
	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
		ipa_imm_cmd_construct_nat_dma,
		14},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
		ipa_imm_cmd_construct_ip_packet_init,
		16},
	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
		ipa_imm_cmd_construct_dma_task_32b_addr,
		17},
	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
		ipa_imm_cmd_construct_dma_shared_mem,
		19},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
		ipa_imm_cmd_construct_ip_packet_tag_status,
		20},

	/* IPAv4 */
	[IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = {
		ipa_imm_cmd_construct_register_write_v_4_0,
		12},
	/* NAT_DMA was renamed to TABLE_DMA for IPAv4 */
	[IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = {
		ipa_imm_cmd_construct_dummy,
		-1},
	[IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = {
		ipa_imm_cmd_construct_table_dma_ipav4,
		14},
	[IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
		ipa_imm_cmd_construct_dma_shared_mem_v_4_0,
		19},
	[IPA_HW_v4_0][IPA_IMM_CMD_IP_V6_CT_INIT] = {
		ipa_imm_cmd_construct_ip_v6_ct_init,
		23}
};
  626. /*
  627. * ipahal_imm_cmd_init() - Build the Immediate command information table
  628. * See ipahal_imm_cmd_objs[][] comments
  629. */
  630. static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
  631. {
  632. int i;
  633. int j;
  634. struct ipahal_imm_cmd_obj zero_obj;
  635. IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
  636. if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
  637. IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
  638. return -EINVAL;
  639. }
  640. memset(&zero_obj, 0, sizeof(zero_obj));
  641. for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
  642. for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
  643. if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
  644. sizeof(struct ipahal_imm_cmd_obj))) {
  645. memcpy(&ipahal_imm_cmd_objs[i+1][j],
  646. &ipahal_imm_cmd_objs[i][j],
  647. sizeof(struct ipahal_imm_cmd_obj));
  648. } else {
  649. /*
  650. * explicitly overridden immediate command.
  651. * Check validity
  652. */
  653. if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
  654. IPAHAL_ERR(
  655. "imm_cmd=%s with zero opcode ipa_ver=%d\n",
  656. ipahal_imm_cmd_name_str(j), i+1);
  657. WARN_ON(1);
  658. }
  659. if (!ipahal_imm_cmd_objs[i+1][j].construct) {
  660. IPAHAL_ERR(
  661. "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
  662. ipahal_imm_cmd_name_str(j), i+1);
  663. WARN_ON(1);
  664. }
  665. }
  666. }
  667. }
  668. return 0;
  669. }
  670. /*
  671. * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd
  672. * @cmd_name: [in] Immediate command name
  673. */
  674. const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
  675. {
  676. if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
  677. IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
  678. return "Invalid IMM_CMD";
  679. }
  680. return ipahal_imm_cmd_name_to_str[cmd_name];
  681. }
/*
 * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
 * @cmd: [in] immediate command name to look up
 *
 * Returns the H/W opcode for @cmd on the currently-detected IPA H/W
 * version (ipahal_ctx->hw_type). Asserts on an out-of-range or obsolete
 * command.
 */
static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
{
	u32 opcode;

	if (cmd >= IPA_IMM_CMD_MAX) {
		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
		ipa_assert();
		/*
		 * NOTE(review): -EFAULT is truncated to u16 by the return
		 * type, so callers cannot reliably distinguish it from a
		 * valid opcode if ipa_assert() is compiled out — confirm.
		 */
		return -EFAULT;
	}

	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
		ipahal_imm_cmd_name_str(cmd));
	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
	/*
	 * The table marks commands removed on this H/W version with -1
	 * (e.g. NAT_DMA on IPAv4).
	 * NOTE(review): this compares against u32 -1 (0xFFFFFFFF); if the
	 * opcode field of struct ipahal_imm_cmd_obj is narrower than u32
	 * the sentinel would never match after the implicit widening —
	 * verify the declared field width.
	 */
	if (opcode == -1) {
		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
			ipahal_imm_cmd_name_str(cmd));
		ipa_assert();
		return -EFAULT;
	}

	return opcode;
}
  704. /*
  705. * ipahal_construct_imm_cmd() - Construct immdiate command
  706. * This function builds imm cmd bulk that can be be sent to IPA
  707. * The command will be allocated dynamically.
  708. * After done using it, call ipahal_destroy_imm_cmd() to release it
  709. */
  710. struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
  711. enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
  712. {
  713. if (!params) {
  714. IPAHAL_ERR("Input error: params=%pK\n", params);
  715. ipa_assert();
  716. return NULL;
  717. }
  718. if (cmd >= IPA_IMM_CMD_MAX) {
  719. IPAHAL_ERR("Invalid immediate command %u\n", cmd);
  720. return NULL;
  721. }
  722. IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
  723. return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
  724. cmd, params, is_atomic_ctx);
  725. }
  726. /*
  727. * ipahal_construct_nop_imm_cmd() - Construct immediate comamnd for NO-Op
  728. * Core driver may want functionality to inject NOP commands to IPA
  729. * to ensure e.g., PIPLINE clear before someother operation.
  730. * The functionality given by this function can be reached by
  731. * ipahal_construct_imm_cmd(). This function is helper to the core driver
  732. * to reach this NOP functionlity easily.
  733. * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait)
  734. * @pipline_clr_opt: options for pipeline clear waiting
  735. * @is_atomic_ctx: is called in atomic context or can sleep?
  736. */
  737. struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
  738. bool skip_pipline_clear,
  739. enum ipahal_pipeline_clear_option pipline_clr_opt,
  740. bool is_atomic_ctx)
  741. {
  742. struct ipahal_imm_cmd_register_write cmd;
  743. struct ipahal_imm_cmd_pyld *cmd_pyld;
  744. memset(&cmd, 0, sizeof(cmd));
  745. cmd.skip_pipeline_clear = skip_pipline_clear;
  746. cmd.pipeline_clear_options = pipline_clr_opt;
  747. cmd.value_mask = 0x0;
  748. cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
  749. &cmd, is_atomic_ctx);
  750. if (!cmd_pyld)
  751. IPAHAL_ERR("failed to construct register_write imm cmd\n");
  752. return cmd_pyld;
  753. }
/* IPA Packet Status Logic */

/*
 * IPA_PKT_STATUS_SET_MSK() - if bit __hw_bit_msk is set in the H/W
 * status_mask, set bit __shft in the abstracted status->status_mask.
 * NOTE: this macro implicitly uses locals named 'status' and 'hw_status'
 * at the expansion site; it is only meant to be expanded inside
 * ipa_pkt_status_parse().
 */
#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
	(status->status_mask |= \
		((hw_status->ipa_pkt.status_mask & (__hw_bit_msk) ? 1 : 0) \
			<< (__shft)))
  759. static enum ipahal_pkt_status_exception pkt_status_parse_exception(
  760. bool is_ipv6, u64 exception)
  761. {
  762. enum ipahal_pkt_status_exception exception_type = 0;
  763. switch (exception) {
  764. case 0:
  765. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
  766. break;
  767. case 1:
  768. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
  769. break;
  770. case 4:
  771. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
  772. break;
  773. case 8:
  774. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
  775. break;
  776. case 16:
  777. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
  778. break;
  779. case 32:
  780. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
  781. break;
  782. case 64:
  783. if (is_ipv6)
  784. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT;
  785. else
  786. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
  787. break;
  788. case 229:
  789. exception_type = IPAHAL_PKT_STATUS_EXCEPTION_CSUM;
  790. break;
  791. default:
  792. IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
  793. exception);
  794. WARN_ON(1);
  795. }
  796. return exception_type;
  797. }
/*
 * __ipa_parse_gen_pkt() - parse a general (non-fragment) H/W status packet
 * into the abstracted ipahal_pkt_status representation.
 * @status: [out] pre-allocated parsed-status buffer
 * @unparsed_status: [in] H/W format status packet
 */
static void __ipa_parse_gen_pkt(struct ipahal_pkt_status *status,
	const void *unparsed_status)
{
	bool is_ipv6;
	union ipa_pkt_status_hw *hw_status =
		(union ipa_pkt_status_hw *)unparsed_status;

	/* bit 0x80 of status_mask is the V4 flag (see the SET_MSK calls
	 * in ipa_pkt_status_parse); clear means the packet was IPv6
	 */
	is_ipv6 = (hw_status->ipa_pkt.status_mask & 0x80) ? false : true;
	status->pkt_len = hw_status->ipa_pkt.pkt_len;
	status->endp_src_idx = hw_status->ipa_pkt.endp_src_idx;
	status->endp_dest_idx = hw_status->ipa_pkt.endp_dest_idx;
	status->metadata = hw_status->ipa_pkt.metadata;
	status->flt_local = hw_status->ipa_pkt.flt_local;
	status->flt_hash = hw_status->ipa_pkt.flt_hash;
	/*
	 * NOTE(review): flt_global is copied from flt_hash, not from a
	 * flt_global H/W field — confirm this is intended and not a typo.
	 */
	status->flt_global = hw_status->ipa_pkt.flt_hash;
	status->flt_ret_hdr = hw_status->ipa_pkt.flt_ret_hdr;
	/*
	 * NOTE(review): flt_miss is derived from rt_rule_id (same as
	 * rt_miss below) rather than flt_rule_id — verify against the H/W
	 * status-packet spec.
	 */
	status->flt_miss = (hw_status->ipa_pkt.rt_rule_id ==
		IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
	status->flt_rule_id = hw_status->ipa_pkt.flt_rule_id;
	status->rt_local = hw_status->ipa_pkt.rt_local;
	status->rt_hash = hw_status->ipa_pkt.rt_hash;
	status->ucp = hw_status->ipa_pkt.ucp;
	status->rt_tbl_idx = hw_status->ipa_pkt.rt_tbl_idx;
	/* routing miss: rule id equals the reserved miss sentinel */
	status->rt_miss = (hw_status->ipa_pkt.rt_rule_id ==
		IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
	status->rt_rule_id = hw_status->ipa_pkt.rt_rule_id;
	status->nat_hit = hw_status->ipa_pkt.nat_hit;
	status->nat_entry_idx = hw_status->ipa_pkt.nat_entry_idx;
	status->tag_info = hw_status->ipa_pkt.tag_info;
	status->seq_num = hw_status->ipa_pkt.seq_num;
	status->time_of_day_ctr = hw_status->ipa_pkt.time_of_day_ctr;
	status->hdr_local = hw_status->ipa_pkt.hdr_local;
	status->hdr_offset = hw_status->ipa_pkt.hdr_offset;
	status->frag_hit = hw_status->ipa_pkt.frag_hit;
	status->frag_rule = hw_status->ipa_pkt.frag_rule;
	status->nat_type = hw_status->ipa_pkt.nat_type;
	/* translate the raw exception code into the abstracted enum */
	status->exception = pkt_status_parse_exception(is_ipv6,
		hw_status->ipa_pkt.exception);
}
  836. static void __ipa_parse_frag_pkt(struct ipahal_pkt_status *status,
  837. const void *unparsed_status)
  838. {
  839. union ipa_pkt_status_hw *hw_status =
  840. (union ipa_pkt_status_hw *)unparsed_status;
  841. status->frag_rule_idx = hw_status->frag_pkt.frag_rule_idx;
  842. status->tbl_idx = hw_status->frag_pkt.tbl_idx;
  843. status->src_ip_addr = hw_status->frag_pkt.src_ip_addr;
  844. status->dest_ip_addr = hw_status->frag_pkt.dest_ip_addr;
  845. status->protocol = hw_status->frag_pkt.protocol;
  846. status->ip_id = hw_status->frag_pkt.ip_id;
  847. status->tlated_ip_addr = hw_status->frag_pkt.tlated_ip_addr;
  848. status->ip_cksum_diff = hw_status->frag_pkt.ip_cksum_diff;
  849. status->endp_src_idx = hw_status->frag_pkt.endp_src_idx;
  850. status->endp_dest_idx = hw_status->frag_pkt.endp_dest_idx;
  851. status->metadata = hw_status->frag_pkt.metadata;
  852. status->seq_num = hw_status->frag_pkt.seq_num;
  853. status->hdr_local = hw_status->frag_pkt.hdr_local;
  854. status->hdr_offset = hw_status->frag_pkt.hdr_offset;
  855. status->exception = hw_status->frag_pkt.exception;
  856. status->nat_type = hw_status->frag_pkt.nat_type;
  857. }
/*
 * ipa_pkt_status_parse() - IPAv3 parse callback: convert a raw H/W status
 * packet into the abstracted ipahal_pkt_status.
 * @unparsed_status: [in] H/W format status packet
 * @status: [out] pre-allocated (and pre-zeroed by the caller) buffer
 */
static void ipa_pkt_status_parse(
	const void *unparsed_status, struct ipahal_pkt_status *status)
{
	enum ipahal_pkt_status_opcode opcode = 0;
	union ipa_pkt_status_hw *hw_status =
		(union ipa_pkt_status_hw *)unparsed_status;

	/* Map the raw one-hot H/W opcode to the abstracted enum */
	switch (hw_status->ipa_pkt.status_opcode) {
	case 0x1:
		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
		break;
	case 0x2:
		opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
		break;
	case 0x4:
		opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
		break;
	case 0x8:
		opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
		break;
	case 0x10:
		opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
		break;
	case 0x20:
		opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
		break;
	case 0x40:
		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
		break;
	default:
		/* unknown opcode: keep 0 and continue on the general path */
		IPAHAL_ERR_RL("unsupported Status Opcode 0x%x\n",
			hw_status->ipa_pkt.status_opcode);
	}
	status->status_opcode = opcode;

	/* Fragment-rule packets have a different layout than all others */
	if (status->status_opcode == IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE)
		__ipa_parse_frag_pkt(status, unparsed_status);
	else
		__ipa_parse_gen_pkt(status, unparsed_status);

	/*
	 * Re-map the raw nat_type value (copied by the helper above) to the
	 * abstracted enum values, in place.
	 * NOTE(review): on the frag path, status->exception also still holds
	 * the raw H/W value (it is not run through
	 * pkt_status_parse_exception) — confirm that is intended.
	 */
	switch (status->nat_type) {
	case 0:
		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
		break;
	case 1:
		status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
		break;
	case 2:
		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
		break;
	default:
		IPAHAL_ERR_RL("unsupported Status NAT type 0x%x\n",
			status->nat_type);
	}

	/*
	 * Fold each H/W status_mask bit into the abstracted mask.
	 * The macro reads the locals 'status' and 'hw_status' directly.
	 * NOTE(review): this reads hw_status->ipa_pkt.status_mask even when
	 * the packet was parsed via the frag layout — confirm the field
	 * aliases correctly in the union.
	 */
	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x40,
		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x100,
		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x800,
		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
	/* only the low 16 H/W mask bits are meaningful */
	status->status_mask &= 0xFFFF;
}
  930. /*
  931. * ipa_pkt_status_parse_thin() - Parse some of the packet status fields
  932. * for specific usage in the LAN rx data path where parsing needs to be done
  933. * but only for specific fields.
  934. * @unparsed_status: Pointer to H/W format of the packet status as read from HW
  935. * @status: Pointer to pre-allocated buffer where the parsed info will be
  936. * stored
  937. */
  938. static void ipa_pkt_status_parse_thin(const void *unparsed_status,
  939. struct ipahal_pkt_status_thin *status)
  940. {
  941. union ipa_pkt_status_hw *hw_status =
  942. (union ipa_pkt_status_hw *)unparsed_status;
  943. bool is_ipv6;
  944. is_ipv6 = (hw_status->ipa_pkt.status_mask & 0x80) ? false : true;
  945. if (!unparsed_status || !status) {
  946. IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
  947. unparsed_status, status);
  948. return;
  949. }
  950. IPAHAL_DBG_LOW("Parse Thin Status Packet\n");
  951. status->metadata = hw_status->ipa_pkt.metadata;
  952. status->endp_src_idx = hw_status->ipa_pkt.endp_src_idx;
  953. status->ucp = hw_status->ipa_pkt.ucp;
  954. status->exception = pkt_status_parse_exception(is_ipv6,
  955. hw_status->ipa_pkt.exception);
  956. }
/*
 * struct ipahal_pkt_status_obj - Packet Status H/W information for
 * specific IPA version
 * @size: H/W size of the status packet
 * @parse: CB that parses the H/W packet status into the abstracted structure
 * @parse_thin: lightweight CB that parses only some of the fields for
 * data path optimization
 */
struct ipahal_pkt_status_obj {
	u32 size;
	void (*parse)(const void *unparsed_status,
		struct ipahal_pkt_status *status);
	void (*parse_thin)(const void *unparsed_status,
		struct ipahal_pkt_status_thin *status);
};
  972. /*
  973. * This table contains the info regard packet status for IPAv3 and later
  974. * Information like: size of packet status and parsing function
  975. * All the information on the pkt Status on IPAv3 are statically defined below.
  976. * If information is missing regard some IPA version, the init function
  977. * will fill it with the information from the previous IPA version.
  978. * Information is considered missing if all of the fields are 0
  979. */
  980. static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
  981. /* IPAv3 */
  982. [IPA_HW_v3_0] = {
  983. IPA3_0_PKT_STATUS_SIZE,
  984. ipa_pkt_status_parse,
  985. ipa_pkt_status_parse_thin,
  986. },
  987. };
  988. /*
  989. * ipahal_pkt_status_init() - Build the packet status information array
  990. * for the different IPA versions
  991. * See ipahal_pkt_status_objs[] comments
  992. */
  993. static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
  994. {
  995. int i;
  996. struct ipahal_pkt_status_obj zero_obj;
  997. IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
  998. if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
  999. IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
  1000. return -EINVAL;
  1001. }
  1002. /*
  1003. * Since structure alignment is implementation dependent,
  1004. * add test to avoid different and incompatible data layouts.
  1005. * If test fails it also means that ipahal_pkt_status_parse_thin
  1006. * need to be checked.
  1007. *
  1008. * In case new H/W has different size or structure of status packet,
  1009. * add a compile time validty check for it like below (as well as
  1010. * the new defines and/or the new strucutre in the internal header).
  1011. */
  1012. BUILD_BUG_ON(sizeof(union ipa_pkt_status_hw) !=
  1013. IPA3_0_PKT_STATUS_SIZE);
  1014. memset(&zero_obj, 0, sizeof(zero_obj));
  1015. for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
  1016. if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
  1017. sizeof(struct ipahal_pkt_status_obj))) {
  1018. memcpy(&ipahal_pkt_status_objs[i+1],
  1019. &ipahal_pkt_status_objs[i],
  1020. sizeof(struct ipahal_pkt_status_obj));
  1021. } else {
  1022. /*
  1023. * explicitly overridden Packet Status info
  1024. * Check validity
  1025. */
  1026. if (!ipahal_pkt_status_objs[i+1].size) {
  1027. IPAHAL_ERR(
  1028. "Packet Status with zero size ipa_ver=%d\n",
  1029. i+1);
  1030. WARN_ON(1);
  1031. }
  1032. if (!ipahal_pkt_status_objs[i+1].parse) {
  1033. IPAHAL_ERR(
  1034. "Packet Status without Parse func ipa_ver=%d\n",
  1035. i+1);
  1036. WARN_ON(1);
  1037. }
  1038. if (!ipahal_pkt_status_objs[i+1].parse_thin) {
  1039. IPAHAL_ERR(
  1040. "Packet Status without Parse_thin func ipa_ver=%d\n",
  1041. i+1);
  1042. WARN_ON(1);
  1043. }
  1044. }
  1045. }
  1046. return 0;
  1047. }
  1048. /*
  1049. * ipahal_pkt_status_get_size() - Get H/W size of packet status
  1050. */
  1051. u32 ipahal_pkt_status_get_size(void)
  1052. {
  1053. return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size;
  1054. }
  1055. /*
  1056. * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
  1057. * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
  1058. * @status: Pointer to pre-allocated buffer where the parsed info will be stored
  1059. */
  1060. void ipahal_pkt_status_parse(const void *unparsed_status,
  1061. struct ipahal_pkt_status *status)
  1062. {
  1063. if (!unparsed_status || !status) {
  1064. IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
  1065. unparsed_status, status);
  1066. return;
  1067. }
  1068. IPAHAL_DBG_LOW("Parse Status Packet\n");
  1069. memset(status, 0, sizeof(*status));
  1070. ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
  1071. status);
  1072. }
  1073. /*
  1074. * ipahal_pkt_status_parse_thin() - Similar to ipahal_pkt_status_parse,
  1075. * the difference is it only parses some of the status packet fields
  1076. * used for TP optimization.
  1077. * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
  1078. * @status: Pointer to pre-allocated buffer where the parsed info will be stored
  1079. */
  1080. void ipahal_pkt_status_parse_thin(const void *unparsed_status,
  1081. struct ipahal_pkt_status_thin *status)
  1082. {
  1083. if (!unparsed_status || !status) {
  1084. IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
  1085. unparsed_status, status);
  1086. return;
  1087. }
  1088. IPAHAL_DBG_LOW("Parse_thin Status Packet\n");
  1089. ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse_thin(unparsed_status,
  1090. status);
  1091. }
  1092. /*
  1093. * ipahal_pkt_status_exception_str() - returns string represents exception type
  1094. * @exception: [in] The exception type
  1095. */
  1096. const char *ipahal_pkt_status_exception_str(
  1097. enum ipahal_pkt_status_exception exception)
  1098. {
  1099. if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) {
  1100. IPAHAL_ERR(
  1101. "requested string of invalid pkt_status exception=%d\n",
  1102. exception);
  1103. return "Invalid PKT_STATUS_EXCEPTION";
  1104. }
  1105. return ipahal_pkt_status_exception_to_str[exception];
  1106. }
#ifdef CONFIG_DEBUG_FS
/* Create the "ipahal" debugfs directory; on failure leave dent NULL */
static void ipahal_debugfs_init(void)
{
	ipahal_ctx->dent = debugfs_create_dir("ipahal", 0);
	if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) {
		IPAHAL_ERR("fail to create ipahal debugfs folder\n");
		goto fail;
	}

	return;
fail:
	/* dent is NULL or an ERR_PTR here; both are no-ops for remove */
	debugfs_remove_recursive(ipahal_ctx->dent);
	ipahal_ctx->dent = NULL;
}

/* Tear down the "ipahal" debugfs directory, if it was created */
static void ipahal_debugfs_remove(void)
{
	if (!ipahal_ctx)
		return;
	/*
	 * NOTE(review): init() resets dent to NULL on failure, so this
	 * IS_ERR() check never fires for the failed-init case; the
	 * subsequent debugfs_remove_recursive(NULL) is a documented no-op,
	 * so this is benign — but IS_ERR_OR_NULL() would express the
	 * intent better. Confirm before changing.
	 */
	if (IS_ERR(ipahal_ctx->dent)) {
		IPAHAL_ERR("ipahal debugfs folder was not created\n");
		return;
	}
	debugfs_remove_recursive(ipahal_ctx->dent);
}
#else /* CONFIG_DEBUG_FS */
static void ipahal_debugfs_init(void) {}
static void ipahal_debugfs_remove(void) {}
#endif /* CONFIG_DEBUG_FS */
  1134. /*
  1135. * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to
  1136. * base address and offset given.
  1137. * @base: dma base address
  1138. * @offset: offset from base address where the data will be copied
  1139. * @hdr: the header to be copied
  1140. * @hdr_len: the length of the header
  1141. */
  1142. static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
  1143. u8 *const hdr, u32 hdr_len)
  1144. {
  1145. memcpy(base + offset, hdr, hdr_len);
  1146. }
/* Header address update logic. */

/*
 * IPAHAL_CP_PROC_CTX_HEADER_UPDATE() - split a 64-bit header address into
 * the low/high 32-bit fields of a proc-ctx hdr_add entry.
 * @hdr_lsb: lvalue receiving the low 32 bits of @addr
 * @hdr_msb: lvalue receiving the high 32 bits of @addr
 * @addr: 64-bit header address
 */
#define IPAHAL_CP_PROC_CTX_HEADER_UPDATE(hdr_lsb, hdr_msb, addr) \
	do { \
		hdr_lsb = lower_32_bits(addr); \
		hdr_msb = upper_32_bits(addr); \
	} while (0)
/*
 * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
 * base address and offset given.
 * @type: header processing context type (no processing context,
 *  IPA_HDR_PROC_ETHII_TO_ETHII etc.)
 * @base: dma base address
 * @offset: offset from base address where the data will be copied
 * @hdr_len: the length of the header
 * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
 * @phys_base: memory location in DDR
 * @hdr_base_addr: base address in table
 * @offset_entry: offset from hdr_base_addr in table
 * @l2tp_params: l2tp parameters
 * @generic_params: generic proc_ctx params
 * @is_64: Indicates whether header base address/dma base address is 64 bit.
 *
 * Each branch writes a TLV command sequence (HDR_ADD, optional PROC_CMD,
 * then END) directly into the H/W buffer at base + offset.
 * Returns 0 on success, -EINVAL for an unknown @type.
 */
static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
	void *const base, u32 offset,
	u32 hdr_len, bool is_hdr_proc_ctx,
	dma_addr_t phys_base, u64 hdr_base_addr,
	struct ipa_hdr_offset_entry *offset_entry,
	struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
	struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
	bool is_64)
{
	u64 hdr_addr;

	if (type == IPA_HDR_PROC_NONE) {
		/* HDR_ADD + END: plain header insertion, no ucp command */
		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 2;
		ctx->hdr_add.tlv.value = hdr_len;
		/* header lives either in DDR (proc ctx) or in the table */
		hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%llx\n",
			hdr_addr);
		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
			ctx->hdr_add.hdr_addr_hi, hdr_addr);
		if (!is_64)
			ctx->hdr_add.hdr_addr_hi = 0;
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	} else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) {
		/* HDR_ADD + L2TP-add PROC_CMD + END */
		struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 2;
		ctx->hdr_add.tlv.value = hdr_len;
		hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%llx\n",
			hdr_addr);
		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
			ctx->hdr_add.hdr_addr_hi, hdr_addr);
		if (!is_64)
			ctx->hdr_add.hdr_addr_hi = 0;
		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
		ctx->l2tp_params.tlv.length = 1;
		ctx->l2tp_params.tlv.value =
			IPA_HDR_UCP_L2TP_HEADER_ADD;
		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
			l2tp_params->hdr_add_param.eth_hdr_retained;
		ctx->l2tp_params.l2tp_params.input_ip_version =
			l2tp_params->hdr_add_param.input_ip_version;
		ctx->l2tp_params.l2tp_params.output_ip_version =
			l2tp_params->hdr_add_param.output_ip_version;
		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	} else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) {
		/* HDR_ADD + L2TP-remove PROC_CMD + END */
		struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 2;
		ctx->hdr_add.tlv.value = hdr_len;
		hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%llx length %d\n",
			hdr_addr, ctx->hdr_add.tlv.value);
		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
			ctx->hdr_add.hdr_addr_hi, hdr_addr);
		if (!is_64)
			ctx->hdr_add.hdr_addr_hi = 0;
		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
		ctx->l2tp_params.tlv.length = 1;
		ctx->l2tp_params.tlv.value =
			IPA_HDR_UCP_L2TP_HEADER_REMOVE;
		ctx->l2tp_params.l2tp_params.hdr_len_remove =
			l2tp_params->hdr_remove_param.hdr_len_remove;
		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
			l2tp_params->hdr_remove_param.eth_hdr_retained;
		ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid =
			l2tp_params->hdr_remove_param.hdr_ofst_pkt_size_valid;
		ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size =
			l2tp_params->hdr_remove_param.hdr_ofst_pkt_size;
		ctx->l2tp_params.l2tp_params.hdr_endianness =
			l2tp_params->hdr_remove_param.hdr_endianness;
		IPAHAL_DBG("hdr ofst valid: %d, hdr ofst pkt size: %d\n",
			ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid,
			ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size);
		IPAHAL_DBG("endianness: %d\n",
			ctx->l2tp_params.l2tp_params.hdr_endianness);
		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	} else if (type == IPA_HDR_PROC_ETHII_TO_ETHII_EX) {
		/* HDR_ADD + generic ETHII-to-ETHII-ex PROC_CMD + END */
		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 1;
		ctx->hdr_add.tlv.value = hdr_len;
		/*
		 * NOTE(review): unlike the other branches, hdr_addr here is
		 * assigned directly (no hdr_addr_hi split, printed with %x)
		 * — a >32-bit address would be truncated. Confirm this
		 * sequence layout really has no high-address field.
		 */
		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%x\n",
			ctx->hdr_add.hdr_addr);
		ctx->hdr_add_ex.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
		ctx->hdr_add_ex.tlv.length = 1;
		ctx->hdr_add_ex.tlv.value = IPA_HDR_UCP_ETHII_TO_ETHII_EX;
		ctx->hdr_add_ex.params.input_ethhdr_negative_offset =
			generic_params->input_ethhdr_negative_offset;
		ctx->hdr_add_ex.params.output_ethhdr_negative_offset =
			generic_params->output_ethhdr_negative_offset;
		ctx->hdr_add_ex.params.reserved = 0;
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	} else {
		/* remaining ucp types: HDR_ADD + simple PROC_CMD + END */
		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 2;
		ctx->hdr_add.tlv.value = hdr_len;
		hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%llx\n",
			hdr_addr);
		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
			ctx->hdr_add.hdr_addr_hi, hdr_addr);
		if (!is_64)
			ctx->hdr_add.hdr_addr_hi = 0;
		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
		ctx->cmd.length = 0;
		switch (type) {
		case IPA_HDR_PROC_ETHII_TO_ETHII:
			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
			break;
		case IPA_HDR_PROC_ETHII_TO_802_3:
			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
			break;
		case IPA_HDR_PROC_802_3_TO_ETHII:
			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
			break;
		case IPA_HDR_PROC_802_3_TO_802_3:
			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
			break;
		default:
			IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type);
			WARN_ON(1);
			return -EINVAL;
		}
		IPAHAL_DBG("command id %d\n", ctx->cmd.value);
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	}

	return 0;
}
  1328. /*
  1329. * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
  1330. * addition of header processing context according to the type of processing
  1331. * context.
  1332. * @type: header processing context type (no processing context,
  1333. * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
  1334. */
  1335. static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
  1336. {
  1337. int ret;
  1338. switch (type) {
  1339. case IPA_HDR_PROC_NONE:
  1340. ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq);
  1341. break;
  1342. case IPA_HDR_PROC_ETHII_TO_ETHII:
  1343. case IPA_HDR_PROC_ETHII_TO_802_3:
  1344. case IPA_HDR_PROC_802_3_TO_ETHII:
  1345. case IPA_HDR_PROC_802_3_TO_802_3:
  1346. ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
  1347. break;
  1348. case IPA_HDR_PROC_L2TP_HEADER_ADD:
  1349. ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq);
  1350. break;
  1351. case IPA_HDR_PROC_L2TP_HEADER_REMOVE:
  1352. ret =
  1353. sizeof(struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq);
  1354. break;
  1355. case IPA_HDR_PROC_ETHII_TO_ETHII_EX:
  1356. ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex);
  1357. break;
  1358. default:
  1359. /* invalid value to make sure failure */
  1360. IPAHAL_ERR_RL("invalid ipa_hdr_proc_type %d\n", type);
  1361. ret = -1;
  1362. }
  1363. return ret;
  1364. }
/*
 * struct ipahal_hdr_funcs - headers handling functions for specific IPA
 * version
 * @ipahal_cp_hdr_to_hw_buff: copy function for regular headers
 * @ipahal_cp_proc_ctx_to_hw_buff: copy function for header processing
 * contexts
 * @ipahal_get_proc_ctx_needed_len: returns the buffer length needed for a
 * processing context of the given type
 */
struct ipahal_hdr_funcs {
	void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
		u8 *const hdr, u32 hdr_len);

	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
		void *const base, u32 offset, u32 hdr_len,
		bool is_hdr_proc_ctx, dma_addr_t phys_base,
		u64 hdr_base_addr,
		struct ipa_hdr_offset_entry *offset_entry,
		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
		struct ipa_eth_II_to_eth_II_ex_procparams
			*generic_params,
		bool is_64);

	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
};
  1384. static struct ipahal_hdr_funcs hdr_funcs;
  1385. static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
  1386. {
  1387. IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
  1388. /*
  1389. * once there are changes in HW and need to use different case, insert
  1390. * new case for the new h/w. put the default always for the latest HW
  1391. * and make sure all previous supported versions have their cases.
  1392. */
  1393. switch (ipa_hw_type) {
  1394. case IPA_HW_v3_0:
  1395. default:
  1396. hdr_funcs.ipahal_cp_hdr_to_hw_buff =
  1397. ipahal_cp_hdr_to_hw_buff_v3;
  1398. hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
  1399. ipahal_cp_proc_ctx_to_hw_buff_v3;
  1400. hdr_funcs.ipahal_get_proc_ctx_needed_len =
  1401. ipahal_get_proc_ctx_needed_len_v3;
  1402. }
  1403. IPAHAL_DBG("Exit\n");
  1404. }
  1405. /*
  1406. * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
  1407. * base address and offset given.
  1408. * @base: dma base address
  1409. * @offset: offset from base address where the data will be copied
  1410. * @hdr: the header to be copied
  1411. * @hdr_len: the length of the header
  1412. */
  1413. void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
  1414. u32 hdr_len)
  1415. {
  1416. IPAHAL_DBG_LOW("Entry\n");
  1417. IPAHAL_DBG("base %pK, offset %d, hdr %pK, hdr_len %d\n", base,
  1418. offset, hdr, hdr_len);
  1419. if (!base || !hdr_len || !hdr) {
  1420. IPAHAL_ERR("failed on validating params\n");
  1421. return;
  1422. }
  1423. hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);
  1424. IPAHAL_DBG_LOW("Exit\n");
  1425. }
  1426. /*
  1427. * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
  1428. * base address and offset given.
  1429. * @type: type of header processing context
  1430. * @base: dma base address
  1431. * @offset: offset from base address where the data will be copied
  1432. * @hdr_len: the length of the header
  1433. * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
  1434. * @phys_base: memory location in DDR
  1435. * @hdr_base_addr: base address in table
  1436. * @offset_entry: offset from hdr_base_addr in table
  1437. * @l2tp_params: l2tp parameters
  1438. * @generic_params: generic proc_ctx params
  1439. * @is_64: Indicates whether header base address/dma base address is 64 bit.
  1440. */
  1441. int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
  1442. void *const base, u32 offset, u32 hdr_len,
  1443. bool is_hdr_proc_ctx, dma_addr_t phys_base,
  1444. u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
  1445. struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
  1446. struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
  1447. bool is_64)
  1448. {
  1449. IPAHAL_DBG(
  1450. "type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n"
  1451. , type, base, offset, hdr_len, is_hdr_proc_ctx,
  1452. hdr_base_addr, offset_entry, is_64);
  1453. if (!base ||
  1454. !hdr_len ||
  1455. (is_hdr_proc_ctx && !phys_base) ||
  1456. (!is_hdr_proc_ctx && !offset_entry) ||
  1457. (!is_hdr_proc_ctx && !hdr_base_addr)) {
  1458. IPAHAL_ERR(
  1459. "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%llu is_hdr_proc_ctx:%d offset_entry:%pK\n"
  1460. , hdr_len, &phys_base, hdr_base_addr
  1461. , is_hdr_proc_ctx, offset_entry);
  1462. return -EINVAL;
  1463. }
  1464. return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
  1465. hdr_len, is_hdr_proc_ctx, phys_base,
  1466. hdr_base_addr, offset_entry, l2tp_params,
  1467. generic_params, is_64);
  1468. }
  1469. /*
  1470. * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
  1471. * addition of header processing context according to the type of processing
  1472. * context
  1473. * @type: header processing context type (no processing context,
  1474. * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
  1475. */
  1476. int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
  1477. {
  1478. int res;
  1479. IPAHAL_DBG("entry\n");
  1480. res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);
  1481. IPAHAL_DBG("Exit\n");
  1482. return res;
  1483. }
/*
 * ipahal_init() - allocate the HAL context and initialize every HAL
 * sub-module (registers, immediate commands, packet status, headers,
 * flt/rt tables, HW stats, NAT) for the given HW version.
 * @ipa_hw_type: IPA hardware version (must be >= IPA_HW_v3_0)
 * @base: io-mapped base address of the IPA registers
 * @ipa_pdev: IPA device, used later for DMA allocations
 *
 * Return: 0 on success; -ENOMEM, -EINVAL or -EFAULT on failure.
 * On failure everything allocated here is released and ipahal_ctx is NULL.
 */
int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
	struct device *ipa_pdev)
{
	int result;

	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%pK ipa_pdev=%pK\n",
		ipa_hw_type, base, ipa_pdev);

	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
	if (!ipahal_ctx) {
		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
		result = -ENOMEM;
		goto bail_err_exit;
	}

	/* parameter validation: HW version window, io mapping, device */
	if (ipa_hw_type < IPA_HW_v3_0) {
		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
		result = -EINVAL;
		goto bail_free_ctx;
	}

	if (ipa_hw_type >= IPA_HW_MAX) {
		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
		result = -EINVAL;
		goto bail_free_ctx;
	}

	if (!base) {
		IPAHAL_ERR("invalid memory io mapping addr\n");
		result = -EINVAL;
		goto bail_free_ctx;
	}

	if (!ipa_pdev) {
		IPAHAL_ERR("invalid IPA platform device\n");
		result = -EINVAL;
		goto bail_free_ctx;
	}

	ipahal_ctx->hw_type = ipa_hw_type;
	ipahal_ctx->base = base;
	ipahal_ctx->ipa_pdev = ipa_pdev;

	/*
	 * Sub-module init order matters; only fltrt allocates resources
	 * that need explicit teardown on a later failure (bail_free_fltrt).
	 */
	if (ipahal_reg_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal reg\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	if (ipahal_imm_cmd_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal imm cmd\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	if (ipahal_pkt_status_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal pkt status\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	/* cannot fail - selects the hdr_funcs vtable */
	ipahal_hdr_init(ipa_hw_type);

	if (ipahal_fltrt_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal flt rt\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	if (ipahal_hw_stats_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal hw stats\n");
		result = -EFAULT;
		goto bail_free_fltrt;
	}

	if (ipahal_nat_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal NAT\n");
		result = -EFAULT;
		goto bail_free_fltrt;
	}

	/* create an IPC buffer for the registers dump */
	ipahal_ctx->regdumpbuf = ipc_log_context_create(IPAHAL_IPC_LOG_PAGES,
		"ipa_regs", 0);
	/* regdump log is best-effort: failure is logged but non-fatal */
	if (ipahal_ctx->regdumpbuf == NULL)
		IPAHAL_ERR("failed to create IPA regdump log, continue...\n");

	ipahal_debugfs_init();

	return 0;

bail_free_fltrt:
	ipahal_fltrt_destroy();
bail_free_ctx:
	/*
	 * regdumpbuf is still NULL (kzalloc) on every path that reaches
	 * here today; the check keeps teardown safe if the flow changes.
	 */
	if (ipahal_ctx->regdumpbuf)
		ipc_log_context_destroy(ipahal_ctx->regdumpbuf);
	kfree(ipahal_ctx);
	ipahal_ctx = NULL;
bail_err_exit:
	return result;
}
  1567. void ipahal_destroy(void)
  1568. {
  1569. IPAHAL_DBG("Entry\n");
  1570. ipahal_fltrt_destroy();
  1571. ipahal_debugfs_remove();
  1572. kfree(ipahal_ctx);
  1573. ipahal_ctx = NULL;
  1574. }
  1575. void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
  1576. {
  1577. if (likely(mem)) {
  1578. dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
  1579. mem->phys_base);
  1580. mem->size = 0;
  1581. mem->base = NULL;
  1582. mem->phys_base = 0;
  1583. }
  1584. }