ipa_flt.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include "ipahal/ipahal.h"
  7. #include "ipahal/ipahal_fltrt.h"
  8. #define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
  9. #define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
  10. #define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
  11. #define IPA_FLT_GET_RULE_TYPE(__entry) \
  12. ( \
  13. ((__entry)->rule.hashable) ? \
  14. (IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
  15. )
  16. /**
  17. * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
  18. * @ip: the ip address family type
  19. * @entry: filtering entry
  20. * @buf: output buffer, buf == NULL means
  21. * caller wants to know the size of the rule as seen
  22. * by HW so they did not pass a valid buffer, we will use a
  23. * scratch buffer instead.
  24. * With this scheme we are going to
  25. * generate the rule twice, once to know size using scratch
  26. * buffer and second to write the rule to the actual caller
  27. * supplied buffer which is of required size
  28. *
  29. * Returns: 0 on success, negative on failure
  30. *
  31. * caller needs to hold any needed locks to ensure integrity
  32. *
  33. */
  34. static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
  35. struct ipa3_flt_entry *entry, u8 *buf)
  36. {
  37. struct ipahal_flt_rule_gen_params gen_params;
  38. int res = 0;
  39. memset(&gen_params, 0, sizeof(gen_params));
  40. if (entry->rule.hashable) {
  41. if (entry->rule.attrib.attrib_mask & IPA_FLT_IS_PURE_ACK
  42. && !entry->rule.eq_attrib_type) {
  43. IPAERR_RL("PURE_ACK rule atrb used with hash rule\n");
  44. WARN_ON_RATELIMIT_IPA(1);
  45. return -EPERM;
  46. }
  47. /*
  48. * tos_eq_present field has two meanings:
  49. * tos equation for IPA ver < 4.5 (as the field name reveals)
  50. * pure_ack equation for IPA ver >= 4.5
  51. */
  52. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
  53. entry->rule.eq_attrib_type &&
  54. entry->rule.eq_attrib.tos_eq_present) {
  55. IPAERR_RL("PURE_ACK rule eq used with hash rule\n");
  56. return -EPERM;
  57. }
  58. }
  59. gen_params.ipt = ip;
  60. if (entry->rt_tbl && (!ipa3_check_idr_if_freed(entry->rt_tbl)))
  61. gen_params.rt_tbl_idx = entry->rt_tbl->idx;
  62. else
  63. gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
  64. gen_params.priority = entry->prio;
  65. gen_params.id = entry->rule_id;
  66. gen_params.rule = (const struct ipa_flt_rule_i *)&entry->rule;
  67. gen_params.cnt_idx = entry->cnt_idx;
  68. res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
  69. if (res) {
  70. IPAERR_RL("failed to generate flt h/w rule\n");
  71. return res;
  72. }
  73. return 0;
  74. }
  75. static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
  76. {
  77. struct ipa3_flt_tbl *tbl;
  78. int i;
  79. IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
  80. for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
  81. if (!ipa_is_ep_support_flt(i))
  82. continue;
  83. tbl = &ipa3_ctx->flt_tbl[i][ip];
  84. if (tbl->prev_mem[rlt].phys_base) {
  85. IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
  86. ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
  87. }
  88. if (list_empty(&tbl->head_flt_rule_list)) {
  89. if (tbl->curr_mem[rlt].phys_base) {
  90. IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
  91. i);
  92. ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
  93. }
  94. }
  95. }
  96. }
  97. /**
  98. * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
  99. * assign priorities to the rules, calculate their sizes and calculate
  100. * the overall table size
  101. * @ip: the ip address family type
  102. * @tbl: the flt tbl to be prepared
  103. * @pipe_idx: the ep pipe appropriate for the given tbl
  104. *
  105. * Return: 0 on success, negative on failure
  106. */
  107. static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
  108. struct ipa3_flt_tbl *tbl, int pipe_idx)
  109. {
  110. struct ipa3_flt_entry *entry;
  111. int prio_i;
  112. int max_prio;
  113. u32 hdr_width;
  114. tbl->sz[IPA_RULE_HASHABLE] = 0;
  115. tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
  116. max_prio = ipahal_get_rule_max_priority();
  117. prio_i = max_prio;
  118. list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
  119. if (entry->rule.max_prio) {
  120. entry->prio = max_prio;
  121. } else {
  122. if (ipahal_rule_decrease_priority(&prio_i)) {
  123. IPAERR("cannot decrease rule priority - %d\n",
  124. prio_i);
  125. return -EPERM;
  126. }
  127. entry->prio = prio_i;
  128. }
  129. if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
  130. IPAERR("failed to calculate HW FLT rule size\n");
  131. return -EPERM;
  132. }
  133. IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
  134. pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
  135. if (entry->rule.hashable)
  136. tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
  137. else
  138. tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
  139. }
  140. if ((tbl->sz[IPA_RULE_HASHABLE] +
  141. tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
  142. IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
  143. pipe_idx);
  144. return 0;
  145. }
  146. hdr_width = ipahal_get_hw_tbl_hdr_width();
  147. /* for the header word */
  148. if (tbl->sz[IPA_RULE_HASHABLE])
  149. tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
  150. if (tbl->sz[IPA_RULE_NON_HASHABLE])
  151. tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
  152. IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
  153. tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
  154. return 0;
  155. }
  156. /**
  157. * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
  158. * (rules and tables) to HW format and fill it in the given buffers
  159. * @ip: the ip address family type
  160. * @rlt: the type of the rules to translate (hashable or non-hashable)
  161. * @base: the rules body buffer to be filled
  162. * @hdr: the rules header (addresses/offsets) buffer to be filled
  163. * @body_ofst: the offset of the rules body from the rules header at
  164. * ipa sram
  165. *
  166. * Returns: 0 on success, negative on failure
  167. *
  168. * caller needs to hold any needed locks to ensure integrity
  169. *
  170. */
  171. static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
  172. enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
  173. {
  174. u64 offset;
  175. u8 *body_i;
  176. int res;
  177. struct ipa3_flt_entry *entry;
  178. u8 *tbl_mem_buf;
  179. struct ipa_mem_buffer tbl_mem;
  180. struct ipa3_flt_tbl *tbl;
  181. int i;
  182. int hdr_idx = 0;
  183. body_i = base;
  184. for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
  185. if (!ipa_is_ep_support_flt(i))
  186. continue;
  187. tbl = &ipa3_ctx->flt_tbl[i][ip];
  188. if (tbl->sz[rlt] == 0) {
  189. hdr_idx++;
  190. continue;
  191. }
  192. if (tbl->in_sys[rlt]) {
  193. /* only body (no header) */
  194. tbl_mem.size = tbl->sz[rlt] -
  195. ipahal_get_hw_tbl_hdr_width();
  196. if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
  197. IPAERR("fail to alloc sys tbl of size %d\n",
  198. tbl_mem.size);
  199. goto err;
  200. }
  201. if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
  202. hdr, hdr_idx, true)) {
  203. IPAERR("fail to wrt sys tbl addr to hdr\n");
  204. goto hdr_update_fail;
  205. }
  206. tbl_mem_buf = tbl_mem.base;
  207. /* generate the rule-set */
  208. list_for_each_entry(entry, &tbl->head_flt_rule_list,
  209. link) {
  210. if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
  211. continue;
  212. res = ipa3_generate_flt_hw_rule(
  213. ip, entry, tbl_mem_buf);
  214. if (res) {
  215. IPAERR("failed to gen HW FLT rule\n");
  216. goto hdr_update_fail;
  217. }
  218. tbl_mem_buf += entry->hw_len;
  219. }
  220. if (tbl->curr_mem[rlt].phys_base) {
  221. WARN_ON(tbl->prev_mem[rlt].phys_base);
  222. tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
  223. }
  224. tbl->curr_mem[rlt] = tbl_mem;
  225. } else {
  226. offset = body_i - base + body_ofst;
  227. /* update the hdr at the right index */
  228. if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
  229. hdr_idx, true)) {
  230. IPAERR("fail to wrt lcl tbl ofst to hdr\n");
  231. goto hdr_update_fail;
  232. }
  233. /* generate the rule-set */
  234. list_for_each_entry(entry, &tbl->head_flt_rule_list,
  235. link) {
  236. if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
  237. continue;
  238. res = ipa3_generate_flt_hw_rule(
  239. ip, entry, body_i);
  240. if (res) {
  241. IPAERR("failed to gen HW FLT rule\n");
  242. goto err;
  243. }
  244. body_i += entry->hw_len;
  245. }
  246. /**
  247. * advance body_i to next table alignment as local
  248. * tables are order back-to-back
  249. */
  250. body_i += ipahal_get_lcl_tbl_addr_alignment();
  251. body_i = (u8 *)((long)body_i &
  252. ~ipahal_get_lcl_tbl_addr_alignment());
  253. }
  254. hdr_idx++;
  255. }
  256. return 0;
  257. hdr_update_fail:
  258. ipahal_free_dma_mem(&tbl_mem);
  259. err:
  260. return -EPERM;
  261. }
  262. /**
  263. * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
  264. * headers and bodies are being created into buffers that will be filled into
  265. * the local memory (sram)
  266. * @ip: the ip address family type
  267. * @alloc_params: In and Out parameters for the allocations of the buffers
  268. * 4 buffers: hdr and bdy, each hashable and non-hashable
  269. *
  270. * Return: 0 on success, negative on failure
  271. */
  272. static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
  273. struct ipahal_fltrt_alloc_imgs_params *alloc_params)
  274. {
  275. u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
  276. int rc = 0;
  277. if (ip == IPA_IP_v4) {
  278. nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
  279. IPA_MEM_PART(v4_flt_nhash_ofst);
  280. hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
  281. IPA_MEM_PART(v4_flt_hash_ofst);
  282. } else {
  283. nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
  284. IPA_MEM_PART(v6_flt_nhash_ofst);
  285. hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
  286. IPA_MEM_PART(v6_flt_hash_ofst);
  287. }
  288. if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
  289. IPAERR_RL("fail to allocate FLT HW TBL images. IP %d\n", ip);
  290. rc = -ENOMEM;
  291. goto allocate_failed;
  292. }
  293. if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
  294. alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
  295. hash_bdy_start_ofst)) {
  296. IPAERR_RL("fail to translate hashable flt tbls to hw format\n");
  297. rc = -EPERM;
  298. goto translate_fail;
  299. }
  300. if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
  301. alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
  302. nhash_bdy_start_ofst)) {
  303. IPAERR_RL("fail to translate non-hash flt tbls to hw format\n");
  304. rc = -EPERM;
  305. goto translate_fail;
  306. }
  307. return rc;
  308. translate_fail:
  309. if (alloc_params->hash_hdr.size)
  310. ipahal_free_dma_mem(&alloc_params->hash_hdr);
  311. ipahal_free_dma_mem(&alloc_params->nhash_hdr);
  312. if (alloc_params->hash_bdy.size)
  313. ipahal_free_dma_mem(&alloc_params->hash_bdy);
  314. if (alloc_params->nhash_bdy.size)
  315. ipahal_free_dma_mem(&alloc_params->nhash_bdy);
  316. allocate_failed:
  317. return rc;
  318. }
  319. /**
  320. * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt
  321. * tbl bodies at the sram is enough for the commit
  322. * @ipt: the ip address family type
  323. * @rlt: the rule type (hashable or non-hashable)
  324. *
  325. * Return: true if enough space available or false in other cases
  326. */
  327. static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
  328. enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
  329. {
  330. u16 avail;
  331. if (!bdy) {
  332. IPAERR("Bad parameters, bdy = NULL\n");
  333. return false;
  334. }
  335. if (ipt == IPA_IP_v4)
  336. avail = (rlt == IPA_RULE_HASHABLE) ?
  337. IPA_MEM_PART(apps_v4_flt_hash_size) :
  338. IPA_MEM_PART(apps_v4_flt_nhash_size);
  339. else
  340. avail = (rlt == IPA_RULE_HASHABLE) ?
  341. IPA_MEM_PART(apps_v6_flt_hash_size) :
  342. IPA_MEM_PART(apps_v6_flt_nhash_size);
  343. if (bdy->size <= avail)
  344. return true;
  345. IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
  346. bdy->size, avail, ipt, rlt);
  347. return false;
  348. }
  349. /**
  350. * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
  351. * payload pointers buffers for headers and bodies of flt structure
  352. * as well as place for flush imm.
  353. * @ipt: the ip address family type
  354. * @entries: the number of entries
  355. * @desc: [OUT] descriptor buffer
  356. * @cmd: [OUT] imm commands payload pointers buffer
  357. *
  358. * Return: 0 on success, negative on failure
  359. */
  360. static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip, u16 entries,
  361. struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
  362. {
  363. *desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
  364. if (*desc == NULL) {
  365. IPAERR("fail to alloc desc blob ip %d\n", ip);
  366. goto fail_desc_alloc;
  367. }
  368. *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
  369. if (*cmd_pyld == NULL) {
  370. IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
  371. goto fail_cmd_alloc;
  372. }
  373. return 0;
  374. fail_cmd_alloc:
  375. kfree(*desc);
  376. fail_desc_alloc:
  377. return -ENOMEM;
  378. }
  379. /**
  380. * ipa_flt_skip_pipe_config() - skip ep flt configuration or not?
  381. * will skip according to pre-configuration or modem pipes
  382. * @pipe: the EP pipe index
  383. *
  384. * Return: true if to skip, false otherwize
  385. */
  386. static bool ipa_flt_skip_pipe_config(int pipe)
  387. {
  388. struct ipa3_ep_context *ep;
  389. if (ipa_is_modem_pipe(pipe)) {
  390. IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
  391. return true;
  392. }
  393. if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
  394. IPADBG_LOW("skip %d\n", pipe);
  395. return true;
  396. }
  397. ep = &ipa3_ctx->ep[pipe];
  398. if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
  399. && ipa3_ctx->modem_cfg_emb_pipe_flt)
  400. && ep->client == IPA_CLIENT_APPS_WAN_PROD) {
  401. IPADBG_LOW("skip %d\n", pipe);
  402. return true;
  403. }
  404. return false;
  405. }
/**
 * __ipa_commit_flt_v3() - commit flt tables to the hw
 * commit the headers and the bodies if are local with internal cache flushing.
 * The headers (and local bodies) will first be created into dma buffers and
 * then written via IC to the SRAM
 * @ip: the ip address family type
 *
 * Builds, in order: an optional coal-frame-close register write, an
 * optional hash-cache flush, one (or two, with hash support) header DMA
 * writes per filtering pipe, and optional local body DMA writes; then
 * sends the whole batch as immediate commands and reaps retired DDR
 * table images.
 *
 * Return: 0 on success, negative on failure
 */
int __ipa_commit_flt_v3(enum ipa_ip_type ip)
{
	struct ipahal_fltrt_alloc_imgs_params alloc_params;
	int rc = 0;
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int num_cmd = 0;
	int i;
	int hdr_idx;
	u32 lcl_hash_hdr, lcl_nhash_hdr;
	u32 lcl_hash_bdy, lcl_nhash_bdy;
	bool lcl_hash, lcl_nhash;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;
	u32 tbl_hdr_width;
	struct ipa3_flt_tbl *tbl;
	u16 entries;
	/* NOTE(review): not zero-initialized, unlike reg_write_cmd —
	 * only used when the coal pipe exists and all consumed fields
	 * are assigned there, but {0} init would be safer; confirm.
	 */
	struct ipahal_imm_cmd_register_write reg_write_coal_close;

	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
	memset(&alloc_params, 0, sizeof(alloc_params));
	alloc_params.ipt = ip;
	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;

	/* resolve the SRAM addresses of the hdr/body regions per family */
	if (ip == IPA_IP_v4) {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
	} else {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
	}

	/* pass 1: assign priorities/sizes and total up local-table space */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
			rc = -EPERM;
			goto prep_failed;
		}
		/* local tables don't carry their hdr word in the body img */
		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
			tbl->sz[IPA_RULE_HASHABLE]) {
			alloc_params.num_lcl_hash_tbls++;
			alloc_params.total_sz_lcl_hash_tbls +=
				tbl->sz[IPA_RULE_HASHABLE];
			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
		}
		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
			tbl->sz[IPA_RULE_NON_HASHABLE]) {
			alloc_params.num_lcl_nhash_tbls++;
			alloc_params.total_sz_lcl_nhash_tbls +=
				tbl->sz[IPA_RULE_NON_HASHABLE];
			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
		}
	}

	/* pass 2: render all tables into DMA-able images */
	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
		IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
		rc = -EFAULT;
		goto prep_failed;
	}

	/* local bodies must fit the SRAM space carved out for apps */
	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
		&alloc_params.hash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}
	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
		&alloc_params.nhash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}

	/* +4: 2 for bodies (hashable and non-hashable), 1 for flushing and 1
	 * for closing the coalescing frame
	 */
	entries = (ipa3_ctx->ep_flt_num) * 2 + 4;

	if (ipa_flt_alloc_cmd_buffers(ip, entries, &desc, &cmd_pyld)) {
		rc = -ENOMEM;
		goto fail_size_valid;
	}

	/* IC to close the coal frame before HPS Clear if coal is enabled */
	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
		reg_write_coal_close.skip_pipeline_clear = false;
		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_coal_close.offset = ipahal_get_reg_ofst(
			IPA_AGGR_FORCE_CLOSE);
		ipahal_get_aggr_force_close_valmask(i, &valmask);
		reg_write_coal_close.value = valmask.val;
		reg_write_coal_close.value_mask = valmask.mask;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE,
			&reg_write_coal_close, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("failed to construct coal close IC\n");
			rc = -ENOMEM;
			goto fail_reg_write_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}

	/*
	 * When SRAM memory is not allocated to hash tables, sending a
	 * command for a hash table (filter/routing) operation is not
	 * supported — skip the hash-cache flush in that case.
	 */
	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
		/* flushing ipa internal hashable flt rules cache */
		memset(&flush, 0, sizeof(flush));
		if (ip == IPA_IP_v4)
			flush.v4_flt = true;
		else
			flush.v6_flt = true;
		ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
		reg_write_cmd.skip_pipeline_clear = false;
		reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_cmd.offset = ipahal_get_reg_ofst(
			IPA_FILT_ROUT_HASH_FLUSH);
		reg_write_cmd.value = valmask.val;
		reg_write_cmd.value_mask = valmask.mask;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd,
			false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR(
			"fail construct register_write imm cmd: IP %d\n", ip);
			rc = -EFAULT;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}

	/* one hdr-word DMA write per configured filtering pipe */
	hdr_idx = 0;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i)) {
			IPADBG_LOW("skip %d - not filtering pipe\n", i);
			continue;
		}
		/* skipped pipes still own a header slot */
		if (ipa_flt_skip_pipe_config(i)) {
			hdr_idx++;
			continue;
		}
		/* +1: this pipe may add two cmds (nhash + hash) */
		if (num_cmd + 1 >= entries) {
			IPAERR("number of commands is out of range: IP = %d\n",
				ip);
			rc = -ENOBUFS;
			goto fail_imm_cmd_construct;
		}

		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
			hdr_idx, i);

		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = tbl_hdr_width;
		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
			hdr_idx * tbl_hdr_width;
		mem_cmd.local_addr = lcl_nhash_hdr +
			hdr_idx * tbl_hdr_width;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;

		/*
		 * SRAM memory not allocated to hash tables. Sending command
		 * to hash tables(filter/routing) operation not supported.
		 */
		if (!ipa3_ctx->ipa_fltrt_not_hashable) {
			mem_cmd.is_read = false;
			mem_cmd.skip_pipeline_clear = false;
			mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			mem_cmd.size = tbl_hdr_width;
			mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
				hdr_idx * tbl_hdr_width;
			mem_cmd.local_addr = lcl_hash_hdr +
				hdr_idx * tbl_hdr_width;
			cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&mem_cmd, false);
			if (!cmd_pyld[num_cmd]) {
				IPAERR(
				"fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
				rc = -ENOMEM;
				goto fail_imm_cmd_construct;
			}
			ipa3_init_imm_cmd_desc(&desc[num_cmd],
				cmd_pyld[num_cmd]);
			++num_cmd;
		}
		++hdr_idx;
	}

	/* bulk-copy the non-hashable body image when it lives in SRAM */
	if (lcl_nhash) {
		if (num_cmd >= entries) {
			IPAERR("number of commands is out of range: IP = %d\n",
				ip);
			rc = -ENOBUFS;
			goto fail_imm_cmd_construct;
		}
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.nhash_bdy.size;
		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
		mem_cmd.local_addr = lcl_nhash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}
	/* same for the hashable body image */
	if (lcl_hash) {
		if (num_cmd >= entries) {
			IPAERR("number of commands is out of range: IP = %d\n",
				ip);
			rc = -ENOBUFS;
			goto fail_imm_cmd_construct;
		}
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.hash_bdy.size;
		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
		mem_cmd.local_addr = lcl_hash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}

	/* fire the whole batch in one shot */
	if (ipa3_send_cmd(num_cmd, desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
		goto fail_imm_cmd_construct;
	}

	IPADBG_LOW("Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);

	IPADBG_LOW("Non-Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);

	if (alloc_params.hash_bdy.size) {
		IPADBG_LOW("Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
			alloc_params.hash_bdy.phys_base,
			alloc_params.hash_bdy.size);
	}

	if (alloc_params.nhash_bdy.size) {
		IPADBG_LOW("Non-Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
			alloc_params.nhash_bdy.phys_base,
			alloc_params.nhash_bdy.size);
	}

	/* HW now points at the new images; retire the old DDR ones */
	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);

	/* fallthrough on success: same cleanup frees staging resources */
fail_imm_cmd_construct:
	for (i = 0 ; i < num_cmd ; i++)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_reg_write_construct:
	kfree(desc);
	kfree(cmd_pyld);
fail_size_valid:
	if (alloc_params.hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params.hash_hdr);
	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
	if (alloc_params.hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.hash_bdy);
	if (alloc_params.nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
prep_failed:
	return rc;
}
/*
 * __ipa_validate_flt_rule() - validate a filter rule before installation.
 * @rule: rule to validate
 * @rt_tbl: [out] resolved routing table object when the rule references one
 *	by handle; untouched otherwise
 * @ip: IP family of the table the rule targets
 *
 * Checks routing-target consistency, PDN index (IPA HW v4.0+), caller
 * supplied rule IDs, and HW counter indices (IPA HW v4.5+).
 *
 * Returns 0 if the rule is valid, -EPERM otherwise.
 */
static int __ipa_validate_flt_rule(const struct ipa_flt_rule_i *rule,
	struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
{
	int index;

	if (rule->action != IPA_PASS_TO_EXCEPTION) {
		if (!rule->eq_attrib_type) {
			/* routing target given as a table handle */
			if (!rule->rt_tbl_hdl) {
				IPAERR_RL("invalid RT tbl\n");
				goto error;
			}
			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
			if (*rt_tbl == NULL) {
				IPAERR_RL("RT tbl not found\n");
				goto error;
			}
			if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
				IPAERR_RL("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			/* equation rule: raw index must stay in modem range */
			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR_RL("invalid RT tbl\n");
				goto error;
			}
		}
	} else {
		/* exception action must not carry a routing table index */
		if (rule->rt_tbl_idx > 0) {
			IPAERR_RL("invalid RT tbl\n");
			goto error;
		}
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		/* PDN index is only meaningful for NAT actions */
		if (rule->pdn_idx) {
			if (rule->action == IPA_PASS_TO_EXCEPTION ||
				rule->action == IPA_PASS_TO_ROUTING) {
				IPAERR_RL(
					"PDN index should be 0 when action is not pass to NAT\n");
				goto error;
			} else {
				if (rule->pdn_idx >= IPA_MAX_PDN_NUM) {
					IPAERR_RL("PDN index %d is too large\n",
						rule->pdn_idx);
					goto error;
				}
			}
		}
	}

	if (rule->rule_id) {
		/* caller-chosen IDs must not collide with the auto range */
		if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) ||
			(rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
			IPAERR_RL("invalid rule_id provided 0x%x\n"
				"rule_id with bit 0x%x are auto generated\n",
				rule->rule_id, ipahal_get_rule_id_hi_bit());
			goto error;
		}
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
		if (rule->enable_stats && rule->cnt_idx) {
			if (!ipahal_is_rule_cnt_id_valid(rule->cnt_idx)) {
				IPAERR_RL(
					"invalid cnt_idx %hhu out of range\n",
					rule->cnt_idx);
				goto error;
			}
			/* cnt_idx is 1-based; used_hw[] is 0-based */
			index = rule->cnt_idx - 1;
			if (!ipa3_ctx->flt_rt_counters.used_hw[index]) {
				IPAERR_RL(
					"invalid cnt_idx %hhu not alloc by driver\n",
					rule->cnt_idx);
				goto error;
			}
		}
	} else {
		if (rule->enable_stats) {
			IPAERR_RL(
				"enable_stats won't support on ipa_hw_type %d\n",
				ipa3_ctx->ipa_hw_type);
			goto error;
		}
	}
	return 0;

error:
	return -EPERM;
}
  804. static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
  805. const struct ipa_flt_rule_i *rule, struct ipa3_rt_tbl *rt_tbl,
  806. struct ipa3_flt_tbl *tbl, bool user)
  807. {
  808. int id;
  809. *entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
  810. if (!*entry)
  811. goto error;
  812. INIT_LIST_HEAD(&((*entry)->link));
  813. (*entry)->rule = *rule;
  814. (*entry)->cookie = IPA_FLT_COOKIE;
  815. (*entry)->rt_tbl = rt_tbl;
  816. (*entry)->tbl = tbl;
  817. if (rule->rule_id) {
  818. id = rule->rule_id;
  819. } else {
  820. id = ipa3_alloc_rule_id(tbl->rule_ids);
  821. if (id < 0) {
  822. IPAERR_RL("failed to allocate rule id\n");
  823. WARN_ON_RATELIMIT_IPA(1);
  824. goto rule_id_fail;
  825. }
  826. }
  827. (*entry)->rule_id = id;
  828. (*entry)->ipacm_installed = user;
  829. if (rule->enable_stats)
  830. (*entry)->cnt_idx = rule->cnt_idx;
  831. else
  832. (*entry)->cnt_idx = 0;
  833. return 0;
  834. rule_id_fail:
  835. kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
  836. error:
  837. return -EPERM;
  838. }
  839. static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
  840. struct ipa3_flt_entry *entry, u32 *rule_hdl)
  841. {
  842. int id;
  843. tbl->rule_cnt++;
  844. if (entry->rt_tbl)
  845. entry->rt_tbl->ref_cnt++;
  846. id = ipa3_id_alloc(entry);
  847. if (id < 0) {
  848. IPAERR_RL("failed to add to tree\n");
  849. WARN_ON_RATELIMIT_IPA(1);
  850. goto ipa_insert_failed;
  851. }
  852. *rule_hdl = id;
  853. entry->id = id;
  854. IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
  855. return 0;
  856. ipa_insert_failed:
  857. if (entry->rt_tbl)
  858. entry->rt_tbl->ref_cnt--;
  859. tbl->rule_cnt--;
  860. return -EPERM;
  861. }
  862. static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
  863. const struct ipa_flt_rule_i *rule, u8 add_rear,
  864. u32 *rule_hdl, bool user)
  865. {
  866. struct ipa3_flt_entry *entry;
  867. struct ipa3_rt_tbl *rt_tbl = NULL;
  868. if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
  869. goto error;
  870. if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user))
  871. goto error;
  872. if (add_rear) {
  873. if (tbl->sticky_rear)
  874. list_add_tail(&entry->link,
  875. tbl->head_flt_rule_list.prev);
  876. else
  877. list_add_tail(&entry->link, &tbl->head_flt_rule_list);
  878. } else {
  879. list_add(&entry->link, &tbl->head_flt_rule_list);
  880. }
  881. if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
  882. goto ipa_insert_failed;
  883. return 0;
  884. ipa_insert_failed:
  885. list_del(&entry->link);
  886. /* if rule id was allocated from idr, remove it */
  887. if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
  888. (entry->rule_id >= ipahal_get_low_rule_id()))
  889. idr_remove(entry->tbl->rule_ids, entry->rule_id);
  890. kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
  891. error:
  892. return -EPERM;
  893. }
/*
 * __ipa_add_flt_rule_after() - insert a new rule right after a given entry.
 * @tbl: filter table the chain belongs to
 * @rule: rule to install
 * @rule_hdl: [out] handle of the newly installed rule
 * @ip: IP family
 * @add_after_entry: [in/out] anchor to insert after; advanced to the new
 *	entry on success so consecutive calls chain insertions. Set to NULL
 *	on any failure so the rest of the batch fails fast.
 *
 * Returns 0 on success, -EPERM otherwise.
 */
static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
	const struct ipa_flt_rule_i *rule,
	u32 *rule_hdl,
	enum ipa_ip_type ip,
	struct ipa3_flt_entry **add_after_entry)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	/* a previous insertion in this batch failed - chain is cut */
	if (!*add_after_entry)
		goto error;

	if (rule == NULL || rule_hdl == NULL) {
		IPAERR_RL("bad parms rule=%pK rule_hdl=%pK\n", rule,
			rule_hdl);
		goto error;
	}

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true))
		goto error;

	list_add(&entry->link, &((*add_after_entry)->link));

	if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
		goto ipa_insert_failed;

	/*
	 * prepare for next insertion
	 */
	*add_after_entry = entry;

	return 0;

ipa_insert_failed:
	list_del(&entry->link);
	/* if rule id was allocated from idr, remove it */
	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
		(entry->rule_id >= ipahal_get_low_rule_id()))
		idr_remove(entry->tbl->rule_ids, entry->rule_id);
	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

error:
	/* poison the anchor so the remainder of the batch fails */
	*add_after_entry = NULL;
	return -EPERM;
}
  932. static int __ipa_del_flt_rule(u32 rule_hdl)
  933. {
  934. struct ipa3_flt_entry *entry;
  935. int id;
  936. entry = ipa3_id_find(rule_hdl);
  937. if (entry == NULL) {
  938. IPAERR_RL("lookup failed\n");
  939. return -EINVAL;
  940. }
  941. if (entry->cookie != IPA_FLT_COOKIE) {
  942. IPAERR_RL("bad params\n");
  943. return -EINVAL;
  944. }
  945. id = entry->id;
  946. list_del(&entry->link);
  947. entry->tbl->rule_cnt--;
  948. if (entry->rt_tbl)
  949. entry->rt_tbl->ref_cnt--;
  950. IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
  951. entry->tbl->rule_cnt, entry->rule_id);
  952. entry->cookie = 0;
  953. /* if rule id was allocated from idr, remove it */
  954. if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
  955. (entry->rule_id >= ipahal_get_low_rule_id()))
  956. idr_remove(entry->tbl->rule_ids, entry->rule_id);
  957. kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
  958. /* remove the handle from the database */
  959. ipa3_id_remove(id);
  960. return 0;
  961. }
  962. static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy_i *frule,
  963. enum ipa_ip_type ip)
  964. {
  965. struct ipa3_flt_entry *entry;
  966. struct ipa3_rt_tbl *rt_tbl = NULL;
  967. entry = ipa3_id_find(frule->rule_hdl);
  968. if (entry == NULL) {
  969. IPAERR_RL("lookup failed\n");
  970. goto error;
  971. }
  972. if (entry->cookie != IPA_FLT_COOKIE) {
  973. IPAERR_RL("bad params\n");
  974. goto error;
  975. }
  976. if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip))
  977. goto error;
  978. if (entry->rt_tbl)
  979. entry->rt_tbl->ref_cnt--;
  980. entry->rule = frule->rule;
  981. entry->rt_tbl = rt_tbl;
  982. if (entry->rt_tbl)
  983. entry->rt_tbl->ref_cnt++;
  984. entry->hw_len = 0;
  985. entry->prio = 0;
  986. if (frule->rule.enable_stats)
  987. entry->cnt_idx = frule->rule.cnt_idx;
  988. else
  989. entry->cnt_idx = 0;
  990. return 0;
  991. error:
  992. return -EPERM;
  993. }
  994. static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
  995. {
  996. *ipa_ep_idx = ipa3_get_ep_mapping(ep);
  997. if (*ipa_ep_idx < 0) {
  998. IPAERR_RL("ep not valid ep=%d\n", ep);
  999. return -EINVAL;
  1000. }
  1001. if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
  1002. IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
  1003. if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
  1004. IPAERR("ep do not support filtering ep=%d\n", ep);
  1005. return -EINVAL;
  1006. }
  1007. return 0;
  1008. }
  1009. static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
  1010. const struct ipa_flt_rule_i *rule, u8 add_rear,
  1011. u32 *rule_hdl, bool user)
  1012. {
  1013. struct ipa3_flt_tbl *tbl;
  1014. int ipa_ep_idx;
  1015. if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
  1016. IPAERR_RL("bad parms rule=%pK rule_hdl=%pK ep=%d\n", rule,
  1017. rule_hdl, ep);
  1018. return -EINVAL;
  1019. }
  1020. if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
  1021. return -EINVAL;
  1022. if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1023. IPAERR_RL("invalid ipa_ep_idx=%d\n", ipa_ep_idx);
  1024. return -EINVAL;
  1025. }
  1026. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
  1027. IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
  1028. return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
  1029. }
  1030. static void __ipa_convert_flt_rule_in(struct ipa_flt_rule rule_in,
  1031. struct ipa_flt_rule_i *rule_out)
  1032. {
  1033. if (unlikely(sizeof(struct ipa_flt_rule) >
  1034. sizeof(struct ipa_flt_rule_i))) {
  1035. IPAERR_RL("invalid size in:%d size out:%d\n",
  1036. sizeof(struct ipa_flt_rule_i),
  1037. sizeof(struct ipa_flt_rule));
  1038. return;
  1039. }
  1040. memset(rule_out, 0, sizeof(struct ipa_flt_rule_i));
  1041. memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
  1042. }
  1043. static void __ipa_convert_flt_rule_out(struct ipa_flt_rule_i rule_in,
  1044. struct ipa_flt_rule *rule_out)
  1045. {
  1046. if (unlikely(sizeof(struct ipa_flt_rule) >
  1047. sizeof(struct ipa_flt_rule_i))) {
  1048. IPAERR_RL("invalid size in:%d size out:%d\n",
  1049. sizeof(struct ipa_flt_rule_i),
  1050. sizeof(struct ipa_flt_rule));
  1051. return;
  1052. }
  1053. memset(rule_out, 0, sizeof(struct ipa_flt_rule));
  1054. memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
  1055. }
  1056. static void __ipa_convert_flt_mdfy_in(struct ipa_flt_rule_mdfy rule_in,
  1057. struct ipa_flt_rule_mdfy_i *rule_out)
  1058. {
  1059. if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
  1060. sizeof(struct ipa_flt_rule_mdfy_i))) {
  1061. IPAERR_RL("invalid size in:%d size out:%d\n",
  1062. sizeof(struct ipa_flt_rule_mdfy),
  1063. sizeof(struct ipa_flt_rule_mdfy_i));
  1064. return;
  1065. }
  1066. memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy_i));
  1067. memcpy(&rule_out->rule, &rule_in.rule,
  1068. sizeof(struct ipa_flt_rule));
  1069. rule_out->rule_hdl = rule_in.rule_hdl;
  1070. rule_out->status = rule_in.status;
  1071. }
  1072. static void __ipa_convert_flt_mdfy_out(struct ipa_flt_rule_mdfy_i rule_in,
  1073. struct ipa_flt_rule_mdfy *rule_out)
  1074. {
  1075. if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
  1076. sizeof(struct ipa_flt_rule_mdfy_i))) {
  1077. IPAERR_RL("invalid size in:%d size out:%d\n",
  1078. sizeof(struct ipa_flt_rule_mdfy),
  1079. sizeof(struct ipa_flt_rule_mdfy_i));
  1080. return;
  1081. }
  1082. memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy));
  1083. memcpy(&rule_out->rule, &rule_in.rule,
  1084. sizeof(struct ipa_flt_rule));
  1085. rule_out->rule_hdl = rule_in.rule_hdl;
  1086. rule_out->status = rule_in.status;
  1087. }
/**
 * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
 * commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
	/* thin wrapper: the usr variant with user_only = false */
	return ipa3_add_flt_rule_usr(rules, false);
}
/**
 * ipa3_add_flt_rule_v2() - Add the specified filtering rules to
 * SW and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
{
	/* thin wrapper: the usr variant with user_only = false */
	return ipa3_add_flt_rule_usr_v2(rules, false);
}
/**
 * ipa3_add_flt_rule_usr() - Add the specified filtering rules to
 * SW and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add; per-rule status and handle
 *	fields are updated in place
 * @user_only: [in] indicate rules installed by userspace
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
{
	int i;
	int result;
	struct ipa_flt_rule_i rule;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global) {
			/* if hashing not supported, all table entry
			 * are non-hash tables
			 */
			if (ipa3_ctx->ipa_fltrt_not_hashable)
				rules->rules[i].rule.hashable = false;
			/* legacy uAPI rule -> internal representation */
			__ipa_convert_flt_rule_in(
				rules->rules[i].rule, &rule);
			result = __ipa_add_ep_flt_rule(rules->ip,
					rules->ep,
					&rule,
					rules->rules[i].at_rear,
					&rules->rules[i].flt_rule_hdl,
					user_only);
			/* copy back fields updated during install */
			__ipa_convert_flt_rule_out(rule,
				&rules->rules[i].rule);
		} else
			result = -1;

		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	/* global rules unsupported; rejected after statuses are marked */
	if (rules->global) {
		IPAERR_RL("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_add_flt_rule_usr_v2() - Add the specified filtering
 * rules to SW and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add; the rule array lives behind
 *	an opaque pointer and is cast to struct ipa_flt_rule_add_i
 * @user_only: [in] indicate rules installed by userspace
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2
	*rules, bool user_only)
{
	int i;
	int result;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global) {
			/* if hashing not supported, all table entry
			 * are non-hash tables
			 */
			if (ipa3_ctx->ipa_fltrt_not_hashable)
				((struct ipa_flt_rule_add_i *)
				rules->rules)[i].rule.hashable = false;
			/* v2 already uses the internal rule layout */
			result = __ipa_add_ep_flt_rule(rules->ip,
					rules->ep,
					&(((struct ipa_flt_rule_add_i *)
					rules->rules)[i].rule),
					((struct ipa_flt_rule_add_i *)
					rules->rules)[i].at_rear,
					&(((struct ipa_flt_rule_add_i *)
					rules->rules)[i].flt_rule_hdl),
					user_only);
		} else
			result = -1;

		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = 0;
		}
	}

	/* global rules unsupported; rejected after statuses are marked */
	if (rules->global) {
		IPAERR_RL("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
 * the rule which its handle is given and optionally commit to IPA HW
 * @rules: [inout] rules to add plus the anchor handle (add_after_hdl)
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;
	struct ipa_flt_rule_i rule;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR_RL("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES || ipa_ep_idx < 0) {
		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	/* anchor rule must exist, be valid and belong to this table */
	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_FLT_COOKIE) {
		IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
			entry->cookie, rules->add_after_hdl);
		result = -EINVAL;
		goto bail;
	}

	if (entry->tbl != tbl) {
		IPAERR_RL("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	/* the sticky last rule (default exception) must stay last */
	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR_RL("cannot add rule at end of a sticky table");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
			rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * we add all rules one after the other, if one insertion fails, it cuts
	 * the chain (all following will receive fail status) following calls to
	 * __ipa_add_flt_rule_after will fail (entry == NULL)
	 */
	for (i = 0; i < rules->num_rules; i++) {
		/* if hashing not supported, all tables are non-hash tables*/
		if (ipa3_ctx->ipa_fltrt_not_hashable)
			rules->rules[i].rule.hashable = false;
		__ipa_convert_flt_rule_in(
			rules->rules[i].rule, &rule);
		result = __ipa_add_flt_rule_after(tbl,
				&rule,
				&rules->rules[i].flt_rule_hdl,
				rules->ip,
				&entry);
		/* copy back fields updated during install */
		__ipa_convert_flt_rule_out(rule,
			&rules->rules[i].rule);
		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_add_flt_rule_after_v2() - Add the specified filtering
 * rules to SW after the rule which its handle is given and
 * optionally commit to IPA HW
 * @rules: [inout] rules to add plus the anchor handle; the rule array lives
 *	behind an opaque pointer and is cast to struct ipa_flt_rule_add_i
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after_v2(struct ipa_ioc_add_flt_rule_after_v2
	*rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR_RL("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES ||
		ipa_ep_idx < 0) {
		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	/* anchor rule must exist, be valid and belong to this table */
	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_FLT_COOKIE) {
		IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
			entry->cookie, rules->add_after_hdl);
		result = -EINVAL;
		goto bail;
	}

	if (entry->tbl != tbl) {
		IPAERR_RL("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	/* the sticky last rule (default exception) must stay last */
	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR_RL("cannot add rule at end of a sticky table");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
			rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * we add all rules one after the other, if one insertion fails, it cuts
	 * the chain (all following will receive fail status) following calls to
	 * __ipa_add_flt_rule_after will fail (entry == NULL)
	 */
	for (i = 0; i < rules->num_rules; i++) {
		/* if hashing not supported, all tables are non-hash tables*/
		if (ipa3_ctx->ipa_fltrt_not_hashable)
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].rule.hashable = false;
		result = __ipa_add_flt_rule_after(tbl,
				&(((struct ipa_flt_rule_add_i *)
				rules->rules)[i].rule),
				&(((struct ipa_flt_rule_add_i *)
				rules->rules)[i].flt_rule_hdl),
				rules->ip,
				&entry);
		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
  1437. /**
  1438. * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
  1439. * optionally commit to IPA HW
  1440. *
  1441. * Returns: 0 on success, negative on failure
  1442. *
  1443. * Note: Should not be called from atomic context
  1444. */
  1445. int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
  1446. {
  1447. int i;
  1448. int result;
  1449. if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
  1450. IPAERR_RL("bad param\n");
  1451. return -EINVAL;
  1452. }
  1453. mutex_lock(&ipa3_ctx->lock);
  1454. for (i = 0; i < hdls->num_hdls; i++) {
  1455. if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
  1456. IPAERR_RL("failed to del flt rule %i\n", i);
  1457. hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
  1458. } else {
  1459. hdls->hdl[i].status = 0;
  1460. }
  1461. }
  1462. if (hdls->commit)
  1463. if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
  1464. result = -EPERM;
  1465. goto bail;
  1466. }
  1467. result = 0;
  1468. bail:
  1469. mutex_unlock(&ipa3_ctx->lock);
  1470. return result;
  1471. }
  1472. /**
  1473. * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
  1474. * optionally commit to IPA HW
  1475. *
  1476. * Returns: 0 on success, negative on failure
  1477. *
  1478. * Note: Should not be called from atomic context
  1479. */
  1480. int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
  1481. {
  1482. int i;
  1483. int result;
  1484. struct ipa_flt_rule_mdfy_i rule;
  1485. if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
  1486. IPAERR_RL("bad parm\n");
  1487. return -EINVAL;
  1488. }
  1489. mutex_lock(&ipa3_ctx->lock);
  1490. for (i = 0; i < hdls->num_rules; i++) {
  1491. /* if hashing not supported, all tables are non-hash tables*/
  1492. if (ipa3_ctx->ipa_fltrt_not_hashable)
  1493. hdls->rules[i].rule.hashable = false;
  1494. __ipa_convert_flt_mdfy_in(hdls->rules[i], &rule);
  1495. if (__ipa_mdfy_flt_rule(&rule, hdls->ip)) {
  1496. IPAERR_RL("failed to mdfy flt rule %i\n", i);
  1497. hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
  1498. } else {
  1499. hdls->rules[i].status = 0;
  1500. __ipa_convert_flt_mdfy_out(rule, &hdls->rules[i]);
  1501. }
  1502. }
  1503. if (hdls->commit)
  1504. if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
  1505. result = -EPERM;
  1506. goto bail;
  1507. }
  1508. result = 0;
  1509. bail:
  1510. mutex_unlock(&ipa3_ctx->lock);
  1511. return result;
  1512. }
  1513. /**
  1514. * ipa3_mdfy_flt_rule_v2() - Modify the specified filtering
  1515. * rules in SW and optionally commit to IPA HW
  1516. *
  1517. * Returns: 0 on success, negative on failure
  1518. *
  1519. * Note: Should not be called from atomic context
  1520. */
  1521. int ipa3_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *hdls)
  1522. {
  1523. int i;
  1524. int result;
  1525. if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
  1526. IPAERR_RL("bad parm\n");
  1527. return -EINVAL;
  1528. }
  1529. mutex_lock(&ipa3_ctx->lock);
  1530. for (i = 0; i < hdls->num_rules; i++) {
  1531. /* if hashing not supported, all tables are non-hash tables*/
  1532. if (ipa3_ctx->ipa_fltrt_not_hashable)
  1533. ((struct ipa_flt_rule_mdfy_i *)
  1534. hdls->rules)[i].rule.hashable = false;
  1535. if (__ipa_mdfy_flt_rule(&(((struct ipa_flt_rule_mdfy_i *)
  1536. hdls->rules)[i]), hdls->ip)) {
  1537. IPAERR_RL("failed to mdfy flt rule %i\n", i);
  1538. ((struct ipa_flt_rule_mdfy_i *)
  1539. hdls->rules)[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
  1540. } else {
  1541. ((struct ipa_flt_rule_mdfy_i *)
  1542. hdls->rules)[i].status = 0;
  1543. }
  1544. }
  1545. if (hdls->commit)
  1546. if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
  1547. result = -EPERM;
  1548. goto bail;
  1549. }
  1550. result = 0;
  1551. bail:
  1552. mutex_unlock(&ipa3_ctx->lock);
  1553. return result;
  1554. }
  1555. /**
  1556. * ipa3_commit_flt() - Commit the current SW filtering table of specified type
  1557. * to IPA HW
  1558. * @ip: [in] the family of routing tables
  1559. *
  1560. * Returns: 0 on success, negative on failure
  1561. *
  1562. * Note: Should not be called from atomic context
  1563. */
  1564. int ipa3_commit_flt(enum ipa_ip_type ip)
  1565. {
  1566. int result;
  1567. if (ip >= IPA_IP_MAX) {
  1568. IPAERR_RL("bad param\n");
  1569. return -EINVAL;
  1570. }
  1571. mutex_lock(&ipa3_ctx->lock);
  1572. if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
  1573. result = -EPERM;
  1574. goto bail;
  1575. }
  1576. result = 0;
  1577. bail:
  1578. mutex_unlock(&ipa3_ctx->lock);
  1579. return result;
  1580. }
/**
 * ipa3_reset_flt() - Reset the current SW filtering table of specified type
 * (does not commit to HW)
 * @ip: [in] the family of routing tables
 * @user_only: [in] indicate rules deleted by userspace
 *
 * Walks every filtering-capable pipe and removes all rules (or only the
 * userspace-installed ones when @user_only is set), then commits both IP
 * families to HW.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_flt_entry *entry;
	struct ipa3_flt_entry *next;
	int i;
	int id;
	int rule_id;

	if (ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
				link) {
			/* a listed entry missing from the handle DB means
			 * state corruption - bail out
			 */
			if (ipa3_id_find(entry->id) == NULL) {
				WARN_ON_RATELIMIT_IPA(1);
				mutex_unlock(&ipa3_ctx->lock);
				return -EFAULT;
			}

			/* full reset, or userspace-installed rules only */
			if (!user_only ||
				entry->ipacm_installed) {
				list_del(&entry->link);
				entry->tbl->rule_cnt--;
				/* drop the RT ref unless the RT tbl object
				 * was already freed
				 */
				if (entry->rt_tbl &&
					(!ipa3_check_idr_if_freed(
						entry->rt_tbl)))
					entry->rt_tbl->ref_cnt--;
				/* if rule id was allocated from idr, remove */
				rule_id = entry->rule_id;
				id = entry->id;
				if ((rule_id < ipahal_get_rule_id_hi_bit()) &&
					(rule_id >= ipahal_get_low_rule_id()))
					idr_remove(entry->tbl->rule_ids,
						rule_id);

				entry->cookie = 0;
				kmem_cache_free(ipa3_ctx->flt_rule_cache,
					entry);

				/* remove the handle from the database */
				ipa3_id_remove(id);
			}
		}
	}

	/* commit the change to IPA-HW */
	if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) ||
		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) {
		IPAERR("fail to commit flt-rule\n");
		WARN_ON_RATELIMIT_IPA(1);
		mutex_unlock(&ipa3_ctx->lock);
		return -EPERM;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}
  1649. void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
  1650. {
  1651. struct ipa3_flt_tbl *tbl;
  1652. struct ipa3_ep_context *ep;
  1653. struct ipa_flt_rule_i rule;
  1654. if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1655. IPAERR("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
  1656. ipa_assert();
  1657. return;
  1658. }
  1659. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1660. if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
  1661. IPADBG("cannot add flt rules to non filtering pipe num %d\n",
  1662. ipa_ep_idx);
  1663. return;
  1664. }
  1665. memset(&rule, 0, sizeof(rule));
  1666. mutex_lock(&ipa3_ctx->lock);
  1667. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
  1668. rule.action = IPA_PASS_TO_EXCEPTION;
  1669. __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
  1670. &ep->dflt_flt4_rule_hdl, false);
  1671. ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
  1672. tbl->sticky_rear = true;
  1673. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
  1674. rule.action = IPA_PASS_TO_EXCEPTION;
  1675. __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
  1676. &ep->dflt_flt6_rule_hdl, false);
  1677. ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
  1678. tbl->sticky_rear = true;
  1679. mutex_unlock(&ipa3_ctx->lock);
  1680. }
  1681. void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
  1682. {
  1683. struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
  1684. struct ipa3_flt_tbl *tbl;
  1685. mutex_lock(&ipa3_ctx->lock);
  1686. if (ep->dflt_flt4_rule_hdl) {
  1687. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
  1688. __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
  1689. ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
  1690. /* Reset the sticky flag. */
  1691. tbl->sticky_rear = false;
  1692. ep->dflt_flt4_rule_hdl = 0;
  1693. }
  1694. if (ep->dflt_flt6_rule_hdl) {
  1695. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
  1696. __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
  1697. ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
  1698. /* Reset the sticky flag. */
  1699. tbl->sticky_rear = false;
  1700. ep->dflt_flt6_rule_hdl = 0;
  1701. }
  1702. mutex_unlock(&ipa3_ctx->lock);
  1703. }
  1704. /**
  1705. * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
  1706. * Pipe must be for AP EP (not modem) and support filtering
  1707. * updates the the filtering masking values without changing the rt ones.
  1708. *
  1709. * @pipe_idx: filter pipe index to configure the tuple masking
  1710. * @tuple: the tuple members masking
  1711. * Returns: 0 on success, negative on failure
  1712. *
  1713. */
  1714. int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
  1715. {
  1716. struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
  1717. if (!tuple) {
  1718. IPAERR_RL("bad tuple\n");
  1719. return -EINVAL;
  1720. }
  1721. if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
  1722. IPAERR("bad pipe index!\n");
  1723. return -EINVAL;
  1724. }
  1725. if (!ipa_is_ep_support_flt(pipe_idx)) {
  1726. IPAERR("pipe %d not filtering pipe\n", pipe_idx);
  1727. return -EINVAL;
  1728. }
  1729. if (ipa_is_modem_pipe(pipe_idx)) {
  1730. IPAERR("modem pipe tuple is not configured by AP\n");
  1731. return -EINVAL;
  1732. }
  1733. ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
  1734. pipe_idx, &fltrt_tuple);
  1735. fltrt_tuple.flt = *tuple;
  1736. ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
  1737. pipe_idx, &fltrt_tuple);
  1738. return 0;
  1739. }
  1740. /**
  1741. * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW
  1742. * @pipe_idx: IPA endpoint index
  1743. * @ip_type: IPv4 or IPv6 table
  1744. * @hashable: hashable or non-hashable table
  1745. * @entry: array to fill the table entries
  1746. * @num_entry: number of entries in entry array. set by the caller to indicate
  1747. * entry array size. Then set by this function as an output parameter to
  1748. * indicate the number of entries in the array
  1749. *
  1750. * This function reads the filtering table from IPA SRAM and prepares an array
  1751. * of entries. This function is mainly used for debugging purposes.
  1752. *
  1753. * If empty table or Modem Apps table, zero entries will be returned.
  1754. *
  1755. * Returns: 0 on success, negative on failure
  1756. */
  1757. int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
  1758. bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
  1759. {
  1760. void *ipa_sram_mmio;
  1761. u64 hdr_base_ofst;
  1762. int tbl_entry_idx;
  1763. int i;
  1764. int res = 0;
  1765. u64 tbl_addr;
  1766. bool is_sys;
  1767. u8 *rule_addr;
  1768. struct ipa_mem_buffer *sys_tbl_mem;
  1769. int rule_idx;
  1770. struct ipa3_flt_tbl *flt_tbl_ptr;
  1771. IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%pK num_entry=0x%pK\n",
  1772. pipe_idx, ip_type, hashable, entry, num_entry);
  1773. /*
  1774. * SRAM memory not allocated to hash tables. Reading of hash table
  1775. * rules operation not supported
  1776. */
  1777. if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) {
  1778. IPAERR_RL("Reading hashable rules not supported\n");
  1779. *num_entry = 0;
  1780. return 0;
  1781. }
  1782. if (pipe_idx >= ipa3_ctx->ipa_num_pipes ||
  1783. pipe_idx >= IPA3_MAX_NUM_PIPES || ip_type >= IPA_IP_MAX ||
  1784. !entry || !num_entry) {
  1785. IPAERR_RL("Invalid pipe_idx=%u\n", pipe_idx);
  1786. return -EFAULT;
  1787. }
  1788. if (!ipa_is_ep_support_flt(pipe_idx)) {
  1789. IPAERR_RL("pipe %d does not support filtering\n", pipe_idx);
  1790. return -EINVAL;
  1791. }
  1792. flt_tbl_ptr = &ipa3_ctx->flt_tbl[pipe_idx][ip_type];
  1793. /* map IPA SRAM */
  1794. ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
  1795. ipa3_ctx->ctrl->ipa_reg_base_ofst +
  1796. ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
  1797. ipa3_ctx->smem_restricted_bytes / 4),
  1798. ipa3_ctx->smem_sz);
  1799. if (!ipa_sram_mmio) {
  1800. IPAERR("fail to ioremap IPA SRAM\n");
  1801. return -ENOMEM;
  1802. }
  1803. memset(entry, 0, sizeof(*entry) * (*num_entry));
  1804. if (hashable) {
  1805. if (ip_type == IPA_IP_v4)
  1806. hdr_base_ofst =
  1807. IPA_MEM_PART(v4_flt_hash_ofst);
  1808. else
  1809. hdr_base_ofst =
  1810. IPA_MEM_PART(v6_flt_hash_ofst);
  1811. } else {
  1812. if (ip_type == IPA_IP_v4)
  1813. hdr_base_ofst =
  1814. IPA_MEM_PART(v4_flt_nhash_ofst);
  1815. else
  1816. hdr_base_ofst =
  1817. IPA_MEM_PART(v6_flt_nhash_ofst);
  1818. }
  1819. /* calculate the index of the tbl entry */
  1820. tbl_entry_idx = 1; /* skip the bitmap */
  1821. for (i = 0; i < pipe_idx; i++)
  1822. if (ipa3_ctx->ep_flt_bitmap & (1 << i))
  1823. tbl_entry_idx++;
  1824. IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
  1825. hdr_base_ofst, tbl_entry_idx);
  1826. res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
  1827. tbl_entry_idx, &tbl_addr, &is_sys);
  1828. if (res) {
  1829. IPAERR("failed to read table address from header structure\n");
  1830. goto bail;
  1831. }
  1832. IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
  1833. pipe_idx, tbl_addr, is_sys);
  1834. if (!tbl_addr) {
  1835. IPAERR("invalid flt tbl addr\n");
  1836. res = -EFAULT;
  1837. goto bail;
  1838. }
  1839. /* for tables resides in DDR access it from the virtual memory */
  1840. if (is_sys) {
  1841. sys_tbl_mem =
  1842. &flt_tbl_ptr->curr_mem[hashable ? IPA_RULE_HASHABLE :
  1843. IPA_RULE_NON_HASHABLE];
  1844. if (sys_tbl_mem->phys_base &&
  1845. sys_tbl_mem->phys_base != tbl_addr) {
  1846. IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
  1847. tbl_addr, &sys_tbl_mem->phys_base);
  1848. }
  1849. if (sys_tbl_mem->phys_base)
  1850. rule_addr = sys_tbl_mem->base;
  1851. else
  1852. rule_addr = NULL;
  1853. } else {
  1854. rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
  1855. }
  1856. IPADBG("First rule addr 0x%pK\n", rule_addr);
  1857. if (!rule_addr) {
  1858. /* Modem table in system memory or empty table */
  1859. *num_entry = 0;
  1860. goto bail;
  1861. }
  1862. rule_idx = 0;
  1863. while (rule_idx < *num_entry) {
  1864. res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
  1865. if (res) {
  1866. IPAERR("failed parsing flt rule\n");
  1867. goto bail;
  1868. }
  1869. IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
  1870. if (!entry[rule_idx].rule_size)
  1871. break;
  1872. rule_addr += entry[rule_idx].rule_size;
  1873. rule_idx++;
  1874. }
  1875. *num_entry = rule_idx;
  1876. bail:
  1877. iounmap(ipa_sram_mmio);
  1878. return 0;
  1879. }