ipa_flt.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include "ipahal/ipahal.h"
  7. #include "ipahal/ipahal_fltrt.h"
/* Sentinel status values (-1) flagging per-rule add/del/modify failures */
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
/* Map a filter entry to its table partition by the rule's hashable flag */
#define IPA_FLT_GET_RULE_TYPE(__entry) \
( \
((__entry)->rule.hashable) ? \
(IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
)
  16. /**
  17. * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
  18. * @ip: the ip address family type
  19. * @entry: filtering entry
  20. * @buf: output buffer, buf == NULL means
  21. * caller wants to know the size of the rule as seen
  22. * by HW so they did not pass a valid buffer, we will use a
  23. * scratch buffer instead.
  24. * With this scheme we are going to
  25. * generate the rule twice, once to know size using scratch
  26. * buffer and second to write the rule to the actual caller
  27. * supplied buffer which is of required size
  28. *
  29. * Returns: 0 on success, negative on failure
  30. *
  31. * caller needs to hold any needed locks to ensure integrity
  32. *
  33. */
  34. static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
  35. struct ipa3_flt_entry *entry, u8 *buf)
  36. {
  37. struct ipahal_flt_rule_gen_params gen_params;
  38. int res = 0;
  39. memset(&gen_params, 0, sizeof(gen_params));
  40. if (entry->rule.hashable) {
  41. if (entry->rule.attrib.attrib_mask & IPA_FLT_IS_PURE_ACK
  42. && !entry->rule.eq_attrib_type) {
  43. IPAERR_RL("PURE_ACK rule atrb used with hash rule\n");
  44. WARN_ON_RATELIMIT_IPA(1);
  45. return -EPERM;
  46. }
  47. /*
  48. * tos_eq_present field has two meanings:
  49. * tos equation for IPA ver < 4.5 (as the field name reveals)
  50. * pure_ack equation for IPA ver >= 4.5
  51. */
  52. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
  53. entry->rule.eq_attrib_type &&
  54. entry->rule.eq_attrib.tos_eq_present) {
  55. IPAERR_RL("PURE_ACK rule eq used with hash rule\n");
  56. return -EPERM;
  57. }
  58. }
  59. gen_params.ipt = ip;
  60. if (entry->rt_tbl)
  61. gen_params.rt_tbl_idx = entry->rt_tbl->idx;
  62. else
  63. gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
  64. gen_params.priority = entry->prio;
  65. gen_params.id = entry->rule_id;
  66. gen_params.rule = (const struct ipa_flt_rule_i *)&entry->rule;
  67. gen_params.cnt_idx = entry->cnt_idx;
  68. res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
  69. if (res) {
  70. IPAERR_RL("failed to generate flt h/w rule\n");
  71. return res;
  72. }
  73. return 0;
  74. }
  75. static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
  76. {
  77. struct ipa3_flt_tbl *tbl;
  78. int i;
  79. IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
  80. for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
  81. if (!ipa_is_ep_support_flt(i))
  82. continue;
  83. tbl = &ipa3_ctx->flt_tbl[i][ip];
  84. if (tbl->prev_mem[rlt].phys_base) {
  85. IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
  86. ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
  87. }
  88. if (list_empty(&tbl->head_flt_rule_list)) {
  89. if (tbl->curr_mem[rlt].phys_base) {
  90. IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
  91. i);
  92. ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
  93. }
  94. }
  95. }
  96. }
  97. /**
  98. * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
  99. * assign priorities to the rules, calculate their sizes and calculate
  100. * the overall table size
  101. * @ip: the ip address family type
  102. * @tbl: the flt tbl to be prepared
  103. * @pipe_idx: the ep pipe appropriate for the given tbl
  104. *
  105. * Return: 0 on success, negative on failure
  106. */
  107. static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
  108. struct ipa3_flt_tbl *tbl, int pipe_idx)
  109. {
  110. struct ipa3_flt_entry *entry;
  111. int prio_i;
  112. int max_prio;
  113. u32 hdr_width;
  114. tbl->sz[IPA_RULE_HASHABLE] = 0;
  115. tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
  116. max_prio = ipahal_get_rule_max_priority();
  117. prio_i = max_prio;
  118. list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
  119. if (entry->rule.max_prio) {
  120. entry->prio = max_prio;
  121. } else {
  122. if (ipahal_rule_decrease_priority(&prio_i)) {
  123. IPAERR("cannot decrease rule priority - %d\n",
  124. prio_i);
  125. return -EPERM;
  126. }
  127. entry->prio = prio_i;
  128. }
  129. if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
  130. IPAERR("failed to calculate HW FLT rule size\n");
  131. return -EPERM;
  132. }
  133. IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
  134. pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
  135. if (entry->rule.hashable)
  136. tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
  137. else
  138. tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
  139. }
  140. if ((tbl->sz[IPA_RULE_HASHABLE] +
  141. tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
  142. IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
  143. pipe_idx);
  144. return 0;
  145. }
  146. hdr_width = ipahal_get_hw_tbl_hdr_width();
  147. /* for the header word */
  148. if (tbl->sz[IPA_RULE_HASHABLE])
  149. tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
  150. if (tbl->sz[IPA_RULE_NON_HASHABLE])
  151. tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
  152. IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
  153. tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
  154. return 0;
  155. }
/**
 * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
 * (rules and tables) to HW format and fill it in the given buffers
 * @ip: the ip address family type
 * @rlt: the type of the rules to translate (hashable or non-hashable)
 * @base: the rules body buffer to be filled
 * @hdr: the rules header (addresses/offsets) buffer to be filled
 * @body_ofst: the offset of the rules body from the rules header at
 * ipa sram
 *
 * Per filtering-capable pipe: a system (DDR) table gets its own DMA
 * allocation and its physical address written into the header slot; a
 * local (SRAM) table has its rules appended into @base back-to-back and
 * its SRAM offset written into the header slot.
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 */
static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
{
	u64 offset;
	u8 *body_i;	/* write cursor into the local-table body buffer */
	int res;
	struct ipa3_flt_entry *entry;
	u8 *tbl_mem_buf;
	struct ipa_mem_buffer tbl_mem;
	struct ipa3_flt_tbl *tbl;
	int i;
	int hdr_idx = 0;	/* header slot index among flt-capable pipes */

	body_i = base;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (tbl->sz[rlt] == 0) {
			/* empty table still consumes a header slot */
			hdr_idx++;
			continue;
		}
		if (tbl->in_sys[rlt]) {
			/* only body (no header) */
			tbl_mem.size = tbl->sz[rlt] -
				ipahal_get_hw_tbl_hdr_width();
			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
				IPAERR("fail to alloc sys tbl of size %d\n",
					tbl_mem.size);
				goto err;
			}
			/* point the header slot at the DDR table */
			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
				hdr, hdr_idx, true)) {
				IPAERR("fail to wrt sys tbl addr to hdr\n");
				goto hdr_update_fail;
			}
			tbl_mem_buf = tbl_mem.base;
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
				link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, tbl_mem_buf);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto hdr_update_fail;
				}
				tbl_mem_buf += entry->hw_len;
			}
			/*
			 * keep the outgoing table as prev_mem so it can be
			 * reaped after the commit lands; at most one
			 * generation may be pending
			 */
			if (tbl->curr_mem[rlt].phys_base) {
				WARN_ON(tbl->prev_mem[rlt].phys_base);
				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
			}
			tbl->curr_mem[rlt] = tbl_mem;
		} else {
			offset = body_i - base + body_ofst;
			/* update the hdr at the right index */
			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
				hdr_idx, true)) {
				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
				goto hdr_update_fail;
			}
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
				link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, body_i);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto err;
				}
				body_i += entry->hw_len;
			}
			/*
			 * advance body_i to next table alignment as local
			 * tables are order back-to-back
			 */
			body_i += ipahal_get_lcl_tbl_addr_alignment();
			body_i = (u8 *)((long)body_i &
				~ipahal_get_lcl_tbl_addr_alignment());
		}
		hdr_idx++;
	}
	return 0;

hdr_update_fail:
	ipahal_free_dma_mem(&tbl_mem);
err:
	return -EPERM;
}
  262. /**
  263. * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
  264. * headers and bodies are being created into buffers that will be filled into
  265. * the local memory (sram)
  266. * @ip: the ip address family type
  267. * @alloc_params: In and Out parameters for the allocations of the buffers
  268. * 4 buffers: hdr and bdy, each hashable and non-hashable
  269. *
  270. * Return: 0 on success, negative on failure
  271. */
  272. static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
  273. struct ipahal_fltrt_alloc_imgs_params *alloc_params)
  274. {
  275. u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
  276. int rc = 0;
  277. if (ip == IPA_IP_v4) {
  278. nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
  279. IPA_MEM_PART(v4_flt_nhash_ofst);
  280. hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
  281. IPA_MEM_PART(v4_flt_hash_ofst);
  282. } else {
  283. nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
  284. IPA_MEM_PART(v6_flt_nhash_ofst);
  285. hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
  286. IPA_MEM_PART(v6_flt_hash_ofst);
  287. }
  288. if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
  289. IPAERR_RL("fail to allocate FLT HW TBL images. IP %d\n", ip);
  290. rc = -ENOMEM;
  291. goto allocate_failed;
  292. }
  293. if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
  294. alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
  295. hash_bdy_start_ofst)) {
  296. IPAERR_RL("fail to translate hashable flt tbls to hw format\n");
  297. rc = -EPERM;
  298. goto translate_fail;
  299. }
  300. if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
  301. alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
  302. nhash_bdy_start_ofst)) {
  303. IPAERR_RL("fail to translate non-hash flt tbls to hw format\n");
  304. rc = -EPERM;
  305. goto translate_fail;
  306. }
  307. return rc;
  308. translate_fail:
  309. if (alloc_params->hash_hdr.size)
  310. ipahal_free_dma_mem(&alloc_params->hash_hdr);
  311. ipahal_free_dma_mem(&alloc_params->nhash_hdr);
  312. if (alloc_params->hash_bdy.size)
  313. ipahal_free_dma_mem(&alloc_params->hash_bdy);
  314. if (alloc_params->nhash_bdy.size)
  315. ipahal_free_dma_mem(&alloc_params->nhash_bdy);
  316. allocate_failed:
  317. return rc;
  318. }
  319. /**
  320. * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt
  321. * tbl bodies at the sram is enough for the commit
  322. * @ipt: the ip address family type
  323. * @rlt: the rule type (hashable or non-hashable)
  324. *
  325. * Return: true if enough space available or false in other cases
  326. */
  327. static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
  328. enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
  329. {
  330. u16 avail;
  331. if (!bdy) {
  332. IPAERR("Bad parameters, bdy = NULL\n");
  333. return false;
  334. }
  335. if (ipt == IPA_IP_v4)
  336. avail = (rlt == IPA_RULE_HASHABLE) ?
  337. IPA_MEM_PART(apps_v4_flt_hash_size) :
  338. IPA_MEM_PART(apps_v4_flt_nhash_size);
  339. else
  340. avail = (rlt == IPA_RULE_HASHABLE) ?
  341. IPA_MEM_PART(apps_v6_flt_hash_size) :
  342. IPA_MEM_PART(apps_v6_flt_nhash_size);
  343. if (bdy->size <= avail)
  344. return true;
  345. IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
  346. bdy->size, avail, ipt, rlt);
  347. return false;
  348. }
  349. /**
  350. * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
  351. * payload pointers buffers for headers and bodies of flt structure
  352. * as well as place for flush imm.
  353. * @ipt: the ip address family type
  354. * @entries: the number of entries
  355. * @desc: [OUT] descriptor buffer
  356. * @cmd: [OUT] imm commands payload pointers buffer
  357. *
  358. * Return: 0 on success, negative on failure
  359. */
  360. static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip, u16 entries,
  361. struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
  362. {
  363. *desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
  364. if (*desc == NULL) {
  365. IPAERR("fail to alloc desc blob ip %d\n", ip);
  366. goto fail_desc_alloc;
  367. }
  368. *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
  369. if (*cmd_pyld == NULL) {
  370. IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
  371. goto fail_cmd_alloc;
  372. }
  373. return 0;
  374. fail_cmd_alloc:
  375. kfree(*desc);
  376. fail_desc_alloc:
  377. return -ENOMEM;
  378. }
  379. /**
  380. * ipa_flt_skip_pipe_config() - skip ep flt configuration or not?
  381. * will skip according to pre-configuration or modem pipes
  382. * @pipe: the EP pipe index
  383. *
  384. * Return: true if to skip, false otherwize
  385. */
  386. static bool ipa_flt_skip_pipe_config(int pipe)
  387. {
  388. struct ipa3_ep_context *ep;
  389. if (ipa_is_modem_pipe(pipe)) {
  390. IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
  391. return true;
  392. }
  393. if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
  394. IPADBG_LOW("skip %d\n", pipe);
  395. return true;
  396. }
  397. ep = &ipa3_ctx->ep[pipe];
  398. if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
  399. && ipa3_ctx->modem_cfg_emb_pipe_flt)
  400. && ep->client == IPA_CLIENT_APPS_WAN_PROD) {
  401. IPADBG_LOW("skip %d\n", pipe);
  402. return true;
  403. }
  404. return false;
  405. }
/**
 * __ipa_commit_flt_v3() - commit flt tables to the hw
 * commit the headers and the bodies if are local with internal cache flushing.
 * The headers (and local bodies) will first be created into dma buffers and
 * then written via IC to the SRAM
 * @ip: the ip address family type
 *
 * Sequence: size/prioritize all tables, build the DMA images, then issue
 * one immediate-command batch: optional hash-cache flush, per-pipe header
 * writes, and the local body copies. Finally reap retired DDR tables.
 *
 * Return: 0 on success, negative on failure
 */
int __ipa_commit_flt_v3(enum ipa_ip_type ip)
{
	struct ipahal_fltrt_alloc_imgs_params alloc_params;
	int rc = 0;
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int num_cmd = 0;	/* commands actually constructed so far */
	int i;
	int hdr_idx;	/* header slot index among flt-capable pipes */
	u32 lcl_hash_hdr, lcl_nhash_hdr;	/* SRAM header addresses */
	u32 lcl_hash_bdy, lcl_nhash_bdy;	/* SRAM body addresses */
	bool lcl_hash, lcl_nhash;	/* bodies reside in local SRAM? */
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;
	u32 tbl_hdr_width;
	struct ipa3_flt_tbl *tbl;
	u16 entries;	/* capacity of the desc/cmd_pyld arrays */

	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
	memset(&alloc_params, 0, sizeof(alloc_params));
	alloc_params.ipt = ip;
	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;

	/* resolve SRAM targets for the chosen IP family */
	if (ip == IPA_IP_v4) {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
	} else {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
	}

	/* pass 1: prioritize/size every table and tally local-table totals */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
			rc = -EPERM;
			goto prep_failed;
		}
		/* local tables don't carry the header word in the body image */
		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
			tbl->sz[IPA_RULE_HASHABLE]) {
			alloc_params.num_lcl_hash_tbls++;
			alloc_params.total_sz_lcl_hash_tbls +=
				tbl->sz[IPA_RULE_HASHABLE];
			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
		}
		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
			tbl->sz[IPA_RULE_NON_HASHABLE]) {
			alloc_params.num_lcl_nhash_tbls++;
			alloc_params.total_sz_lcl_nhash_tbls +=
				tbl->sz[IPA_RULE_NON_HASHABLE];
			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
		}
	}

	/* pass 2: build the DMA-resident header/body images */
	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
		IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
		rc = -EFAULT;
		goto prep_failed;
	}
	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
		&alloc_params.hash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}
	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
		&alloc_params.nhash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}

	/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
	if (ipa_flt_alloc_cmd_buffers(ip, entries, &desc, &cmd_pyld)) {
		rc = -ENOMEM;
		goto fail_size_valid;
	}

	/*
	 * SRAM memory not allocated to hash tables. Sending
	 * command to hash tables(filer/routing) operation not supported.
	 */
	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
		/* flushing ipa internal hashable flt rules cache */
		memset(&flush, 0, sizeof(flush));
		if (ip == IPA_IP_v4)
			flush.v4_flt = true;
		else
			flush.v6_flt = true;
		ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
		reg_write_cmd.skip_pipeline_clear = false;
		reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_cmd.offset = ipahal_get_reg_ofst(
			IPA_FILT_ROUT_HASH_FLUSH);
		reg_write_cmd.value = valmask.val;
		reg_write_cmd.value_mask = valmask.mask;
		cmd_pyld[0] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd,
			false);
		if (!cmd_pyld[0]) {
			IPAERR(
			"fail construct register_write imm cmd: IP %d\n", ip);
			rc = -EFAULT;
			goto fail_reg_write_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}

	/* per-pipe header writes (nhash always, hash when supported) */
	hdr_idx = 0;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i)) {
			IPADBG_LOW("skip %d - not filtering pipe\n", i);
			continue;
		}
		if (ipa_flt_skip_pipe_config(i)) {
			/* skipped pipes still occupy a header slot */
			hdr_idx++;
			continue;
		}
		/* room needed for up to two commands for this pipe */
		if (num_cmd + 1 >= entries) {
			IPAERR("number of commands is out of range: IP = %d\n",
				ip);
			rc = -ENOBUFS;
			goto fail_imm_cmd_construct;
		}
		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
			hdr_idx, i);
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = tbl_hdr_width;
		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
			hdr_idx * tbl_hdr_width;
		mem_cmd.local_addr = lcl_nhash_hdr +
			hdr_idx * tbl_hdr_width;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
		/*
		 * SRAM memory not allocated to hash tables. Sending command
		 * to hash tables(filer/routing) operation not supported.
		 */
		if (!ipa3_ctx->ipa_fltrt_not_hashable) {
			mem_cmd.is_read = false;
			mem_cmd.skip_pipeline_clear = false;
			mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			mem_cmd.size = tbl_hdr_width;
			mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
				hdr_idx * tbl_hdr_width;
			mem_cmd.local_addr = lcl_hash_hdr +
				hdr_idx * tbl_hdr_width;
			cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&mem_cmd, false);
			if (!cmd_pyld[num_cmd]) {
				IPAERR(
				"fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
				rc = -ENOMEM;
				goto fail_imm_cmd_construct;
			}
			ipa3_init_imm_cmd_desc(&desc[num_cmd],
				cmd_pyld[num_cmd]);
			++num_cmd;
		}
		++hdr_idx;
	}

	/* copy local (SRAM-resident) body images, when present */
	if (lcl_nhash) {
		if (num_cmd >= entries) {
			IPAERR("number of commands is out of range: IP = %d\n",
				ip);
			rc = -ENOBUFS;
			goto fail_imm_cmd_construct;
		}
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.nhash_bdy.size;
		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
		mem_cmd.local_addr = lcl_nhash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}
	if (lcl_hash) {
		if (num_cmd >= entries) {
			IPAERR("number of commands is out of range: IP = %d\n",
				ip);
			rc = -ENOBUFS;
			goto fail_imm_cmd_construct;
		}
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.hash_bdy.size;
		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
		mem_cmd.local_addr = lcl_hash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
		++num_cmd;
	}

	/* fire the whole batch at once */
	if (ipa3_send_cmd(num_cmd, desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
		goto fail_imm_cmd_construct;
	}

	IPADBG_LOW("Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
	IPADBG_LOW("Non-Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
	if (alloc_params.hash_bdy.size) {
		IPADBG_LOW("Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
			alloc_params.hash_bdy.phys_base,
			alloc_params.hash_bdy.size);
	}
	if (alloc_params.nhash_bdy.size) {
		IPADBG_LOW("Non-Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
			alloc_params.nhash_bdy.phys_base,
			alloc_params.nhash_bdy.size);
	}

	/* commit landed: retire superseded DDR-resident tables */
	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);

	/* cleanup runs on both success and failure paths from here down */
fail_imm_cmd_construct:
	for (i = 0 ; i < num_cmd ; i++)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_reg_write_construct:
	kfree(desc);
	kfree(cmd_pyld);
fail_size_valid:
	if (alloc_params.hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params.hash_hdr);
	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
	if (alloc_params.hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.hash_bdy);
	if (alloc_params.nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
prep_failed:
	return rc;
}
/*
 * __ipa_validate_flt_rule() - Sanity-check a filtering rule before it is
 * added or modified.
 * @rule: rule to validate
 * @rt_tbl: [out] routing table resolved from rule->rt_tbl_hdl; written only
 *	when the rule routes via a table handle, left untouched otherwise
 * @ip: IP family the rule belongs to (selects the modem RT index range)
 *
 * Return: 0 when the rule is acceptable, -EPERM on any validation failure.
 */
static int __ipa_validate_flt_rule(const struct ipa_flt_rule_i *rule,
	struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
{
	int index;

	if (rule->action != IPA_PASS_TO_EXCEPTION) {
		if (!rule->eq_attrib_type) {
			/* rule routes via a table handle: resolve the handle
			 * and verify the object is a live RT table
			 */
			if (!rule->rt_tbl_hdl) {
				IPAERR_RL("invalid RT tbl\n");
				goto error;
			}
			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
			if (*rt_tbl == NULL) {
				IPAERR_RL("RT tbl not found\n");
				goto error;
			}
			if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
				IPAERR_RL("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			/* rule routes via a raw index: it must not exceed the
			 * modem-owned RT index range for this IP family
			 */
			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR_RL("invalid RT tbl\n");
				goto error;
			}
		}
	} else {
		/* exception rules must not reference an RT table index */
		if (rule->rt_tbl_idx > 0) {
			IPAERR_RL("invalid RT tbl\n");
			goto error;
		}
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		/* a non-zero PDN index is only meaningful for NAT actions */
		if (rule->pdn_idx) {
			if (rule->action == IPA_PASS_TO_EXCEPTION ||
				rule->action == IPA_PASS_TO_ROUTING) {
				IPAERR_RL(
				"PDN index should be 0 when action is not pass to NAT\n");
				goto error;
			} else {
				if (rule->pdn_idx >= IPA_MAX_PDN_NUM) {
					IPAERR_RL("PDN index %d is too large\n",
						rule->pdn_idx);
					goto error;
				}
			}
		}
	}

	/* caller-supplied rule ids must fall in the window reserved for
	 * explicit ids; ids below the hi bit are auto-generated by the driver
	 */
	if (rule->rule_id) {
		if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) ||
			(rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
			IPAERR_RL("invalid rule_id provided 0x%x\n"
				"rule_id with bit 0x%x are auto generated\n",
				rule->rule_id, ipahal_get_rule_id_hi_bit());
			goto error;
		}
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
		/* HW counter index must be valid and already allocated.
		 * cnt_idx is 1-based; used_hw[] is indexed from 0.
		 */
		if (rule->enable_stats && rule->cnt_idx) {
			if (!ipahal_is_rule_cnt_id_valid(rule->cnt_idx)) {
				IPAERR_RL(
					"invalid cnt_idx %hhu out of range\n",
					rule->cnt_idx);
				goto error;
			}
			index = rule->cnt_idx - 1;
			if (!ipa3_ctx->flt_rt_counters.used_hw[index]) {
				IPAERR_RL(
					"invalid cnt_idx %hhu not alloc by driver\n",
					rule->cnt_idx);
				goto error;
			}
		}
	} else {
		/* per-rule stats require IPA v4.5 or newer */
		if (rule->enable_stats) {
			IPAERR_RL(
				"enable_stats won't support on ipa_hw_type %d\n",
				ipa3_ctx->ipa_hw_type);
			goto error;
		}
	}
	return 0;

error:
	return -EPERM;
}
  780. static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
  781. const struct ipa_flt_rule_i *rule, struct ipa3_rt_tbl *rt_tbl,
  782. struct ipa3_flt_tbl *tbl, bool user)
  783. {
  784. int id;
  785. *entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
  786. if (!*entry)
  787. goto error;
  788. INIT_LIST_HEAD(&((*entry)->link));
  789. (*entry)->rule = *rule;
  790. (*entry)->cookie = IPA_FLT_COOKIE;
  791. (*entry)->rt_tbl = rt_tbl;
  792. (*entry)->tbl = tbl;
  793. if (rule->rule_id) {
  794. id = rule->rule_id;
  795. } else {
  796. id = ipa3_alloc_rule_id(tbl->rule_ids);
  797. if (id < 0) {
  798. IPAERR_RL("failed to allocate rule id\n");
  799. WARN_ON_RATELIMIT_IPA(1);
  800. goto rule_id_fail;
  801. }
  802. }
  803. (*entry)->rule_id = id;
  804. (*entry)->ipacm_installed = user;
  805. if (rule->enable_stats)
  806. (*entry)->cnt_idx = rule->cnt_idx;
  807. else
  808. (*entry)->cnt_idx = 0;
  809. return 0;
  810. rule_id_fail:
  811. kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
  812. error:
  813. return -EPERM;
  814. }
  815. static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
  816. struct ipa3_flt_entry *entry, u32 *rule_hdl)
  817. {
  818. int id;
  819. tbl->rule_cnt++;
  820. if (entry->rt_tbl)
  821. entry->rt_tbl->ref_cnt++;
  822. id = ipa3_id_alloc(entry);
  823. if (id < 0) {
  824. IPAERR_RL("failed to add to tree\n");
  825. WARN_ON_RATELIMIT_IPA(1);
  826. goto ipa_insert_failed;
  827. }
  828. *rule_hdl = id;
  829. entry->id = id;
  830. IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
  831. return 0;
  832. ipa_insert_failed:
  833. if (entry->rt_tbl)
  834. entry->rt_tbl->ref_cnt--;
  835. tbl->rule_cnt--;
  836. return -EPERM;
  837. }
/*
 * __ipa_add_flt_rule() - Validate, allocate and link a single filter rule.
 * @tbl: filter table to add the rule to
 * @ip: IP family of @tbl
 * @rule: rule definition to install
 * @add_rear: non-zero to append at the table tail, zero to prepend
 * @rule_hdl: [out] handle allocated for the new rule
 * @user: true when the rule is installed on behalf of userspace
 *
 * Return: 0 on success, -EPERM on any failure.
 */
static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
	const struct ipa_flt_rule_i *rule, u8 add_rear,
	u32 *rule_hdl, bool user)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user))
		goto error;

	if (add_rear) {
		if (tbl->sticky_rear)
			/* keep the sticky last rule in place: insert just
			 * before the current tail element
			 */
			list_add_tail(&entry->link,
					tbl->head_flt_rule_list.prev);
		else
			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
	} else {
		list_add(&entry->link, &tbl->head_flt_rule_list);
	}

	if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
		goto ipa_insert_failed;

	return 0;

ipa_insert_failed:
	list_del(&entry->link);
	/* if rule id was allocated from idr, remove it */
	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
		(entry->rule_id >= ipahal_get_low_rule_id()))
		idr_remove(entry->tbl->rule_ids, entry->rule_id);
	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
error:
	return -EPERM;
}
/*
 * __ipa_add_flt_rule_after() - Insert a rule immediately after a given entry.
 * @tbl: filter table being extended
 * @rule: rule definition to install
 * @rule_hdl: [out] handle allocated for the new rule
 * @ip: IP family of @tbl
 * @add_after_entry: [in/out] anchor entry; on success it is advanced to the
 *	newly inserted rule so a sequence of calls chains rules in order, and
 *	on failure it is set to NULL so all subsequent calls fail fast
 *
 * Return: 0 on success, -EPERM on any failure.
 */
static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
				const struct ipa_flt_rule_i *rule,
				u32 *rule_hdl,
				enum ipa_ip_type ip,
				struct ipa3_flt_entry **add_after_entry)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	/* a previous insertion in the chain failed */
	if (!*add_after_entry)
		goto error;

	if (rule == NULL || rule_hdl == NULL) {
		IPAERR_RL("bad parms rule=%pK rule_hdl=%pK\n", rule,
				rule_hdl);
		goto error;
	}

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true))
		goto error;

	list_add(&entry->link, &((*add_after_entry)->link));

	if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
		goto ipa_insert_failed;

	/*
	 * prepare for next insertion
	 */
	*add_after_entry = entry;

	return 0;

ipa_insert_failed:
	list_del(&entry->link);
	/* if rule id was allocated from idr, remove it */
	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
		(entry->rule_id >= ipahal_get_low_rule_id()))
		idr_remove(entry->tbl->rule_ids, entry->rule_id);
	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
error:
	*add_after_entry = NULL;
	return -EPERM;
}
/*
 * __ipa_del_flt_rule() - Unlink and free a filter rule by handle.
 * @rule_hdl: handle previously returned when the rule was added
 *
 * Drops the table rule count and the RT-table reference, releases the
 * driver-allocated rule id (if any) and removes the handle from the id
 * database.
 *
 * Return: 0 on success, -EINVAL when the handle does not resolve to a
 * valid filter entry.
 */
static int __ipa_del_flt_rule(u32 rule_hdl)
{
	struct ipa3_flt_entry *entry;
	int id;

	entry = ipa3_id_find(rule_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_FLT_COOKIE) {
		IPAERR_RL("bad params\n");
		return -EINVAL;
	}
	/* keep the id: entry is freed before the handle is removed */
	id = entry->id;

	list_del(&entry->link);
	entry->tbl->rule_cnt--;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;
	IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
		entry->tbl->rule_cnt, entry->rule_id);
	/* invalidate the cookie to catch stale-handle use */
	entry->cookie = 0;
	/* if rule id was allocated from idr, remove it */
	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
		(entry->rule_id >= ipahal_get_low_rule_id()))
		idr_remove(entry->tbl->rule_ids, entry->rule_id);

	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(id);

	return 0;
}
/*
 * __ipa_mdfy_flt_rule() - Replace the definition of an existing filter rule.
 * @frule: modification request carrying the rule handle and new definition
 * @ip: IP family of the table the rule lives in
 *
 * The new rule is validated first; on success the old RT-table reference is
 * dropped and the new one (if any) is taken. hw_len/prio are cleared so the
 * rule is re-generated on the next commit.
 *
 * Return: 0 on success, -EPERM on lookup or validation failure.
 */
static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy_i *frule,
	enum ipa_ip_type ip)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	entry = ipa3_id_find(frule->rule_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		goto error;
	}

	if (entry->cookie != IPA_FLT_COOKIE) {
		IPAERR_RL("bad params\n");
		goto error;
	}

	if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip))
		goto error;

	/* swap RT-table references: release the old, take the new */
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;
	entry->rule = frule->rule;
	entry->rt_tbl = rt_tbl;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	/* force re-generation on next commit */
	entry->hw_len = 0;
	entry->prio = 0;
	if (frule->rule.enable_stats)
		entry->cnt_idx = frule->rule.cnt_idx;
	else
		entry->cnt_idx = 0;

	return 0;

error:
	return -EPERM;
}
  970. static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
  971. {
  972. *ipa_ep_idx = ipa3_get_ep_mapping(ep);
  973. if (*ipa_ep_idx < 0) {
  974. IPAERR_RL("ep not valid ep=%d\n", ep);
  975. return -EINVAL;
  976. }
  977. if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
  978. IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
  979. if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
  980. IPAERR("ep do not support filtering ep=%d\n", ep);
  981. return -EINVAL;
  982. }
  983. return 0;
  984. }
/*
 * __ipa_add_ep_flt_rule() - Add a filter rule to the table of a specific
 * endpoint.
 * @ip: IP family selecting the per-pipe table
 * @ep: client whose pipe owns the table
 * @rule: rule definition to install
 * @add_rear: non-zero to append at the table tail, zero to prepend
 * @rule_hdl: [out] handle allocated for the new rule
 * @user: true when the rule is installed on behalf of userspace
 *
 * Return: 0 on success, -EINVAL on bad parameters or unresolvable endpoint,
 * -EPERM when the underlying add fails.
 */
static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
				const struct ipa_flt_rule_i *rule, u8 add_rear,
				u32 *rule_hdl, bool user)
{
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;

	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
		IPAERR_RL("bad parms rule=%pK rule_hdl=%pK ep=%d\n", rule,
				rule_hdl, ep);
		return -EINVAL;
	}

	if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
		return -EINVAL;

	/* bound-check before indexing the per-pipe table array */
	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR_RL("invalid ipa_ep_idx=%d\n", ipa_ep_idx);
		return -EINVAL;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
	IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);

	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
}
  1006. static void __ipa_convert_flt_rule_in(struct ipa_flt_rule rule_in,
  1007. struct ipa_flt_rule_i *rule_out)
  1008. {
  1009. if (unlikely(sizeof(struct ipa_flt_rule) >
  1010. sizeof(struct ipa_flt_rule_i))) {
  1011. IPAERR_RL("invalid size in:%d size out:%d\n",
  1012. sizeof(struct ipa_flt_rule_i),
  1013. sizeof(struct ipa_flt_rule));
  1014. return;
  1015. }
  1016. memset(rule_out, 0, sizeof(struct ipa_flt_rule_i));
  1017. memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
  1018. }
  1019. static void __ipa_convert_flt_rule_out(struct ipa_flt_rule_i rule_in,
  1020. struct ipa_flt_rule *rule_out)
  1021. {
  1022. if (unlikely(sizeof(struct ipa_flt_rule) >
  1023. sizeof(struct ipa_flt_rule_i))) {
  1024. IPAERR_RL("invalid size in:%d size out:%d\n",
  1025. sizeof(struct ipa_flt_rule_i),
  1026. sizeof(struct ipa_flt_rule));
  1027. return;
  1028. }
  1029. memset(rule_out, 0, sizeof(struct ipa_flt_rule));
  1030. memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
  1031. }
  1032. static void __ipa_convert_flt_mdfy_in(struct ipa_flt_rule_mdfy rule_in,
  1033. struct ipa_flt_rule_mdfy_i *rule_out)
  1034. {
  1035. if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
  1036. sizeof(struct ipa_flt_rule_mdfy_i))) {
  1037. IPAERR_RL("invalid size in:%d size out:%d\n",
  1038. sizeof(struct ipa_flt_rule_mdfy),
  1039. sizeof(struct ipa_flt_rule_mdfy_i));
  1040. return;
  1041. }
  1042. memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy_i));
  1043. memcpy(&rule_out->rule, &rule_in.rule,
  1044. sizeof(struct ipa_flt_rule));
  1045. rule_out->rule_hdl = rule_in.rule_hdl;
  1046. rule_out->status = rule_in.status;
  1047. }
  1048. static void __ipa_convert_flt_mdfy_out(struct ipa_flt_rule_mdfy_i rule_in,
  1049. struct ipa_flt_rule_mdfy *rule_out)
  1050. {
  1051. if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
  1052. sizeof(struct ipa_flt_rule_mdfy_i))) {
  1053. IPAERR_RL("invalid size in:%d size out:%d\n",
  1054. sizeof(struct ipa_flt_rule_mdfy),
  1055. sizeof(struct ipa_flt_rule_mdfy_i));
  1056. return;
  1057. }
  1058. memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy));
  1059. memcpy(&rule_out->rule, &rule_in.rule,
  1060. sizeof(struct ipa_flt_rule));
  1061. rule_out->rule_hdl = rule_in.rule_hdl;
  1062. rule_out->status = rule_in.status;
  1063. }
/**
 * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
 * commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
	/* thin wrapper: identical to the usr variant with user_only=false */
	return ipa3_add_flt_rule_usr(rules, false);
}
/**
 * ipa3_add_flt_rule_v2() - Add the specified filtering rules to
 * SW and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
{
	/* thin wrapper: identical to the usr variant with user_only=false */
	return ipa3_add_flt_rule_usr_v2(rules, false);
}
/**
 * ipa3_add_flt_rule_usr() - Add the specified filtering rules to
 * SW and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 * @user_only: [in] indicate rules installed by userspace
 *
 * Per-rule status is written back into @rules; a failed rule does not stop
 * processing of the remaining rules.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
{
	int i;
	int result;
	struct ipa_flt_rule_i rule;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global) {
			/* if hashing not supported, all table entry
			 * are non-hash tables
			 */
			if (ipa3_ctx->ipa_fltrt_not_hashable)
				rules->rules[i].rule.hashable = false;
			/* legacy ioctl: convert to internal format, add,
			 * then copy the (possibly updated) rule back
			 */
			__ipa_convert_flt_rule_in(
				rules->rules[i].rule, &rule);
			result = __ipa_add_ep_flt_rule(rules->ip,
					rules->ep,
					&rule,
					rules->rules[i].at_rear,
					&rules->rules[i].flt_rule_hdl,
					user_only);
			__ipa_convert_flt_rule_out(rule,
				&rules->rules[i].rule);
		} else
			result = -1;

		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	/* global filter rules are not supported on this target */
	if (rules->global) {
		IPAERR_RL("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_add_flt_rule_usr_v2() - Add the specified filtering
 * rules to SW and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 * @user_only: [in] indicate rules installed by userspace
 *
 * v2 entries are already in the internal (_i) layout, so unlike the v1
 * variant no conversion is performed. rules->rules is cast per access;
 * presumably it is a flat array of ipa_flt_rule_add_i as defined by the
 * v2 ioctl header -- verify against the uapi definition.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2
	*rules, bool user_only)
{
	int i;
	int result;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global) {
			/* if hashing not supported, all table entry
			 * are non-hash tables
			 */
			if (ipa3_ctx->ipa_fltrt_not_hashable)
				((struct ipa_flt_rule_add_i *)
				rules->rules)[i].rule.hashable = false;
			result = __ipa_add_ep_flt_rule(rules->ip,
					rules->ep,
					&(((struct ipa_flt_rule_add_i *)
					rules->rules)[i].rule),
					((struct ipa_flt_rule_add_i *)
					rules->rules)[i].at_rear,
					&(((struct ipa_flt_rule_add_i *)
					rules->rules)[i].flt_rule_hdl),
					user_only);
		} else
			result = -1;

		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = 0;
		}
	}

	/* global filter rules are not supported on this target */
	if (rules->global) {
		IPAERR_RL("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
 * the rule which its handle is given and optionally commit to IPA HW
 * @rules: [inout] request carrying the anchor handle and the rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;
	struct ipa_flt_rule_i rule;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR_RL("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	/* NOTE(review): ipa_ep_idx is int, printed with %u below; a negative
	 * value would print as a large unsigned -- cosmetic only
	 */
	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES || ipa_ep_idx < 0) {
		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	/* resolve and sanity-check the anchor entry */
	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_FLT_COOKIE) {
		IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
			entry->cookie, rules->add_after_hdl);
		result = -EINVAL;
		goto bail;
	}

	/* the anchor must belong to this endpoint/IP table */
	if (entry->tbl != tbl) {
		IPAERR_RL("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	/* cannot insert after the sticky last rule */
	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR_RL("cannot add rule at end of a sticky table");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
			rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * we add all rules one after the other, if one insertion fails, it cuts
	 * the chain (all following will receive fail status) following calls to
	 * __ipa_add_flt_rule_after will fail (entry == NULL)
	 */
	for (i = 0; i < rules->num_rules; i++) {
		/* if hashing not supported, all tables are non-hash tables*/
		if (ipa3_ctx->ipa_fltrt_not_hashable)
			rules->rules[i].rule.hashable = false;
		/* legacy ioctl: convert in, add, copy back */
		__ipa_convert_flt_rule_in(
			rules->rules[i].rule, &rule);

		result = __ipa_add_flt_rule_after(tbl,
				&rule,
				&rules->rules[i].flt_rule_hdl,
				rules->ip,
				&entry);

		__ipa_convert_flt_rule_out(rule,
			&rules->rules[i].rule);

		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;

bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_add_flt_rule_after_v2() - Add the specified filtering
 * rules to SW after the rule which its handle is given and
 * optionally commit to IPA HW
 * @rules: [inout] request carrying the anchor handle and the rules to add
 *
 * Same flow as ipa3_add_flt_rule_after() but entries are already in the
 * internal (_i) layout, so no conversion is performed.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after_v2(struct ipa_ioc_add_flt_rule_after_v2
	*rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR_RL("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES ||
		ipa_ep_idx < 0) {
		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	/* resolve and sanity-check the anchor entry */
	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_FLT_COOKIE) {
		IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
			entry->cookie, rules->add_after_hdl);
		result = -EINVAL;
		goto bail;
	}

	/* the anchor must belong to this endpoint/IP table */
	if (entry->tbl != tbl) {
		IPAERR_RL("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	/* cannot insert after the sticky last rule */
	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR_RL("cannot add rule at end of a sticky table");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
			rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * we add all rules one after the other, if one insertion fails, it cuts
	 * the chain (all following will receive fail status) following calls to
	 * __ipa_add_flt_rule_after will fail (entry == NULL)
	 */
	for (i = 0; i < rules->num_rules; i++) {
		/* if hashing not supported, all tables are non-hash tables*/
		if (ipa3_ctx->ipa_fltrt_not_hashable)
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].rule.hashable = false;
		result = __ipa_add_flt_rule_after(tbl,
				&(((struct ipa_flt_rule_add_i *)
				rules->rules)[i].rule),
				&(((struct ipa_flt_rule_add_i *)
				rules->rules)[i].flt_rule_hdl),
				rules->ip,
				&entry);
		if (result) {
			IPAERR_RL("failed to add flt rule %d\n", i);
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			((struct ipa_flt_rule_add_i *)
			rules->rules)[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;

bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
  1413. /**
  1414. * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
  1415. * optionally commit to IPA HW
  1416. *
  1417. * Returns: 0 on success, negative on failure
  1418. *
  1419. * Note: Should not be called from atomic context
  1420. */
  1421. int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
  1422. {
  1423. int i;
  1424. int result;
  1425. if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
  1426. IPAERR_RL("bad param\n");
  1427. return -EINVAL;
  1428. }
  1429. mutex_lock(&ipa3_ctx->lock);
  1430. for (i = 0; i < hdls->num_hdls; i++) {
  1431. if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
  1432. IPAERR_RL("failed to del flt rule %i\n", i);
  1433. hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
  1434. } else {
  1435. hdls->hdl[i].status = 0;
  1436. }
  1437. }
  1438. if (hdls->commit)
  1439. if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
  1440. result = -EPERM;
  1441. goto bail;
  1442. }
  1443. result = 0;
  1444. bail:
  1445. mutex_unlock(&ipa3_ctx->lock);
  1446. return result;
  1447. }
/**
 * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
 * optionally commit to IPA HW
 * @hdls: [inout] modify requests; per-rule status is written back
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
{
	int i;
	int result;
	struct ipa_flt_rule_mdfy_i rule;

	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_rules; i++) {
		/* if hashing not supported, all tables are non-hash tables*/
		if (ipa3_ctx->ipa_fltrt_not_hashable)
			hdls->rules[i].rule.hashable = false;
		/* legacy ioctl: convert into the internal format first */
		__ipa_convert_flt_mdfy_in(hdls->rules[i], &rule);
		if (__ipa_mdfy_flt_rule(&rule, hdls->ip)) {
			IPAERR_RL("failed to mdfy flt rule %i\n", i);
			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
		} else {
			hdls->rules[i].status = 0;
			/* copy back only on success */
			__ipa_convert_flt_mdfy_out(rule, &hdls->rules[i]);
		}
	}

	if (hdls->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
/**
 * ipa3_mdfy_flt_rule_v2() - Modify the specified filtering
 * rules in SW and optionally commit to IPA HW
 * @hdls: [inout] modify requests; per-rule status is written back
 *
 * v2 entries are already in the internal (_i) layout, so no conversion
 * is performed.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_rules; i++) {
		/* if hashing not supported, all tables are non-hash tables*/
		if (ipa3_ctx->ipa_fltrt_not_hashable)
			((struct ipa_flt_rule_mdfy_i *)
			hdls->rules)[i].rule.hashable = false;
		if (__ipa_mdfy_flt_rule(&(((struct ipa_flt_rule_mdfy_i *)
			hdls->rules)[i]), hdls->ip)) {
			IPAERR_RL("failed to mdfy flt rule %i\n", i);
			((struct ipa_flt_rule_mdfy_i *)
			hdls->rules)[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
		} else {
			((struct ipa_flt_rule_mdfy_i *)
			hdls->rules)[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
  1531. /**
  1532. * ipa3_commit_flt() - Commit the current SW filtering table of specified type
  1533. * to IPA HW
  1534. * @ip: [in] the family of routing tables
  1535. *
  1536. * Returns: 0 on success, negative on failure
  1537. *
  1538. * Note: Should not be called from atomic context
  1539. */
  1540. int ipa3_commit_flt(enum ipa_ip_type ip)
  1541. {
  1542. int result;
  1543. if (ip >= IPA_IP_MAX) {
  1544. IPAERR_RL("bad param\n");
  1545. return -EINVAL;
  1546. }
  1547. mutex_lock(&ipa3_ctx->lock);
  1548. if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
  1549. result = -EPERM;
  1550. goto bail;
  1551. }
  1552. result = 0;
  1553. bail:
  1554. mutex_unlock(&ipa3_ctx->lock);
  1555. return result;
  1556. }
/**
 * ipa3_reset_flt() - Reset the current SW filtering table of specified type
 * (does not commit to HW)
 * @ip: [in] the family of routing tables
 * @user_only: [in] indicate rules deleted by userspace
 *
 * Despite the "does not commit" note above, this implementation does commit
 * both IP families at the end (see the commit block below).
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_flt_entry *entry;
	struct ipa3_flt_entry *next;
	int i;
	int id;
	int rule_id;

	if (ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
				link) {
			/* an entry not present in the id database indicates
			 * corruption; abort the whole reset
			 */
			if (ipa3_id_find(entry->id) == NULL) {
				WARN_ON_RATELIMIT_IPA(1);
				mutex_unlock(&ipa3_ctx->lock);
				return -EFAULT;
			}

			/* delete everything, or only userspace-installed
			 * rules when user_only is set
			 */
			if (!user_only ||
					entry->ipacm_installed) {
				list_del(&entry->link);
				entry->tbl->rule_cnt--;
				if (entry->rt_tbl)
					entry->rt_tbl->ref_cnt--;
				/* if rule id was allocated from idr, remove */
				rule_id = entry->rule_id;
				id = entry->id;
				if ((rule_id < ipahal_get_rule_id_hi_bit()) &&
					(rule_id >= ipahal_get_low_rule_id()))
					idr_remove(entry->tbl->rule_ids,
						rule_id);
				entry->cookie = 0;
				kmem_cache_free(ipa3_ctx->flt_rule_cache,
					entry);

				/* remove the handle from the database */
				ipa3_id_remove(id);
			}
		}
	}

	/* commit the change to IPA-HW */
	if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) ||
		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) {
		IPAERR("fail to commit flt-rule\n");
		WARN_ON_RATELIMIT_IPA(1);
		mutex_unlock(&ipa3_ctx->lock);
		return -EPERM;
	}
	mutex_unlock(&ipa3_ctx->lock);
	return 0;
}
/**
 * ipa3_install_dflt_flt_rules() - Install the default (exception) filter
 * rule at the rear of both the v4 and v6 tables of a pipe.
 * @ipa_ep_idx: pipe index to install the defaults on
 *
 * After installation each table's rear is marked sticky so later appends
 * land before the default rule. Handles are stored in the EP context for
 * later deletion. Both families are committed to HW.
 */
void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_ep_context *ep;
	struct ipa_flt_rule_i rule;

	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
		ipa_assert();
		return;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];

	if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
		IPADBG("cannot add flt rules to non filtering pipe num %d\n",
			ipa_ep_idx);
		return;
	}

	memset(&rule, 0, sizeof(rule));

	mutex_lock(&ipa3_ctx->lock);
	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
	rule.action = IPA_PASS_TO_EXCEPTION;
	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
			&ep->dflt_flt4_rule_hdl, false);
	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
	/* protect the default rule: keep it last in the table */
	tbl->sticky_rear = true;

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
	rule.action = IPA_PASS_TO_EXCEPTION;
	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
			&ep->dflt_flt6_rule_hdl, false);
	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
	/* protect the default rule: keep it last in the table */
	tbl->sticky_rear = true;
	mutex_unlock(&ipa3_ctx->lock);
}
  1655. void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
  1656. {
  1657. struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
  1658. struct ipa3_flt_tbl *tbl;
  1659. mutex_lock(&ipa3_ctx->lock);
  1660. if (ep->dflt_flt4_rule_hdl) {
  1661. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
  1662. __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
  1663. ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
  1664. /* Reset the sticky flag. */
  1665. tbl->sticky_rear = false;
  1666. ep->dflt_flt4_rule_hdl = 0;
  1667. }
  1668. if (ep->dflt_flt6_rule_hdl) {
  1669. tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
  1670. __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
  1671. ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
  1672. /* Reset the sticky flag. */
  1673. tbl->sticky_rear = false;
  1674. ep->dflt_flt6_rule_hdl = 0;
  1675. }
  1676. mutex_unlock(&ipa3_ctx->lock);
  1677. }
  1678. /**
  1679. * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
  1680. * Pipe must be for AP EP (not modem) and support filtering
  1681. * updates the the filtering masking values without changing the rt ones.
  1682. *
  1683. * @pipe_idx: filter pipe index to configure the tuple masking
  1684. * @tuple: the tuple members masking
  1685. * Returns: 0 on success, negative on failure
  1686. *
  1687. */
  1688. int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
  1689. {
  1690. struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
  1691. if (!tuple) {
  1692. IPAERR_RL("bad tuple\n");
  1693. return -EINVAL;
  1694. }
  1695. if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
  1696. IPAERR("bad pipe index!\n");
  1697. return -EINVAL;
  1698. }
  1699. if (!ipa_is_ep_support_flt(pipe_idx)) {
  1700. IPAERR("pipe %d not filtering pipe\n", pipe_idx);
  1701. return -EINVAL;
  1702. }
  1703. if (ipa_is_modem_pipe(pipe_idx)) {
  1704. IPAERR("modem pipe tuple is not configured by AP\n");
  1705. return -EINVAL;
  1706. }
  1707. ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
  1708. pipe_idx, &fltrt_tuple);
  1709. fltrt_tuple.flt = *tuple;
  1710. ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
  1711. pipe_idx, &fltrt_tuple);
  1712. return 0;
  1713. }
  1714. /**
  1715. * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW
  1716. * @pipe_idx: IPA endpoint index
  1717. * @ip_type: IPv4 or IPv6 table
  1718. * @hashable: hashable or non-hashable table
  1719. * @entry: array to fill the table entries
  1720. * @num_entry: number of entries in entry array. set by the caller to indicate
  1721. * entry array size. Then set by this function as an output parameter to
  1722. * indicate the number of entries in the array
  1723. *
  1724. * This function reads the filtering table from IPA SRAM and prepares an array
  1725. * of entries. This function is mainly used for debugging purposes.
  1726. *
  1727. * If empty table or Modem Apps table, zero entries will be returned.
  1728. *
  1729. * Returns: 0 on success, negative on failure
  1730. */
  1731. int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
  1732. bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
  1733. {
  1734. void *ipa_sram_mmio;
  1735. u64 hdr_base_ofst;
  1736. int tbl_entry_idx;
  1737. int i;
  1738. int res = 0;
  1739. u64 tbl_addr;
  1740. bool is_sys;
  1741. u8 *rule_addr;
  1742. struct ipa_mem_buffer *sys_tbl_mem;
  1743. int rule_idx;
  1744. struct ipa3_flt_tbl *flt_tbl_ptr;
  1745. IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%pK num_entry=0x%pK\n",
  1746. pipe_idx, ip_type, hashable, entry, num_entry);
  1747. /*
  1748. * SRAM memory not allocated to hash tables. Reading of hash table
  1749. * rules operation not supported
  1750. */
  1751. if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) {
  1752. IPAERR_RL("Reading hashable rules not supported\n");
  1753. *num_entry = 0;
  1754. return 0;
  1755. }
  1756. if (pipe_idx >= ipa3_ctx->ipa_num_pipes ||
  1757. pipe_idx >= IPA3_MAX_NUM_PIPES || ip_type >= IPA_IP_MAX ||
  1758. !entry || !num_entry) {
  1759. IPAERR_RL("Invalid pipe_idx=%u\n", pipe_idx);
  1760. return -EFAULT;
  1761. }
  1762. if (!ipa_is_ep_support_flt(pipe_idx)) {
  1763. IPAERR_RL("pipe %d does not support filtering\n", pipe_idx);
  1764. return -EINVAL;
  1765. }
  1766. flt_tbl_ptr = &ipa3_ctx->flt_tbl[pipe_idx][ip_type];
  1767. /* map IPA SRAM */
  1768. ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
  1769. ipa3_ctx->ctrl->ipa_reg_base_ofst +
  1770. ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
  1771. ipa3_ctx->smem_restricted_bytes / 4),
  1772. ipa3_ctx->smem_sz);
  1773. if (!ipa_sram_mmio) {
  1774. IPAERR("fail to ioremap IPA SRAM\n");
  1775. return -ENOMEM;
  1776. }
  1777. memset(entry, 0, sizeof(*entry) * (*num_entry));
  1778. if (hashable) {
  1779. if (ip_type == IPA_IP_v4)
  1780. hdr_base_ofst =
  1781. IPA_MEM_PART(v4_flt_hash_ofst);
  1782. else
  1783. hdr_base_ofst =
  1784. IPA_MEM_PART(v6_flt_hash_ofst);
  1785. } else {
  1786. if (ip_type == IPA_IP_v4)
  1787. hdr_base_ofst =
  1788. IPA_MEM_PART(v4_flt_nhash_ofst);
  1789. else
  1790. hdr_base_ofst =
  1791. IPA_MEM_PART(v6_flt_nhash_ofst);
  1792. }
  1793. /* calculate the index of the tbl entry */
  1794. tbl_entry_idx = 1; /* skip the bitmap */
  1795. for (i = 0; i < pipe_idx; i++)
  1796. if (ipa3_ctx->ep_flt_bitmap & (1 << i))
  1797. tbl_entry_idx++;
  1798. IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
  1799. hdr_base_ofst, tbl_entry_idx);
  1800. res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
  1801. tbl_entry_idx, &tbl_addr, &is_sys);
  1802. if (res) {
  1803. IPAERR("failed to read table address from header structure\n");
  1804. goto bail;
  1805. }
  1806. IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
  1807. pipe_idx, tbl_addr, is_sys);
  1808. if (!tbl_addr) {
  1809. IPAERR("invalid flt tbl addr\n");
  1810. res = -EFAULT;
  1811. goto bail;
  1812. }
  1813. /* for tables resides in DDR access it from the virtual memory */
  1814. if (is_sys) {
  1815. sys_tbl_mem =
  1816. &flt_tbl_ptr->curr_mem[hashable ? IPA_RULE_HASHABLE :
  1817. IPA_RULE_NON_HASHABLE];
  1818. if (sys_tbl_mem->phys_base &&
  1819. sys_tbl_mem->phys_base != tbl_addr) {
  1820. IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
  1821. tbl_addr, &sys_tbl_mem->phys_base);
  1822. }
  1823. if (sys_tbl_mem->phys_base)
  1824. rule_addr = sys_tbl_mem->base;
  1825. else
  1826. rule_addr = NULL;
  1827. } else {
  1828. rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
  1829. }
  1830. IPADBG("First rule addr 0x%pK\n", rule_addr);
  1831. if (!rule_addr) {
  1832. /* Modem table in system memory or empty table */
  1833. *num_entry = 0;
  1834. goto bail;
  1835. }
  1836. rule_idx = 0;
  1837. while (rule_idx < *num_entry) {
  1838. res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
  1839. if (res) {
  1840. IPAERR("failed parsing flt rule\n");
  1841. goto bail;
  1842. }
  1843. IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
  1844. if (!entry[rule_idx].rule_size)
  1845. break;
  1846. rule_addr += entry[rule_idx].rule_size;
  1847. rule_idx++;
  1848. }
  1849. *num_entry = rule_idx;
  1850. bail:
  1851. iounmap(ipa_sram_mmio);
  1852. return 0;
  1853. }