ipa_hdr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"

static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64 };
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64 };

#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)

/**
 * ipa3_generate_hdr_hw_tbl() - generates the headers table
 * @mem: [out] buffer to put the header table
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
{
	struct ipa3_hdr_entry *entry;
	gfp_t flag = GFP_KERNEL;

	mem->size = ipa3_ctx->hdr_tbl.end;
	if (mem->size == 0) {
		IPAERR("hdr tbl empty\n");
		return -EPERM;
	}
	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);

alloc:
	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, flag);
	if (!mem->base) {
		if (flag == GFP_KERNEL) {
			flag = GFP_ATOMIC;
			goto alloc;
		}
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (entry->is_hdr_proc_ctx)
			continue;
		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
			entry->offset_entry->offset);
		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
			entry->hdr, entry->hdr_len);
	}

	return 0;
}

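/**
 * ipa3_hdr_proc_ctx_to_hw_format() - pack the SW processing context list
 * into a DMA buffer in the format expected by HW
 * @mem: [in] DMA buffer that receives the packed processing contexts
 * @hdr_base_addr: [in] base address of the header table the contexts point
 * into (local SRAM offset or system memory address)
 *
 * For L2TP contexts with a valid destination pipe, the header-remove
 * parameters are refreshed from the destination endpoint configuration
 * before packing.
 *
 * Returns: 0 on success, negative on failure
 */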
static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
	u64 hdr_base_addr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	int ret;
	int ep;
	struct ipa_ep_cfg *cfg_ptr;
	struct ipa_l2tp_header_remove_procparams *l2p_hdr_rm_ptr;

	list_for_each_entry(entry,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
			link) {
		IPADBG_LOW("processing type %d ofst=%d\n",
			entry->type, entry->offset_entry->offset);

		if (entry->l2tp_params.is_dst_pipe_valid) {
			ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
			if (ep >= 0) {
				cfg_ptr = &ipa3_ctx->ep[ep].cfg;
				l2p_hdr_rm_ptr =
					&entry->l2tp_params.hdr_remove_param;
				l2p_hdr_rm_ptr->hdr_ofst_pkt_size_valid =
					cfg_ptr->hdr.hdr_ofst_pkt_size_valid;
				l2p_hdr_rm_ptr->hdr_ofst_pkt_size =
					cfg_ptr->hdr.hdr_ofst_pkt_size;
				l2p_hdr_rm_ptr->hdr_endianness =
					cfg_ptr->hdr_ext.hdr_little_endian ?
					0 : 1;
			}
		}

		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
				entry->offset_entry->offset,
				entry->hdr->hdr_len,
				entry->hdr->is_hdr_proc_ctx,
				entry->hdr->phys_base,
				hdr_base_addr,
				entry->hdr->offset_entry,
				&entry->l2tp_params,
				&entry->generic_params,
				ipa3_ctx->use_64_bit_dma_mask);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: [in] system memory address of the header table
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 * Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u64 hdr_base_addr;
	gfp_t flag = GFP_KERNEL;

	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

alloc:
	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, flag);
	if (!mem->base) {
		if (flag == GFP_KERNEL) {
			flag = GFP_ATOMIC;
			goto alloc;
		}
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}

/**
 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa_commit_hdr_v3_0(void)
{
	struct ipa3_desc desc[3];
	struct ipa_mem_buffer hdr_mem;
	struct ipa_mem_buffer ctx_mem;
	struct ipa_mem_buffer aligned_ctx_mem;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
	int rc = -EFAULT;
	int i;
	int num_cmd = 0;
	u32 proc_ctx_size;
	u32 proc_ctx_ofst;
	u32 proc_ctx_size_ddr;
	struct ipahal_imm_cmd_register_write reg_write_coal_close;
	struct ipahal_reg_valmask valmask;

	memset(desc, 0, 3 * sizeof(struct ipa3_desc));

	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
		IPAERR("fail to generate HDR HW TBL\n");
		goto end;
	}

	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
			&aligned_ctx_mem)) {
		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
		goto end;
	}

	/* IC to close the coal frame before HPS Clear if coal is enabled */
	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
		reg_write_coal_close.skip_pipeline_clear = false;
		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		reg_write_coal_close.offset = ipahal_get_reg_ofst(
			IPA_AGGR_FORCE_CLOSE);
		ipahal_get_aggr_force_close_valmask(i, &valmask);
		reg_write_coal_close.value = valmask.val;
		reg_write_coal_close.value_mask = valmask.mask;
		coal_cmd_pyld = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_REGISTER_WRITE,
			&reg_write_coal_close, false);
		if (!coal_cmd_pyld) {
			IPAERR("failed to construct coal close IC\n");
			goto end;
		}
		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
		++num_cmd;
	}

	if (ipa3_ctx->hdr_tbl_lcl) {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size));
			goto end;
		} else {
			dma_cmd_hdr.is_read = false; /* write operation */
			dma_cmd_hdr.skip_pipeline_clear = false;
			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
			dma_cmd_hdr.size = hdr_mem.size;
			dma_cmd_hdr.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				IPA_MEM_PART(apps_hdr_ofst);
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_hdr, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
		}
	} else {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size_ddr));
			goto end;
		} else {
			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_HDR_INIT_SYSTEM,
				&hdr_init_cmd, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct hdr_init_system cmd\n");
				goto end;
			}
		}
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], hdr_cmd_pyld);
	++num_cmd;
	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);

	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		if (aligned_ctx_mem.size > proc_ctx_size) {
			IPAERR("tbl too big needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size);
			goto end;
		} else {
			dma_cmd_ctx.is_read = false; /* Write operation */
			dma_cmd_ctx.skip_pipeline_clear = false;
			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
			dma_cmd_ctx.size = aligned_ctx_mem.size;
			dma_cmd_ctx.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				proc_ctx_ofst;
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_ctx, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
		}
	} else {
		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
			IPAERR("tbl too big, needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size_ddr);
			goto end;
		} else {
			reg_write_cmd.skip_pipeline_clear = false;
			reg_write_cmd.pipeline_clear_options =
				IPAHAL_HPS_CLEAR;
			reg_write_cmd.offset =
				ipahal_get_reg_ofst(
					IPA_SYS_PKT_PROC_CNTXT_BASE);
			reg_write_cmd.value = aligned_ctx_mem.phys_base;
			reg_write_cmd.value_mask =
				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_REGISTER_WRITE,
				&reg_write_cmd, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct register_write cmd\n");
				goto end;
			}
		}
	}
	ipa3_init_imm_cmd_desc(&desc[num_cmd], ctx_cmd_pyld);
	++num_cmd;
	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);

	if (ipa3_send_cmd(num_cmd, desc))
		IPAERR("fail to send immediate command\n");
	else
		rc = 0;

	if (ipa3_ctx->hdr_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
			hdr_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_mem.size,
					ipa3_ctx->hdr_mem.base,
					ipa3_ctx->hdr_mem.phys_base);
			ipa3_ctx->hdr_mem = hdr_mem;
		}
	}

	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
			ctx_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_proc_ctx_mem.size,
					ipa3_ctx->hdr_proc_ctx_mem.base,
					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
		}
	}

end:
	if (coal_cmd_pyld)
		ipahal_destroy_imm_cmd(coal_cmd_pyld);
	if (ctx_cmd_pyld)
		ipahal_destroy_imm_cmd(ctx_cmd_pyld);
	if (hdr_cmd_pyld)
		ipahal_destroy_imm_cmd(hdr_cmd_pyld);
	return rc;
}

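/**
 * __ipa_add_hdr_proc_ctx() - add a single processing context entry to the
 * SW table and bind it to an existing header
 * @proc_ctx: [inout] processing context to add; proc_ctx_hdl is set on
 * success
 * @add_ref_hdr: [in] whether to take a reference on the bound header
 * @user_only: [in] indicate installed from userspace
 *
 * Returns: 0 on success, negative on failure
 */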
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr, bool user_only)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry) {
		IPAERR_RL("hdr_hdl is invalid\n");
		return -EINVAL;
	}
	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
		WARN_ON_RATELIMIT_IPA(1);
		return -EINVAL;
	}
	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	entry->l2tp_params = proc_ctx->l2tp_params;
	entry->generic_params = proc_ctx->generic_params;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_PROC_HDR_COOKIE;
	entry->ipacm_installed = user_only;

	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
	if ((needed_len < 0) ||
		((needed_len > ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
			&&
			(needed_len >
			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]))) {
		IPAERR_RL("unexpected needed len %d\n", needed_len);
		WARN_ON_RATELIMIT_IPA(1);
		goto bad_len;
	}

	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
		bin = IPA_HDR_PROC_CTX_BIN0;
	else
		bin = IPA_HDR_PROC_CTX_BIN1;

	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
	if (list_empty(&htbl->head_free_offset_list[bin])) {
		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
			IPAERR_RL("hdr proc ctx table overflow\n");
			goto bad_len;
		}

		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		offset->ipacm_installed = user_only;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		offset->ipacm_installed = user_only;
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR_RL("failed to alloc id\n");
		WARN_ON_RATELIMIT_IPA(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

ipa_insert_failed:
	list_move(&offset->link,
		&htbl->head_free_offset_list[offset->bin]);
	entry->offset_entry = NULL;
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;

bad_len:
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}

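/**
 * __ipa_add_hdr() - add a single header to the SW table
 * @hdr: [inout] header to add; hdr_hdl is set on success
 * @user: [in] indicate installed from userspace
 *
 * Headers that no longer fit in the local table are DMA-mapped and served
 * through a processing context instead (is_hdr_proc_ctx).
 *
 * Returns: 0 on success, negative on failure
 */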
static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
{
	struct ipa3_hdr_entry *entry;
	struct ipa_hdr_offset_entry *offset = NULL;
	u32 bin;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	int id;
	int mem_size;

	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
		IPAERR_RL("bad param\n");
		goto error;
	}

	if (!HDR_TYPE_IS_VALID(hdr->type)) {
		IPAERR_RL("invalid hdr type %d\n", hdr->type);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
	if (!entry)
		goto error;

	INIT_LIST_HEAD(&entry->link);

	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
	entry->hdr_len = hdr->hdr_len;
	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
	entry->is_partial = hdr->is_partial;
	entry->type = hdr->type;
	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
	entry->eth2_ofst = hdr->eth2_ofst;
	entry->cookie = IPA_HDR_COOKIE;
	entry->ipacm_installed = user;

	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
		bin = IPA_HDR_BIN0;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
		bin = IPA_HDR_BIN1;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
		bin = IPA_HDR_BIN2;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
		bin = IPA_HDR_BIN3;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
		bin = IPA_HDR_BIN4;
	else {
		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
		goto bad_hdr_len;
	}

	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
		IPA_MEM_PART(apps_hdr_size_ddr);

	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* if header does not fit to table, place it in DDR */
		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
			entry->is_hdr_proc_ctx = true;
			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
				entry->hdr,
				entry->hdr_len,
				DMA_TO_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
				entry->phys_base)) {
				IPAERR("dma_map_single failure for entry\n");
				goto fail_dma_mapping;
			}
		} else {
			entry->is_hdr_proc_ctx = false;
			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
				GFP_KERNEL);
			if (!offset) {
				IPAERR("failed to alloc hdr offset object\n");
				goto bad_hdr_len;
			}
			INIT_LIST_HEAD(&offset->link);
			/*
			 * for a first item grow, set the bin and offset which
			 * are set in stone
			 */
			offset->offset = htbl->end;
			offset->bin = bin;
			htbl->end += ipa_hdr_bin_sz[bin];
			list_add(&offset->link,
				&htbl->head_offset_list[bin]);
			entry->offset_entry = offset;
			offset->ipacm_installed = user;
		}
	} else {
		entry->is_hdr_proc_ctx = false;
		/* get the first free slot */
		offset = list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa_hdr_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
		entry->offset_entry = offset;
		offset->ipacm_installed = user;
	}

	list_add(&entry->link, &htbl->head_hdr_entry_list);
	htbl->hdr_cnt++;
	if (entry->is_hdr_proc_ctx)
		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			&entry->phys_base);
	else
		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			entry->offset_entry->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR_RL("failed to alloc id\n");
		WARN_ON_RATELIMIT_IPA(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	hdr->hdr_hdl = id;
	entry->ref_cnt++;

	if (entry->is_hdr_proc_ctx) {
		struct ipa_hdr_proc_ctx_add proc_ctx;

		IPADBG("adding processing context for header %s\n", hdr->name);
		proc_ctx.type = IPA_HDR_PROC_NONE;
		proc_ctx.hdr_hdl = id;
		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
			IPAERR("failed to add hdr proc ctx\n");
			goto fail_add_proc_ctx;
		}
		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
	}

	return 0;

fail_add_proc_ctx:
	entry->ref_cnt--;
	hdr->hdr_hdl = 0;
	ipa3_id_remove(id);
ipa_insert_failed:
	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
			entry->hdr_len, DMA_TO_DEVICE);
	} else {
		if (offset)
			list_move(&offset->link,
				&htbl->head_free_offset_list[offset->bin]);
		entry->offset_entry = NULL;
	}
	htbl->hdr_cnt--;
	list_del(&entry->link);

fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;

bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
	return -EPERM;
}

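/**
 * __ipa3_del_hdr_proc_ctx() - drop one reference to a processing context
 * and delete it when the reference count reaches zero
 * @proc_ctx_hdl: [in] handle of the processing context
 * @release_hdr: [in] whether to also drop the bound header's reference
 * @by_user: [in] operation requested by userspace?
 *
 * Returns: 0 on success, negative on failure
 */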
static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
	bool release_hdr, bool by_user)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}

	IPADBG("del proc ctx cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	if (by_user && entry->user_deleted) {
		IPAERR_RL("proc_ctx already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id, false);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}

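/**
 * __ipa3_del_hdr() - drop one reference to a header and delete it when the
 * reference count reaches zero
 * @hdr_hdl: [in] handle of the header
 * @by_user: [in] operation requested by userspace?
 *
 * Returns: 0 on success, negative on failure
 */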
int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}

	if (entry->is_hdr_proc_ctx)
		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
	else
		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
			entry->hdr_len, htbl->hdr_cnt,
			entry->offset_entry->offset);

	if (by_user && entry->user_deleted) {
		IPAERR_RL("hdr already deleted by user\n");
		return -EINVAL;
	}

	if (by_user) {
		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
			IPADBG("Trying to delete hdr %s offset=%u\n",
				entry->name, entry->offset_entry->offset);
			if (!entry->offset_entry->offset) {
				IPAERR_RL(
				"User cannot delete default header\n");
				return -EPERM;
			}
		}
		entry->user_deleted = true;
	}

	if (--entry->ref_cnt) {
		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
		return 0;
	}

	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev,
			entry->phys_base,
			entry->hdr_len,
			DMA_TO_DEVICE);
		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
	} else {
		/* move the offset entry to appropriate free list */
		list_move(&entry->offset_entry->link,
			&htbl->head_free_offset_list[entry->offset_entry->bin]);
	}
	list_del(&entry->link);
	htbl->hdr_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(hdr_hdl);

	return 0;
}

/**
 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
 * to IPA HW
 * @hdrs: [inout] set of headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
	return ipa3_add_hdr_usr(hdrs, false);
}

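/*
 * Example caller-side usage of ipa3_add_hdr() (a sketch for illustration
 * only; the allocation pattern and the eth_hdr_bytes/my_hdl names are
 * assumptions, not taken from this file):
 *
 *	struct ipa_ioc_add_hdr *hdrs;
 *	int len = sizeof(*hdrs) + sizeof(struct ipa_hdr_add);
 *
 *	hdrs = kzalloc(len, GFP_KERNEL);
 *	if (!hdrs)
 *		return -ENOMEM;
 *	hdrs->commit = 1;
 *	hdrs->num_hdrs = 1;
 *	strlcpy(hdrs->hdr[0].name, "my_hdr", IPA_RESOURCE_NAME_MAX);
 *	memcpy(hdrs->hdr[0].hdr, eth_hdr_bytes, ETH_HLEN);
 *	hdrs->hdr[0].hdr_len = ETH_HLEN;
 *	if (!ipa3_add_hdr(hdrs) && !hdrs->hdr[0].status)
 *		my_hdl = hdrs->hdr[0].hdr_hdl;
 *	kfree(hdrs);
 *
 * The per-entry status field reports failures of individual headers even
 * when the call as a whole succeeds, so both must be checked.
 */
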
/**
 * ipa3_add_hdr_usr() - add the specified headers to SW
 * and optionally commit them to IPA HW
 * @hdrs: [inout] set of headers to add
 * @user_only: [in] indicate installed from user
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
{
	int i;
	int result = -EFAULT;

	if (hdrs == NULL || hdrs->num_hdrs == 0) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d headers to IPA driver internal data struct\n",
			hdrs->num_hdrs);
	for (i = 0; i < hdrs->num_hdrs; i++) {
		if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) {
			IPAERR_RL("failed to add hdr %d\n", i);
			hdrs->hdr[i].status = -1;
		} else {
			hdrs->hdr[i].status = 0;
		}
	}

	if (hdrs->commit) {
		IPADBG("committing all headers to IPA core\n");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_by_user() - Remove the specified headers
 * from SW and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
{
	int i;
	int result = -EFAULT;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
			IPAERR_RL("failed to del hdr %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr() - Remove the specified headers from SW
 * and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
	return ipa3_del_hdr_by_user(hdls, false);
}

/**
 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
 * and optionally commit them to IPA HW
 * @proc_ctxs: [inout] set of processing context headers to add
 * @user_only: [in] indicate installed by user-space module
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
	bool user_only)
{
	int i;
	int result = -EFAULT;

	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d header processing contexts to IPA driver\n",
			proc_ctxs->num_proc_ctxs);
	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
				true, user_only)) {
			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
			proc_ctxs->proc_ctx[i].status = -1;
		} else {
			proc_ctxs->proc_ctx[i].status = 0;
		}
	}

	if (proc_ctxs->commit) {
		IPADBG("committing all headers to IPA core\n");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_proc_ctx_by_user() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
	bool by_user)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
			IPAERR_RL("failed to del hdr proc ctx %d\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}

/**
 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_hdr(void)
{
	int result = -EFAULT;

	/*
	 * issue a commit on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_commit_rt(IPA_IP_v4))
		return -EPERM;
	if (ipa3_commit_rt(IPA_IP_v6))
		return -EPERM;

	mutex_lock(&ipa3_ctx->lock);
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		result = -EPERM;
		goto bail;
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
 * HW)
 * @user_only: [in] indicate delete rules installed by userspace
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_hdr(bool user_only)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_entry *next;
	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
	struct ipa3_hdr_proc_ctx_entry *ctx_next;
	struct ipa_hdr_offset_entry *off_entry;
	struct ipa_hdr_offset_entry *off_next;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
	int i;

	/*
	 * issue a reset on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_reset_rt(IPA_IP_v4, user_only))
		IPAERR_RL("fail to reset v4 rt\n");
	if (ipa3_reset_rt(IPA_IP_v6, user_only))
		IPAERR_RL("fail to reset v6 rt\n");

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("reset hdr\n");
	list_for_each_entry_safe(entry, next,
			&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {

		/* do not remove the default header */
		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
			IPADBG("Trying to remove hdr %s offset=%u\n",
				entry->name, entry->offset_entry->offset);
			if (!entry->offset_entry->offset) {
				if (entry->is_hdr_proc_ctx) {
					IPAERR("default header is proc ctx\n");
					mutex_unlock(&ipa3_ctx->lock);
					WARN_ON_RATELIMIT_IPA(1);
					return -EFAULT;
				}
				IPADBG("skip default header\n");
				continue;
			}
		}

		if (ipa3_id_find(entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON_RATELIMIT_IPA(1);
			return -EFAULT;
		}

		if (!user_only || entry->ipacm_installed) {
			if (entry->is_hdr_proc_ctx) {
				dma_unmap_single(ipa3_ctx->pdev,
					entry->phys_base,
					entry->hdr_len,
					DMA_TO_DEVICE);
				entry->proc_ctx = NULL;
			} else {
				/* move the offset entry to free list */
				entry->offset_entry->ipacm_installed = false;
				list_move(&entry->offset_entry->link,
					&htbl->head_free_offset_list[
						entry->offset_entry->bin]);
			}
			list_del(&entry->link);
			htbl->hdr_cnt--;
			entry->ref_cnt = 0;
			entry->cookie = 0;

			/* remove the handle from the database */
			ipa3_id_remove(entry->id);
			kmem_cache_free(ipa3_ctx->hdr_cache, entry);
		}
	}

	/* only clean up offset_list and free_offset_list on global reset */
	if (!user_only) {
		for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
			list_for_each_entry_safe(off_entry, off_next,
				&ipa3_ctx->hdr_tbl.head_offset_list[i],
				link) {
				/*
				 * do not remove the default exception
				 * header which is at offset 0
				 */
				if (off_entry->offset == 0)
					continue;
				list_del(&off_entry->link);
				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
					off_entry);
			}
			list_for_each_entry_safe(off_entry, off_next,
				&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
				link) {
				list_del(&off_entry->link);
				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
					off_entry);
			}
		}
		/* there is one header of size 8 */
		ipa3_ctx->hdr_tbl.end = 8;
		ipa3_ctx->hdr_tbl.hdr_cnt = 1;
	}

	IPADBG("reset hdr proc ctx\n");
	list_for_each_entry_safe(
		ctx_entry,
		ctx_next,
		&(htbl_proc->head_proc_ctx_entry_list),
		link) {

		if (ipa3_id_find(ctx_entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON_RATELIMIT_IPA(1);
			return -EFAULT;
		}

		if (!user_only ||
				ctx_entry->ipacm_installed) {
			/* move the offset entry to appropriate free list */
			list_move(&ctx_entry->offset_entry->link,
				&htbl_proc->head_free_offset_list[
					ctx_entry->offset_entry->bin]);
			list_del(&ctx_entry->link);
			htbl_proc->proc_ctx_cnt--;
			ctx_entry->ref_cnt = 0;
			ctx_entry->cookie = 0;

			/* remove the handle from the database */
			ipa3_id_remove(ctx_entry->id);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache,
				ctx_entry);
		}
	}

	/* only clean up offset_list and free_offset_list on global reset */
	if (!user_only) {
		for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
				&(htbl_proc->head_offset_list[i]), link) {
				list_del(&ctx_off_entry->link);
				kmem_cache_free(
					ipa3_ctx->hdr_proc_ctx_offset_cache,
					ctx_off_entry);
			}
			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
				&(htbl_proc->head_free_offset_list[i]), link) {
				list_del(&ctx_off_entry->link);
				kmem_cache_free(
					ipa3_ctx->hdr_proc_ctx_offset_cache,
					ctx_off_entry);
			}
		}
		htbl_proc->end = 0;
		htbl_proc->proc_ctx_cnt = 0;
	}

	/* commit the change to IPA-HW */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		WARN_ON_RATELIMIT_IPA(1);
		mutex_unlock(&ipa3_ctx->lock);
		return -EFAULT;
	}
	mutex_unlock(&ipa3_ctx->lock);
	return 0;
}

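/**
 * __ipa_find_hdr() - look up a header entry by name
 * @name: [in] NUL-terminated header name, shorter than
 * IPA_RESOURCE_NAME_MAX
 *
 * Caller is expected to hold ipa3_ctx->lock, as the callers in this file do.
 *
 * Returns: the matching header entry, or NULL if none exists
 */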
static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
{
	struct ipa3_hdr_entry *entry;

	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
		IPAERR_RL("Header name too long: %s\n", name);
		return NULL;
	}

	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (!strcmp(name, entry->name))
			return entry;
	}

	return NULL;
}

/**
 * ipa3_get_hdr() - Lookup the specified header resource
 * @lookup: [inout] header to lookup and its handle
 *
 * lookup the specified header resource and return handle if it exists
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 * Caller should call ipa3_put_hdr later if this function succeeds
 */
int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
{
	struct ipa3_hdr_entry *entry;
	int result = -1;

	if (lookup == NULL) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
	entry = __ipa_find_hdr(lookup->name);
	if (entry) {
		lookup->hdl = entry->id;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

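/*
 * Example of the lookup/release pairing required by ipa3_get_hdr() (a
 * sketch; the header name and the use_hdr_handle() helper are assumptions
 * for illustration, not part of this driver):
 *
 *	struct ipa_ioc_get_hdr lookup;
 *
 *	memset(&lookup, 0, sizeof(lookup));
 *	strlcpy(lookup.name, "my_hdr", IPA_RESOURCE_NAME_MAX);
 *	if (!ipa3_get_hdr(&lookup)) {
 *		use_hdr_handle(lookup.hdl);
 *		ipa3_put_hdr(lookup.hdl);
 *	}
 */
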
/**
 * __ipa3_release_hdr() - drop reference to header and cause
 * deletion if reference count permits
 * @hdr_hdl: [in] handle of header to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr(u32 hdr_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr(hdr_hdl, false)) {
		IPADBG("fail to del hdr %x\n", hdr_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
 * and cause deletion if reference count permits
 * @proc_ctx_hdl: [in] handle of processing context to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * ipa3_put_hdr() - Release the specified header handle
 * @hdr_hdl: [in] the header handle to release
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_put_hdr(u32 hdr_hdl)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	mutex_lock(&ipa3_ctx->lock);

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("invalid header entry\n");
		result = -EINVAL;
		goto bail;
	}

	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
 * it
 * @copy: [inout] header to lookup and its copy
 *
 * lookup the specified header resource and return a copy of it (along with its
 * attributes) if it exists, this would be called for partial headers
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	if (copy == NULL) {
		IPAERR_RL("bad param\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
	entry = __ipa_find_hdr(copy->name);
	if (entry) {
		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
		copy->hdr_len = entry->hdr_len;
		copy->type = entry->type;
		copy->is_partial = entry->is_partial;
		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
		copy->eth2_ofst = entry->eth2_ofst;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}