ipa_hdr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"

static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64 };
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64 };

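/*
 * Descriptive note: each header (or processing context) is placed in the
 * smallest bin above that fits it, so table offsets come from a small set
 * of fixed slot sizes and a freed slot can be reused by any later entry
 * that maps to the same bin. See the bin selection in __ipa_add_hdr() and
 * __ipa_add_hdr_proc_ctx() below.
 */
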
#define HDR_TYPE_IS_VALID(type) \
    ((type) >= 0 && (type) < IPA_HDR_L2_MAX)

#define HDR_PROC_TYPE_IS_VALID(type) \
    ((type) >= 0 && (type) < IPA_HDR_PROC_MAX)

/**
 * ipa3_generate_hdr_hw_tbl() - generates the headers table
 * @mem: [out] buffer to put the header table
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
{
    struct ipa3_hdr_entry *entry;
    gfp_t flag = GFP_KERNEL;

    mem->size = ipa3_ctx->hdr_tbl.end;
    if (mem->size == 0) {
        IPAERR("hdr tbl empty\n");
        return -EPERM;
    }
    IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);

alloc:
    mem->base = dma_zalloc_coherent(ipa3_ctx->pdev, mem->size,
        &mem->phys_base, flag);
    if (!mem->base) {
        if (flag == GFP_KERNEL) {
            flag = GFP_ATOMIC;
            goto alloc;
        }
        IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
        return -ENOMEM;
    }

    list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
        link) {
        if (entry->is_hdr_proc_ctx)
            continue;
        IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
            entry->offset_entry->offset);
        ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
            entry->hdr, entry->hdr_len);
    }
    return 0;
}

static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
    u64 hdr_base_addr)
{
    struct ipa3_hdr_proc_ctx_entry *entry;
    int ret;
    int ep;
    struct ipa_ep_cfg *cfg_ptr;
    struct ipa_l2tp_header_remove_procparams *l2p_hdr_rm_ptr;

    list_for_each_entry(entry,
        &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
        link) {
        IPADBG_LOW("processing type %d ofst=%d\n",
            entry->type, entry->offset_entry->offset);
        if (entry->l2tp_params.is_dst_pipe_valid) {
            ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
            if (ep >= 0) {
                cfg_ptr = &ipa3_ctx->ep[ep].cfg;
                l2p_hdr_rm_ptr =
                    &entry->l2tp_params.hdr_remove_param;
                l2p_hdr_rm_ptr->hdr_ofst_pkt_size_valid =
                    cfg_ptr->hdr.hdr_ofst_pkt_size_valid;
                l2p_hdr_rm_ptr->hdr_ofst_pkt_size =
                    cfg_ptr->hdr.hdr_ofst_pkt_size;
                l2p_hdr_rm_ptr->hdr_endianness =
                    cfg_ptr->hdr_ext.hdr_little_endian ?
                    0 : 1;
            }
        }
        ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
            entry->offset_entry->offset,
            entry->hdr->hdr_len,
            entry->hdr->is_hdr_proc_ctx,
            entry->hdr->phys_base,
            hdr_base_addr,
            entry->hdr->offset_entry,
            entry->l2tp_params,
            ipa3_ctx->use_64_bit_dma_mask);
        if (ret)
            return ret;
    }
    return 0;
}

/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: [in] system-memory address of the header table
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 * Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
    struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
    u64 hdr_base_addr;

    mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

    /* make sure table is aligned */
    mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

    IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

    mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
        &mem->phys_base, GFP_KERNEL);
    if (!mem->base) {
        IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
        return -ENOMEM;
    }

    aligned_mem->phys_base =
        IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
    aligned_mem->base = mem->base +
        (aligned_mem->phys_base - mem->phys_base);
    aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
    memset(aligned_mem->base, 0, aligned_mem->size);
    hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
        hdr_sys_addr;
    return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}

/**
 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa_commit_hdr_v3_0(void)
{
    struct ipa3_desc desc[2];
    struct ipa_mem_buffer hdr_mem;
    struct ipa_mem_buffer ctx_mem;
    struct ipa_mem_buffer aligned_ctx_mem;
    struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
    struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
    struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
    struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
    struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
    struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
    int rc = -EFAULT;
    u32 proc_ctx_size;
    u32 proc_ctx_ofst;
    u32 proc_ctx_size_ddr;

    memset(desc, 0, 2 * sizeof(struct ipa3_desc));

    if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
        IPAERR("fail to generate HDR HW TBL\n");
        goto end;
    }
    if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
        &aligned_ctx_mem)) {
        IPAERR("fail to generate HDR PROC CTX HW TBL\n");
        goto end;
    }

    if (ipa3_ctx->hdr_tbl_lcl) {
        if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
            IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
                IPA_MEM_PART(apps_hdr_size));
            goto end;
        } else {
            dma_cmd_hdr.is_read = false; /* write operation */
            dma_cmd_hdr.skip_pipeline_clear = false;
            dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
            dma_cmd_hdr.system_addr = hdr_mem.phys_base;
            dma_cmd_hdr.size = hdr_mem.size;
            dma_cmd_hdr.local_addr =
                ipa3_ctx->smem_restricted_bytes +
                IPA_MEM_PART(apps_hdr_ofst);
            hdr_cmd_pyld = ipahal_construct_imm_cmd(
                IPA_IMM_CMD_DMA_SHARED_MEM,
                &dma_cmd_hdr, false);
            if (!hdr_cmd_pyld) {
                IPAERR("fail construct dma_shared_mem cmd\n");
                goto end;
            }
        }
    } else {
        if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
            IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
                IPA_MEM_PART(apps_hdr_size_ddr));
            goto end;
        } else {
            hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
            hdr_cmd_pyld = ipahal_construct_imm_cmd(
                IPA_IMM_CMD_HDR_INIT_SYSTEM,
                &hdr_init_cmd, false);
            if (!hdr_cmd_pyld) {
                IPAERR("fail construct hdr_init_system cmd\n");
                goto end;
            }
        }
    }
    ipa3_init_imm_cmd_desc(&desc[0], hdr_cmd_pyld);
    IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);

    proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
    proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
    if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
        if (aligned_ctx_mem.size > proc_ctx_size) {
            IPAERR("tbl too big needed %d avail %d\n",
                aligned_ctx_mem.size,
                proc_ctx_size);
            goto end;
        } else {
            dma_cmd_ctx.is_read = false; /* write operation */
            dma_cmd_ctx.skip_pipeline_clear = false;
            dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
            dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
            dma_cmd_ctx.size = aligned_ctx_mem.size;
            dma_cmd_ctx.local_addr =
                ipa3_ctx->smem_restricted_bytes +
                proc_ctx_ofst;
            ctx_cmd_pyld = ipahal_construct_imm_cmd(
                IPA_IMM_CMD_DMA_SHARED_MEM,
                &dma_cmd_ctx, false);
            if (!ctx_cmd_pyld) {
                IPAERR("fail construct dma_shared_mem cmd\n");
                goto end;
            }
        }
    } else {
        proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
        if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
            IPAERR("tbl too big, needed %d avail %d\n",
                aligned_ctx_mem.size,
                proc_ctx_size_ddr);
            goto end;
        } else {
            reg_write_cmd.skip_pipeline_clear = false;
            reg_write_cmd.pipeline_clear_options =
                IPAHAL_HPS_CLEAR;
            reg_write_cmd.offset =
                ipahal_get_reg_ofst(
                    IPA_SYS_PKT_PROC_CNTXT_BASE);
            reg_write_cmd.value = aligned_ctx_mem.phys_base;
            reg_write_cmd.value_mask =
                ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
            ctx_cmd_pyld = ipahal_construct_imm_cmd(
                IPA_IMM_CMD_REGISTER_WRITE,
                &reg_write_cmd, false);
            if (!ctx_cmd_pyld) {
                IPAERR("fail construct register_write cmd\n");
                goto end;
            }
        }
    }
    ipa3_init_imm_cmd_desc(&desc[1], ctx_cmd_pyld);
    IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);

    if (ipa3_send_cmd(2, desc))
        IPAERR("fail to send immediate command\n");
    else
        rc = 0;

    if (ipa3_ctx->hdr_tbl_lcl) {
        dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
            hdr_mem.phys_base);
    } else {
        if (!rc) {
            if (ipa3_ctx->hdr_mem.phys_base)
                dma_free_coherent(ipa3_ctx->pdev,
                    ipa3_ctx->hdr_mem.size,
                    ipa3_ctx->hdr_mem.base,
                    ipa3_ctx->hdr_mem.phys_base);
            ipa3_ctx->hdr_mem = hdr_mem;
        }
    }

    if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
        dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
            ctx_mem.phys_base);
    } else {
        if (!rc) {
            if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
                dma_free_coherent(ipa3_ctx->pdev,
                    ipa3_ctx->hdr_proc_ctx_mem.size,
                    ipa3_ctx->hdr_proc_ctx_mem.base,
                    ipa3_ctx->hdr_proc_ctx_mem.phys_base);
            ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
        }
    }

end:
    if (ctx_cmd_pyld)
        ipahal_destroy_imm_cmd(ctx_cmd_pyld);
    if (hdr_cmd_pyld)
        ipahal_destroy_imm_cmd(hdr_cmd_pyld);
    return rc;
}

static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
    bool add_ref_hdr, bool user_only)
{
    struct ipa3_hdr_entry *hdr_entry;
    struct ipa3_hdr_proc_ctx_entry *entry;
    struct ipa3_hdr_proc_ctx_offset_entry *offset;
    u32 bin;
    struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
    int id;
    int needed_len;
    int mem_size;

    IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
        proc_ctx->type, proc_ctx->hdr_hdl);

    if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
        IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
        return -EINVAL;
    }

    hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
    if (!hdr_entry) {
        IPAERR_RL("hdr_hdl is invalid\n");
        return -EINVAL;
    }
    if (hdr_entry->cookie != IPA_HDR_COOKIE) {
        IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
        WARN_ON_RATELIMIT_IPA(1);
        return -EINVAL;
    }
    IPADBG("Associated header name=%s is_hdr_proc_ctx=%d\n",
        hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

    entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
    if (!entry) {
        IPAERR("failed to alloc proc_ctx object\n");
        return -ENOMEM;
    }
    INIT_LIST_HEAD(&entry->link);
    entry->type = proc_ctx->type;
    entry->hdr = hdr_entry;
    entry->l2tp_params = proc_ctx->l2tp_params;
    if (add_ref_hdr)
        hdr_entry->ref_cnt++;
    entry->cookie = IPA_PROC_HDR_COOKIE;
    entry->ipacm_installed = user_only;

    needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
    if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
        bin = IPA_HDR_PROC_CTX_BIN0;
    } else if (needed_len <=
        ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
        bin = IPA_HDR_PROC_CTX_BIN1;
    } else {
        IPAERR_RL("unexpected needed len %d\n", needed_len);
        WARN_ON_RATELIMIT_IPA(1);
        goto bad_len;
    }

    mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
        IPA_MEM_PART(apps_hdr_proc_ctx_size) :
        IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
    if (list_empty(&htbl->head_free_offset_list[bin])) {
        if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
            IPAERR_RL("hdr proc ctx table overflow\n");
            goto bad_len;
        }

        offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
            GFP_KERNEL);
        if (!offset) {
            IPAERR("failed to alloc offset object\n");
            goto bad_len;
        }
        INIT_LIST_HEAD(&offset->link);
        /*
         * first time growing the table: the bin and offset assigned
         * here are fixed for the lifetime of the slot
         */
        offset->offset = htbl->end;
        offset->bin = bin;
        offset->ipacm_installed = user_only;
        htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
        list_add(&offset->link,
            &htbl->head_offset_list[bin]);
    } else {
        /* get the first free slot */
        offset =
            list_first_entry(&htbl->head_free_offset_list[bin],
                struct ipa3_hdr_proc_ctx_offset_entry, link);
        offset->ipacm_installed = user_only;
        list_move(&offset->link, &htbl->head_offset_list[bin]);
    }

    entry->offset_entry = offset;
    list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
    htbl->proc_ctx_cnt++;
    IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
        htbl->proc_ctx_cnt, offset->offset);

    id = ipa3_id_alloc(entry);
    if (id < 0) {
        IPAERR_RL("failed to alloc id\n");
        WARN_ON_RATELIMIT_IPA(1);
        goto ipa_insert_failed;
    }
    entry->id = id;
    proc_ctx->proc_ctx_hdl = id;
    entry->ref_cnt++;

    return 0;

ipa_insert_failed:
    list_move(&offset->link,
        &htbl->head_free_offset_list[offset->bin]);
    entry->offset_entry = NULL;
    list_del(&entry->link);
    htbl->proc_ctx_cnt--;
bad_len:
    if (add_ref_hdr)
        hdr_entry->ref_cnt--;
    entry->cookie = 0;
    kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
    return -EPERM;
}

static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
{
    struct ipa3_hdr_entry *entry;
    struct ipa_hdr_offset_entry *offset = NULL;
    u32 bin;
    struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
    int id;
    int mem_size;

    if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
        IPAERR_RL("bad param\n");
        goto error;
    }

    if (!HDR_TYPE_IS_VALID(hdr->type)) {
        IPAERR_RL("invalid hdr type %d\n", hdr->type);
        goto error;
    }

    entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
    if (!entry)
        goto error;

    INIT_LIST_HEAD(&entry->link);

    memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
    entry->hdr_len = hdr->hdr_len;
    strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
    entry->is_partial = hdr->is_partial;
    entry->type = hdr->type;
    entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
    entry->eth2_ofst = hdr->eth2_ofst;
    entry->cookie = IPA_HDR_COOKIE;
    entry->ipacm_installed = user;

    if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
        bin = IPA_HDR_BIN0;
    else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
        bin = IPA_HDR_BIN1;
    else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
        bin = IPA_HDR_BIN2;
    else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
        bin = IPA_HDR_BIN3;
    else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
        bin = IPA_HDR_BIN4;
    else {
        IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
        goto bad_hdr_len;
    }

    mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
        IPA_MEM_PART(apps_hdr_size_ddr);

    if (list_empty(&htbl->head_free_offset_list[bin])) {
        /* if the header does not fit in the table, place it in DDR */
        if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
            entry->is_hdr_proc_ctx = true;
            entry->phys_base = dma_map_single(ipa3_ctx->pdev,
                entry->hdr,
                entry->hdr_len,
                DMA_TO_DEVICE);
            if (dma_mapping_error(ipa3_ctx->pdev,
                entry->phys_base)) {
                IPAERR("dma_map_single failure for entry\n");
                goto fail_dma_mapping;
            }
        } else {
            entry->is_hdr_proc_ctx = false;
            offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
                GFP_KERNEL);
            if (!offset) {
                IPAERR("failed to alloc hdr offset object\n");
                goto bad_hdr_len;
            }
            INIT_LIST_HEAD(&offset->link);
            /*
             * first time growing the table: the bin and offset
             * assigned here are fixed for the lifetime of the
             * slot
             */
            offset->offset = htbl->end;
            offset->bin = bin;
            htbl->end += ipa_hdr_bin_sz[bin];
            list_add(&offset->link,
                &htbl->head_offset_list[bin]);
            entry->offset_entry = offset;
            offset->ipacm_installed = user;
        }
    } else {
        entry->is_hdr_proc_ctx = false;
        /* get the first free slot */
        offset = list_first_entry(&htbl->head_free_offset_list[bin],
            struct ipa_hdr_offset_entry, link);
        list_move(&offset->link, &htbl->head_offset_list[bin]);
        entry->offset_entry = offset;
        offset->ipacm_installed = user;
    }

    list_add(&entry->link, &htbl->head_hdr_entry_list);
    htbl->hdr_cnt++;
    if (entry->is_hdr_proc_ctx)
        IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
            hdr->hdr_len,
            htbl->hdr_cnt,
            &entry->phys_base);
    else
        IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
            hdr->hdr_len,
            htbl->hdr_cnt,
            entry->offset_entry->offset);

    id = ipa3_id_alloc(entry);
    if (id < 0) {
        IPAERR_RL("failed to alloc id\n");
        WARN_ON_RATELIMIT_IPA(1);
        goto ipa_insert_failed;
    }
    entry->id = id;
    hdr->hdr_hdl = id;
    entry->ref_cnt++;

    if (entry->is_hdr_proc_ctx) {
        struct ipa_hdr_proc_ctx_add proc_ctx;

        IPADBG("adding processing context for header %s\n", hdr->name);
        proc_ctx.type = IPA_HDR_PROC_NONE;
        proc_ctx.hdr_hdl = id;
        if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
            IPAERR("failed to add hdr proc ctx\n");
            goto fail_add_proc_ctx;
        }
        entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
    }

    return 0;

fail_add_proc_ctx:
    entry->ref_cnt--;
    hdr->hdr_hdl = 0;
    ipa3_id_remove(id);
ipa_insert_failed:
    if (entry->is_hdr_proc_ctx) {
        dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
            entry->hdr_len, DMA_TO_DEVICE);
    } else {
        if (offset)
            list_move(&offset->link,
                &htbl->head_free_offset_list[offset->bin]);
        entry->offset_entry = NULL;
    }
    htbl->hdr_cnt--;
    list_del(&entry->link);
fail_dma_mapping:
    entry->is_hdr_proc_ctx = false;
bad_hdr_len:
    entry->cookie = 0;
    kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
    return -EPERM;
}

static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
    bool release_hdr, bool by_user)
{
    struct ipa3_hdr_proc_ctx_entry *entry;
    struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

    entry = ipa3_id_find(proc_ctx_hdl);
    if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }

    IPADBG("del proc ctx cnt=%d ofst=%d\n",
        htbl->proc_ctx_cnt, entry->offset_entry->offset);

    if (by_user && entry->user_deleted) {
        IPAERR_RL("proc_ctx already deleted by user\n");
        return -EINVAL;
    }

    if (by_user)
        entry->user_deleted = true;

    if (--entry->ref_cnt) {
        IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
            proc_ctx_hdl, entry->ref_cnt);
        return 0;
    }

    if (release_hdr)
        __ipa3_del_hdr(entry->hdr->id, false);

    /* move the offset entry to the appropriate free list */
    list_move(&entry->offset_entry->link,
        &htbl->head_free_offset_list[entry->offset_entry->bin]);
    list_del(&entry->link);
    htbl->proc_ctx_cnt--;
    entry->cookie = 0;
    kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

    /* remove the handle from the database */
    ipa3_id_remove(proc_ctx_hdl);

    return 0;
}

int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
{
    struct ipa3_hdr_entry *entry;
    struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;

    entry = ipa3_id_find(hdr_hdl);
    if (entry == NULL) {
        IPAERR_RL("lookup failed\n");
        return -EINVAL;
    }

    if (entry->cookie != IPA_HDR_COOKIE) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }

    if (entry->is_hdr_proc_ctx)
        IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
            entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
    else
        IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
            entry->hdr_len, htbl->hdr_cnt,
            entry->offset_entry->offset);

    if (by_user && entry->user_deleted) {
        IPAERR_RL("hdr already deleted by user\n");
        return -EINVAL;
    }

    if (by_user) {
        if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
            IPADBG("Trying to delete hdr %s offset=%u\n",
                entry->name, entry->offset_entry->offset);
            if (!entry->offset_entry->offset) {
                IPAERR_RL(
                    "User cannot delete default header\n");
                return -EPERM;
            }
        }
        entry->user_deleted = true;
    }

    if (--entry->ref_cnt) {
        IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
        return 0;
    }

    if (entry->is_hdr_proc_ctx) {
        dma_unmap_single(ipa3_ctx->pdev,
            entry->phys_base,
            entry->hdr_len,
            DMA_TO_DEVICE);
        __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
    } else {
        /* move the offset entry to the appropriate free list */
        list_move(&entry->offset_entry->link,
            &htbl->head_free_offset_list[entry->offset_entry->bin]);
    }
    list_del(&entry->link);
    htbl->hdr_cnt--;
    entry->cookie = 0;
    kmem_cache_free(ipa3_ctx->hdr_cache, entry);

    /* remove the handle from the database */
    ipa3_id_remove(hdr_hdl);

    return 0;
}

/**
 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
 * to IPA HW
 * @hdrs: [inout] set of headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
    return ipa3_add_hdr_usr(hdrs, false);
}

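/*
 * Illustrative usage sketch (not part of the driver): a caller would
 * typically allocate an ipa_ioc_add_hdr carrying one ipa_hdr_add entry,
 * fill in the raw header bytes, and read back the returned handle.
 * "eth_hdr_bytes" and "my_hdr_hdl" are hypothetical caller state; error
 * handling is elided for brevity.
 *
 *    struct ipa_ioc_add_hdr *hdrs;
 *
 *    hdrs = kzalloc(sizeof(*hdrs) + sizeof(struct ipa_hdr_add),
 *        GFP_KERNEL);
 *    if (!hdrs)
 *        return -ENOMEM;
 *    hdrs->num_hdrs = 1;
 *    hdrs->commit = 1;
 *    strlcpy(hdrs->hdr[0].name, "my_eth2_hdr", IPA_RESOURCE_NAME_MAX);
 *    memcpy(hdrs->hdr[0].hdr, eth_hdr_bytes, ETH_HLEN);
 *    hdrs->hdr[0].hdr_len = ETH_HLEN;
 *    hdrs->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
 *    if (!ipa3_add_hdr(hdrs) && !hdrs->hdr[0].status)
 *        my_hdr_hdl = hdrs->hdr[0].hdr_hdl;
 *    kfree(hdrs);
 */
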
/**
 * ipa3_add_hdr_usr() - add the specified headers to SW
 * and optionally commit them to IPA HW
 * @hdrs: [inout] set of headers to add
 * @user_only: [in] indicate whether the headers are installed by a
 * user-space module
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
{
    int i;
    int result = -EFAULT;

    if (hdrs == NULL || hdrs->num_hdrs == 0) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }

    mutex_lock(&ipa3_ctx->lock);
    IPADBG("adding %d headers to IPA driver internal data struct\n",
        hdrs->num_hdrs);
    for (i = 0; i < hdrs->num_hdrs; i++) {
        if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) {
            IPAERR_RL("failed to add hdr %d\n", i);
            hdrs->hdr[i].status = -1;
        } else {
            hdrs->hdr[i].status = 0;
        }
    }

    if (hdrs->commit) {
        IPADBG("committing all headers to IPA core\n");
        if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
            result = -EPERM;
            goto bail;
        }
    }
    result = 0;
bail:
    mutex_unlock(&ipa3_ctx->lock);
    return result;
}

/**
 * ipa3_del_hdr_by_user() - Remove the specified headers
 * from SW and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
{
    int i;
    int result = -EFAULT;

    if (hdls == NULL || hdls->num_hdls == 0) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }

    mutex_lock(&ipa3_ctx->lock);
    for (i = 0; i < hdls->num_hdls; i++) {
        if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
            IPAERR_RL("failed to del hdr %i\n", i);
            hdls->hdl[i].status = -1;
        } else {
            hdls->hdl[i].status = 0;
        }
    }

    if (hdls->commit) {
        if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
            result = -EPERM;
            goto bail;
        }
    }
    result = 0;
bail:
    mutex_unlock(&ipa3_ctx->lock);
    return result;
}

/**
 * ipa3_del_hdr() - Remove the specified headers from SW
 * and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
    return ipa3_del_hdr_by_user(hdls, false);
}

/**
 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
 * and optionally commit them to IPA HW
 * @proc_ctxs: [inout] set of processing context headers to add
 * @user_only: [in] indicate installed by user-space module
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
    bool user_only)
{
    int i;
    int result = -EFAULT;

    if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }

    mutex_lock(&ipa3_ctx->lock);
    IPADBG("adding %d header processing contexts to IPA driver\n",
        proc_ctxs->num_proc_ctxs);
    for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
        if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
            true, user_only)) {
            IPAERR_RL("failed to add hdr proc ctx %d\n", i);
            proc_ctxs->proc_ctx[i].status = -1;
        } else {
            proc_ctxs->proc_ctx[i].status = 0;
        }
    }

    if (proc_ctxs->commit) {
        IPADBG("committing all headers to IPA core\n");
        if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
            result = -EPERM;
            goto bail;
        }
    }
    result = 0;
bail:
    mutex_unlock(&ipa3_ctx->lock);
    return result;
}

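/*
 * Illustrative usage sketch (not part of the driver): adding a single
 * processing context that references an already-added header handle.
 * "my_hdr_hdl" and "my_proc_ctx_hdl" are hypothetical caller state
 * (e.g. obtained from ipa3_add_hdr()); error handling is elided.
 *
 *    struct ipa_ioc_add_hdr_proc_ctx *pc;
 *
 *    pc = kzalloc(sizeof(*pc) + sizeof(struct ipa_hdr_proc_ctx_add),
 *        GFP_KERNEL);
 *    if (!pc)
 *        return -ENOMEM;
 *    pc->num_proc_ctxs = 1;
 *    pc->commit = 1;
 *    pc->proc_ctx[0].type = IPA_HDR_PROC_NONE;
 *    pc->proc_ctx[0].hdr_hdl = my_hdr_hdl;
 *    if (!ipa3_add_hdr_proc_ctx(pc, false) && !pc->proc_ctx[0].status)
 *        my_proc_ctx_hdl = pc->proc_ctx[0].proc_ctx_hdl;
 *    kfree(pc);
 */
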
/**
 * ipa3_del_hdr_proc_ctx_by_user() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
    bool by_user)
{
    int i;
    int result;

    if (hdls == NULL || hdls->num_hdls == 0) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }

    mutex_lock(&ipa3_ctx->lock);
    for (i = 0; i < hdls->num_hdls; i++) {
        if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
            IPAERR_RL("failed to del hdr proc ctx %i\n", i);
            hdls->hdl[i].status = -1;
        } else {
            hdls->hdl[i].status = 0;
        }
    }

    if (hdls->commit) {
        if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
            result = -EPERM;
            goto bail;
        }
    }
    result = 0;
bail:
    mutex_unlock(&ipa3_ctx->lock);
    return result;
}

/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
    return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}

/**
 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_hdr(void)
{
    int result = -EFAULT;

    /*
     * issue a commit on the routing module since routing rules point to
     * header table entries
     */
    if (ipa3_commit_rt(IPA_IP_v4))
        return -EPERM;
    if (ipa3_commit_rt(IPA_IP_v6))
        return -EPERM;

    mutex_lock(&ipa3_ctx->lock);
    if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
        result = -EPERM;
        goto bail;
    }
    result = 0;
bail:
    mutex_unlock(&ipa3_ctx->lock);
    return result;
}

/**
 * ipa3_reset_hdr() - reset the current header table in SW and commit
 * the change to IPA HW
 * @user_only: [in] delete only the entries installed by user-space
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_hdr(bool user_only)
{
    struct ipa3_hdr_entry *entry;
    struct ipa3_hdr_entry *next;
    struct ipa3_hdr_proc_ctx_entry *ctx_entry;
    struct ipa3_hdr_proc_ctx_entry *ctx_next;
    struct ipa_hdr_offset_entry *off_entry;
    struct ipa_hdr_offset_entry *off_next;
    struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
    struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
    struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
    struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
    int i;

    /*
     * issue a reset on the routing module since routing rules point to
     * header table entries
     */
    if (ipa3_reset_rt(IPA_IP_v4, user_only))
        IPAERR_RL("fail to reset v4 rt\n");
    if (ipa3_reset_rt(IPA_IP_v6, user_only))
        IPAERR_RL("fail to reset v6 rt\n");

    mutex_lock(&ipa3_ctx->lock);
    IPADBG("reset hdr\n");
    list_for_each_entry_safe(entry, next,
        &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {

        /* do not remove the default header */
        if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
            IPADBG("Trying to remove hdr %s offset=%u\n",
                entry->name, entry->offset_entry->offset);
            if (!entry->offset_entry->offset) {
                if (entry->is_hdr_proc_ctx) {
                    IPAERR("default header is proc ctx\n");
                    mutex_unlock(&ipa3_ctx->lock);
                    WARN_ON_RATELIMIT_IPA(1);
                    return -EFAULT;
                }
                IPADBG("skip default header\n");
                continue;
            }
        }

        if (ipa3_id_find(entry->id) == NULL) {
            mutex_unlock(&ipa3_ctx->lock);
            WARN_ON_RATELIMIT_IPA(1);
            return -EFAULT;
        }

        if (!user_only || entry->ipacm_installed) {
            if (entry->is_hdr_proc_ctx) {
                dma_unmap_single(ipa3_ctx->pdev,
                    entry->phys_base,
                    entry->hdr_len,
                    DMA_TO_DEVICE);
                entry->proc_ctx = NULL;
            } else {
                /* move the offset entry to free list */
                entry->offset_entry->ipacm_installed = false;
                list_move(&entry->offset_entry->link,
                    &htbl->head_free_offset_list[
                        entry->offset_entry->bin]);
            }
            list_del(&entry->link);
            htbl->hdr_cnt--;
            entry->ref_cnt = 0;
            entry->cookie = 0;

            /* remove the handle from the database */
            ipa3_id_remove(entry->id);
            kmem_cache_free(ipa3_ctx->hdr_cache, entry);
        }
    }

    /* only clean up offset_list and free_offset_list on global reset */
    if (!user_only) {
        for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
            list_for_each_entry_safe(off_entry, off_next,
                &ipa3_ctx->hdr_tbl.head_offset_list[i],
                link) {
                /*
                 * do not remove the default exception
                 * header which is at offset 0
                 */
                if (off_entry->offset == 0)
                    continue;
                list_del(&off_entry->link);
                kmem_cache_free(ipa3_ctx->hdr_offset_cache,
                    off_entry);
            }
            list_for_each_entry_safe(off_entry, off_next,
                &ipa3_ctx->hdr_tbl.head_free_offset_list[i],
                link) {
                list_del(&off_entry->link);
                kmem_cache_free(ipa3_ctx->hdr_offset_cache,
                    off_entry);
            }
        }
        /* there is one header of size 8 */
        ipa3_ctx->hdr_tbl.end = 8;
        ipa3_ctx->hdr_tbl.hdr_cnt = 1;
    }

    IPADBG("reset hdr proc ctx\n");
    list_for_each_entry_safe(
        ctx_entry,
        ctx_next,
        &(htbl_proc->head_proc_ctx_entry_list),
        link) {

        if (ipa3_id_find(ctx_entry->id) == NULL) {
            mutex_unlock(&ipa3_ctx->lock);
            WARN_ON_RATELIMIT_IPA(1);
            return -EFAULT;
        }

        if (!user_only ||
            ctx_entry->ipacm_installed) {
            /* move the offset entry to the appropriate free list */
            list_move(&ctx_entry->offset_entry->link,
                &htbl_proc->head_free_offset_list[
                    ctx_entry->offset_entry->bin]);
            list_del(&ctx_entry->link);
            htbl_proc->proc_ctx_cnt--;
            ctx_entry->ref_cnt = 0;
            ctx_entry->cookie = 0;

            /* remove the handle from the database */
            ipa3_id_remove(ctx_entry->id);
            kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache,
                ctx_entry);
        }
    }

    /* only clean up offset_list and free_offset_list on global reset */
    if (!user_only) {
        for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
            list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
                &(htbl_proc->head_offset_list[i]), link) {
                list_del(&ctx_off_entry->link);
                kmem_cache_free(
                    ipa3_ctx->hdr_proc_ctx_offset_cache,
                    ctx_off_entry);
            }
            list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
                &(htbl_proc->head_free_offset_list[i]), link) {
                list_del(&ctx_off_entry->link);
                kmem_cache_free(
                    ipa3_ctx->hdr_proc_ctx_offset_cache,
                    ctx_off_entry);
            }
        }
        htbl_proc->end = 0;
        htbl_proc->proc_ctx_cnt = 0;
    }

    /* commit the change to IPA HW */
    if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
        IPAERR("fail to commit hdr\n");
        WARN_ON_RATELIMIT_IPA(1);
        mutex_unlock(&ipa3_ctx->lock);
        return -EFAULT;
    }
    mutex_unlock(&ipa3_ctx->lock);

    return 0;
}

static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
{
    struct ipa3_hdr_entry *entry;

    if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
        IPAERR_RL("Header name too long: %s\n", name);
        return NULL;
    }

    list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
        link) {
        if (!strcmp(name, entry->name))
            return entry;
    }

    return NULL;
}

/**
 * ipa3_get_hdr() - Lookup the specified header resource
 * @lookup: [inout] header to lookup and its handle
 *
 * lookup the specified header resource and return its handle if it exists
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 * Caller should call ipa3_put_hdr later if this function succeeds
 */
int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
{
    struct ipa3_hdr_entry *entry;
    int result = -1;

    if (lookup == NULL) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }
    mutex_lock(&ipa3_ctx->lock);
    lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
    entry = __ipa_find_hdr(lookup->name);
    if (entry) {
        lookup->hdl = entry->id;
        result = 0;
    }
    mutex_unlock(&ipa3_ctx->lock);

    return result;
}

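/*
 * Illustrative usage sketch (not part of the driver): pairing
 * ipa3_get_hdr() with ipa3_put_hdr() as the kernel-doc above requires.
 * "my_eth2_hdr" and use_hdr_handle() are hypothetical.
 *
 *    struct ipa_ioc_get_hdr lookup = { { 0 } };
 *
 *    strlcpy(lookup.name, "my_eth2_hdr", IPA_RESOURCE_NAME_MAX);
 *    if (!ipa3_get_hdr(&lookup)) {
 *        use_hdr_handle(lookup.hdl);
 *        ipa3_put_hdr(lookup.hdl);
 *    }
 */
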
/**
 * __ipa3_release_hdr() - drop reference to header and cause
 * deletion if reference count permits
 * @hdr_hdl: [in] handle of header to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr(u32 hdr_hdl)
{
    int result = 0;

    if (__ipa3_del_hdr(hdr_hdl, false)) {
        IPADBG("fail to del hdr %x\n", hdr_hdl);
        result = -EFAULT;
        goto bail;
    }

    /* commit for put */
    if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
        IPAERR("fail to commit hdr\n");
        result = -EFAULT;
        goto bail;
    }
bail:
    return result;
}

/**
 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
 * and cause deletion if reference count permits
 * @proc_ctx_hdl: [in] handle of processing context to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
{
    int result = 0;

    if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
        IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
        result = -EFAULT;
        goto bail;
    }

    /* commit for put */
    if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
        IPAERR("fail to commit hdr\n");
        result = -EFAULT;
        goto bail;
    }
bail:
    return result;
}

/**
 * ipa3_put_hdr() - Release the specified header handle
 * @hdr_hdl: [in] the header handle to release
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_put_hdr(u32 hdr_hdl)
{
    struct ipa3_hdr_entry *entry;
    int result = -EFAULT;

    mutex_lock(&ipa3_ctx->lock);

    entry = ipa3_id_find(hdr_hdl);
    if (entry == NULL) {
        IPAERR_RL("lookup failed\n");
        result = -EINVAL;
        goto bail;
    }

    if (entry->cookie != IPA_HDR_COOKIE) {
        IPAERR_RL("invalid header entry\n");
        result = -EINVAL;
        goto bail;
    }

    result = 0;
bail:
    mutex_unlock(&ipa3_ctx->lock);
    return result;
}

/**
 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
 * it
 * @copy: [inout] header to lookup and its copy
 *
 * lookup the specified header resource and return a copy of it (along with
 * its attributes) if it exists; this is typically called for partial headers
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
{
    struct ipa3_hdr_entry *entry;
    int result = -EFAULT;

    if (copy == NULL) {
        IPAERR_RL("bad param\n");
        return -EINVAL;
    }
    mutex_lock(&ipa3_ctx->lock);
    copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
    entry = __ipa_find_hdr(copy->name);
    if (entry) {
        memcpy(copy->hdr, entry->hdr, entry->hdr_len);
        copy->hdr_len = entry->hdr_len;
        copy->type = entry->type;
        copy->is_partial = entry->is_partial;
        copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
        copy->eth2_ofst = entry->eth2_ofst;
        result = 0;
    }
    mutex_unlock(&ipa3_ctx->lock);

    return result;
}