ipa_uc_offload.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/ipa_uc_offload.h>
  6. #include <linux/msm_ipa.h>
  7. #include "../ipa_common_i.h"
  8. #include "../ipa_v3/ipa_pm.h"
/* Required alignment (bytes) for NTN ring/buffer-pool base addresses */
#define IPA_NTN_DMA_POOL_ALIGNMENT 8
#define OFFLOAD_DRV_NAME "ipa_uc_offload"

/*
 * Logging helpers: each prints to the kernel log at its level and also
 * mirrors the message into the IPA IPC log buffers (normal and low-prio).
 */
#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
	do { \
		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Low-priority variant: only the low-prio IPC buffer is mirrored */
#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
	do { \
		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
	do { \
		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
	do { \
		pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
/* Lifecycle states of an offload protocol context */
enum ipa_uc_offload_state {
	IPA_UC_OFFLOAD_STATE_INVALID,		/* no interface registered */
	IPA_UC_OFFLOAD_STATE_INITIALIZED,	/* interface registered, pipes down */
	IPA_UC_OFFLOAD_STATE_UP,		/* offload pipes connected */
};

/* Per-protocol offload context, allocated on first interface registration */
struct ipa_uc_offload_ctx {
	enum ipa_uc_offload_proto proto;	/* offload protocol of this slot */
	enum ipa_uc_offload_state state;	/* current lifecycle state */
	void *priv;				/* client cookie passed back in notify */
	u8 hdr_len;				/* L2 header length from client */
	u32 partial_hdr_hdl[IPA_IP_MAX];	/* committed partial hdr handles (v4/v6) */
	char netdev_name[IPA_RESOURCE_NAME_MAX];/* client network device name */
	ipa_notify_cb notify;			/* client data-path callback */
	struct completion ntn_completion;
	u32 pm_hdl;				/* IPA PM client handle */
	/* copy of connect-time params, kept for pipe teardown/SMMU free */
	struct ipa_ntn_conn_in_params conn;
};

/* One context slot per protocol; NULL until first registration */
static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
  63. static int ipa_commit_partial_hdr(
  64. struct ipa_ioc_add_hdr *hdr,
  65. const char *netdev_name,
  66. struct ipa_hdr_info *hdr_info)
  67. {
  68. int i;
  69. if (hdr == NULL || hdr_info == NULL) {
  70. IPA_UC_OFFLOAD_ERR("Invalid input\n");
  71. return -EINVAL;
  72. }
  73. hdr->commit = 1;
  74. hdr->num_hdrs = 2;
  75. snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
  76. "%s_ipv4", netdev_name);
  77. snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
  78. "%s_ipv6", netdev_name);
  79. for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
  80. hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
  81. memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
  82. hdr->hdr[i].type = hdr_info[i].hdr_type;
  83. hdr->hdr[i].is_partial = 1;
  84. hdr->hdr[i].is_eth2_ofst_valid = 1;
  85. hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
  86. }
  87. if (ipa_add_hdr(hdr)) {
  88. IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
  89. return -EFAULT;
  90. }
  91. return 0;
  92. }
/*
 * PM callback registered for the NTN client. Suspend/resume is not
 * supported by this driver, so the event is only logged.
 */
static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event)
{
	/* suspend/resume is not supported */
	IPA_UC_OFFLOAD_DBG("event = %d\n", event);
}
  98. static int ipa_uc_offload_ntn_register_pm_client(
  99. struct ipa_uc_offload_ctx *ntn_ctx)
  100. {
  101. int res;
  102. struct ipa_pm_register_params params;
  103. memset(&params, 0, sizeof(params));
  104. params.name = "ETH";
  105. params.callback = ipa_uc_offload_ntn_pm_cb;
  106. params.user_data = ntn_ctx;
  107. params.group = IPA_PM_GROUP_DEFAULT;
  108. res = ipa_pm_register(&params, &ntn_ctx->pm_hdl);
  109. if (res) {
  110. IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res);
  111. return res;
  112. }
  113. res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
  114. IPA_CLIENT_ETHERNET_CONS);
  115. if (res) {
  116. IPA_UC_OFFLOAD_ERR("fail to associate cons with PM %d\n", res);
  117. ipa_pm_deregister(ntn_ctx->pm_hdl);
  118. ntn_ctx->pm_hdl = ~0;
  119. return res;
  120. }
  121. return 0;
  122. }
/*
 * Drop the PM vote held on behalf of the NTN client, then remove the
 * client from the IPA PM framework. Order matters: deactivate before
 * deregister.
 */
static void ipa_uc_offload_ntn_deregister_pm_client(
	struct ipa_uc_offload_ctx *ntn_ctx)
{
	ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
	ipa_pm_deregister(ntn_ctx->pm_hdl);
}
  129. static int ipa_uc_offload_ntn_reg_intf(
  130. struct ipa_uc_offload_intf_params *inp,
  131. struct ipa_uc_offload_out_params *outp,
  132. struct ipa_uc_offload_ctx *ntn_ctx)
  133. {
  134. struct ipa_ioc_add_hdr *hdr = NULL;
  135. struct ipa_tx_intf tx;
  136. struct ipa_rx_intf rx;
  137. struct ipa_ioc_tx_intf_prop tx_prop[2];
  138. struct ipa_ioc_rx_intf_prop rx_prop[2];
  139. int ret = 0;
  140. u32 len;
  141. IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
  142. inp->netdev_name);
  143. ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
  144. if (ret) {
  145. IPA_UC_OFFLOAD_ERR("fail to register PM client\n");
  146. return -EFAULT;
  147. }
  148. memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
  149. ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
  150. ntn_ctx->notify = inp->notify;
  151. ntn_ctx->priv = inp->priv;
  152. /* add partial header */
  153. len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
  154. hdr = kzalloc(len, GFP_KERNEL);
  155. if (hdr == NULL) {
  156. ret = -ENOMEM;
  157. goto fail_alloc;
  158. }
  159. if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
  160. IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
  161. ret = -EFAULT;
  162. goto fail;
  163. }
  164. /* populate tx prop */
  165. tx.num_props = 2;
  166. tx.prop = tx_prop;
  167. memset(tx_prop, 0, sizeof(tx_prop));
  168. tx_prop[0].ip = IPA_IP_v4;
  169. tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
  170. tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
  171. memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
  172. sizeof(tx_prop[0].hdr_name));
  173. tx_prop[1].ip = IPA_IP_v6;
  174. tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
  175. tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
  176. memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
  177. sizeof(tx_prop[1].hdr_name));
  178. /* populate rx prop */
  179. rx.num_props = 2;
  180. rx.prop = rx_prop;
  181. memset(rx_prop, 0, sizeof(rx_prop));
  182. rx_prop[0].ip = IPA_IP_v4;
  183. rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD;
  184. rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
  185. if (inp->is_meta_data_valid) {
  186. rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
  187. rx_prop[0].attrib.meta_data = inp->meta_data;
  188. rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
  189. }
  190. rx_prop[1].ip = IPA_IP_v6;
  191. rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD;
  192. rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
  193. if (inp->is_meta_data_valid) {
  194. rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
  195. rx_prop[1].attrib.meta_data = inp->meta_data;
  196. rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
  197. }
  198. if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
  199. IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
  200. memset(ntn_ctx, 0, sizeof(*ntn_ctx));
  201. ret = -EFAULT;
  202. goto fail;
  203. }
  204. ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
  205. ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
  206. init_completion(&ntn_ctx->ntn_completion);
  207. ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
  208. kfree(hdr);
  209. return ret;
  210. fail:
  211. kfree(hdr);
  212. fail_alloc:
  213. ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
  214. return ret;
  215. }
  216. int ipa_uc_offload_reg_intf(
  217. struct ipa_uc_offload_intf_params *inp,
  218. struct ipa_uc_offload_out_params *outp)
  219. {
  220. struct ipa_uc_offload_ctx *ctx;
  221. int ret = 0;
  222. if (inp == NULL || outp == NULL) {
  223. IPA_UC_OFFLOAD_ERR("invalid params in=%pK out=%pK\n",
  224. inp, outp);
  225. return -EINVAL;
  226. }
  227. if (inp->proto <= IPA_UC_INVALID ||
  228. inp->proto >= IPA_UC_MAX_PROT_SIZE) {
  229. IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
  230. return -EINVAL;
  231. }
  232. if (!ipa_uc_offload_ctx[inp->proto]) {
  233. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  234. if (ctx == NULL) {
  235. IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
  236. return -EFAULT;
  237. }
  238. ipa_uc_offload_ctx[inp->proto] = ctx;
  239. ctx->proto = inp->proto;
  240. } else
  241. ctx = ipa_uc_offload_ctx[inp->proto];
  242. if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
  243. IPA_UC_OFFLOAD_ERR("Already Initialized\n");
  244. return -EINVAL;
  245. }
  246. if (ctx->proto == IPA_UC_NTN) {
  247. ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
  248. if (!ret)
  249. outp->clnt_hndl = IPA_UC_NTN;
  250. }
  251. return ret;
  252. }
  253. EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
  254. static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
  255. struct ipa_ntn_setup_info *source)
  256. {
  257. int result;
  258. IPA_UC_OFFLOAD_DBG("Allocating smmu info\n");
  259. memcpy(dest, source, sizeof(struct ipa_ntn_setup_info));
  260. dest->data_buff_list =
  261. kcalloc(dest->num_buffers, sizeof(struct ntn_buff_smmu_map),
  262. GFP_KERNEL);
  263. if (dest->data_buff_list == NULL) {
  264. IPA_UC_OFFLOAD_ERR("failed to alloc smmu info\n");
  265. return -ENOMEM;
  266. }
  267. memcpy(dest->data_buff_list, source->data_buff_list,
  268. sizeof(struct ntn_buff_smmu_map) * dest->num_buffers);
  269. result = ipa_smmu_store_sgt(&dest->buff_pool_base_sgt,
  270. source->buff_pool_base_sgt);
  271. if (result) {
  272. kfree(dest->data_buff_list);
  273. return result;
  274. }
  275. result = ipa_smmu_store_sgt(&dest->ring_base_sgt,
  276. source->ring_base_sgt);
  277. if (result) {
  278. kfree(dest->data_buff_list);
  279. ipa_smmu_free_sgt(&dest->buff_pool_base_sgt);
  280. return result;
  281. }
  282. return 0;
  283. }
/*
 * Release everything ipa_uc_ntn_alloc_conn_smmu_info() allocated:
 * the buffer map list and both stored scatter-gather tables.
 */
static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params)
{
	kfree(params->data_buff_list);
	ipa_smmu_free_sgt(&params->buff_pool_base_sgt);
	ipa_smmu_free_sgt(&params->ring_base_sgt);
}
  290. int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
  291. struct ipa_ntn_conn_out_params *outp,
  292. struct ipa_uc_offload_ctx *ntn_ctx)
  293. {
  294. int result = 0;
  295. enum ipa_uc_offload_state prev_state;
  296. if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
  297. IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
  298. return -EINVAL;
  299. }
  300. prev_state = ntn_ctx->state;
  301. if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
  302. inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
  303. IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
  304. return -EINVAL;
  305. }
  306. if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
  307. inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
  308. IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
  309. return -EINVAL;
  310. }
  311. result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
  312. if (result) {
  313. IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
  314. return result;
  315. }
  316. ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
  317. result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
  318. ntn_ctx->priv, ntn_ctx->hdr_len, outp);
  319. if (result) {
  320. IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n",
  321. result);
  322. ntn_ctx->state = prev_state;
  323. result = -EFAULT;
  324. goto fail;
  325. }
  326. if (ntn_ctx->conn.dl.smmu_enabled) {
  327. result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl,
  328. &inp->dl);
  329. if (result) {
  330. IPA_UC_OFFLOAD_ERR("alloc failure on TX\n");
  331. goto fail;
  332. }
  333. result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul,
  334. &inp->ul);
  335. if (result) {
  336. ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
  337. IPA_UC_OFFLOAD_ERR("alloc failure on RX\n");
  338. goto fail;
  339. }
  340. }
  341. fail:
  342. return result;
  343. }
  344. int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
  345. struct ipa_uc_offload_conn_out_params *outp)
  346. {
  347. int ret = 0;
  348. struct ipa_uc_offload_ctx *offload_ctx;
  349. if (!(inp && outp)) {
  350. IPA_UC_OFFLOAD_ERR("bad parm. in=%pK out=%pK\n", inp, outp);
  351. return -EINVAL;
  352. }
  353. if (inp->clnt_hndl <= IPA_UC_INVALID ||
  354. inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
  355. IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
  356. inp->clnt_hndl);
  357. return -EINVAL;
  358. }
  359. offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
  360. if (!offload_ctx) {
  361. IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
  362. return -EINVAL;
  363. }
  364. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
  365. IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
  366. return -EPERM;
  367. }
  368. switch (offload_ctx->proto) {
  369. case IPA_UC_NTN:
  370. ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
  371. offload_ctx);
  372. break;
  373. default:
  374. IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
  375. ret = -EINVAL;
  376. break;
  377. }
  378. return ret;
  379. }
  380. EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
  381. static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
  382. {
  383. int ipa_ep_idx_ul, ipa_ep_idx_dl;
  384. int ret = 0;
  385. if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
  386. IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
  387. return -EINVAL;
  388. }
  389. ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
  390. ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
  391. if (ret) {
  392. IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
  393. ret);
  394. return -EFAULT;
  395. }
  396. ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
  397. ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
  398. ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl,
  399. &ntn_ctx->conn);
  400. if (ret) {
  401. IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
  402. ret);
  403. return -EFAULT;
  404. }
  405. if (ntn_ctx->conn.dl.smmu_enabled) {
  406. ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
  407. ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.ul);
  408. }
  409. return ret;
  410. }
  411. int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
  412. {
  413. struct ipa_uc_offload_ctx *offload_ctx;
  414. int ret = 0;
  415. if (clnt_hdl <= IPA_UC_INVALID ||
  416. clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
  417. IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
  418. return -EINVAL;
  419. }
  420. offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
  421. if (!offload_ctx) {
  422. IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
  423. return -EINVAL;
  424. }
  425. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
  426. IPA_UC_OFFLOAD_ERR("Invalid state\n");
  427. return -EINVAL;
  428. }
  429. switch (offload_ctx->proto) {
  430. case IPA_UC_NTN:
  431. ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
  432. break;
  433. default:
  434. IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
  435. ret = -EINVAL;
  436. break;
  437. }
  438. return ret;
  439. }
  440. EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
  441. static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
  442. {
  443. int len, result = 0;
  444. struct ipa_ioc_del_hdr *hdr;
  445. ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
  446. len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
  447. hdr = kzalloc(len, GFP_KERNEL);
  448. if (hdr == NULL)
  449. return -ENOMEM;
  450. hdr->commit = 1;
  451. hdr->num_hdls = 2;
  452. hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
  453. hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
  454. if (ipa_del_hdr(hdr)) {
  455. IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
  456. result = -EFAULT;
  457. goto fail;
  458. }
  459. if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
  460. IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
  461. result = -EFAULT;
  462. goto fail;
  463. }
  464. fail:
  465. kfree(hdr);
  466. return result;
  467. }
  468. int ipa_uc_offload_cleanup(u32 clnt_hdl)
  469. {
  470. struct ipa_uc_offload_ctx *offload_ctx;
  471. int ret = 0;
  472. if (clnt_hdl <= IPA_UC_INVALID ||
  473. clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
  474. IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
  475. return -EINVAL;
  476. }
  477. offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
  478. if (!offload_ctx) {
  479. IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
  480. return -EINVAL;
  481. }
  482. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
  483. IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
  484. return -EINVAL;
  485. }
  486. switch (offload_ctx->proto) {
  487. case IPA_UC_NTN:
  488. ret = ipa_uc_ntn_cleanup(offload_ctx);
  489. break;
  490. default:
  491. IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
  492. ret = -EINVAL;
  493. break;
  494. }
  495. if (!ret) {
  496. kfree(offload_ctx);
  497. offload_ctx = NULL;
  498. ipa_uc_offload_ctx[clnt_hdl] = NULL;
  499. }
  500. return ret;
  501. }
  502. EXPORT_SYMBOL(ipa_uc_offload_cleanup);
/**
 * ipa_uc_offload_reg_rdyCB() - register a uC ready callback if the uC
 * is not already up
 * @inp: [in/out] input/output parameters from/to the client;
 * is_uC_ready is set to reflect the current uC state
 *
 * Returns: 0 on success, negative on failure
 */
  512. int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
  513. {
  514. int ret = 0;
  515. if (!inp) {
  516. IPA_UC_OFFLOAD_ERR("Invalid input\n");
  517. return -EINVAL;
  518. }
  519. if (inp->proto == IPA_UC_NTN)
  520. ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
  521. if (ret == -EEXIST) {
  522. inp->is_uC_ready = true;
  523. ret = 0;
  524. } else
  525. inp->is_uC_ready = false;
  526. return ret;
  527. }
  528. EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
  529. void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
  530. {
  531. if (proto == IPA_UC_NTN)
  532. ipa_ntn_uc_dereg_rdyCB();
  533. }
  534. EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);