ipa_uc_offload.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/ipa_uc_offload.h>
  6. #include <linux/msm_ipa.h>
  7. #include "../ipa_common_i.h"
  8. #include "../ipa_v3/ipa_pm.h"
/* Required alignment (bytes) for NTN ring and buffer-pool DMA bases. */
#define IPA_NTN_DMA_POOL_ALIGNMENT 8
#define OFFLOAD_DRV_NAME "ipa_uc_offload"

/* Debug trace: printk at debug level plus both IPC log buffers. */
#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
	do { \
		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Low-priority trace: debug printk, but only the low IPC log buffer. */
#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
	do { \
		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Error trace: printk at error level plus both IPC log buffers. */
#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
	do { \
		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Info trace: printk at info level plus both IPC log buffers. */
#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
	do { \
		pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
/* Lifecycle of one offload-protocol context. */
enum ipa_uc_offload_state {
	IPA_UC_OFFLOAD_STATE_INVALID,		/* slot allocated, not registered */
	IPA_UC_OFFLOAD_STATE_INITIALIZED,	/* interface registered, pipes down */
	IPA_UC_OFFLOAD_STATE_UP,		/* uC offload pipes connected */
};

/*
 * struct ipa_uc_offload_ctx - per-protocol uC offload bookkeeping
 * @proto: which offload protocol this context serves (currently NTN)
 * @state: current lifecycle state, see enum above
 * @priv: opaque client cookie handed back via @notify
 * @hdr_len: L2 header length, taken from hdr_info[0] at registration
 * @partial_hdr_hdl: IPA handles of the committed IPv4/IPv6 partial headers
 * @netdev_name: client netdev name, used for interface (de)registration
 * @notify: client callback for pipe events
 * @ntn_completion: completed when the RM ETHERNET_PROD grant arrives
 * @pm_hdl: IPA PM client handle (PM mode only)
 * @conn: connection params cached at connect time (SMMU path) so teardown
 *        can free the stored SG tables and buffer list
 */
struct ipa_uc_offload_ctx {
	enum ipa_uc_offload_proto proto;
	enum ipa_uc_offload_state state;
	void *priv;
	u8 hdr_len;
	u32 partial_hdr_hdl[IPA_IP_MAX];
	char netdev_name[IPA_RESOURCE_NAME_MAX];
	ipa_notify_cb notify;
	struct completion ntn_completion;
	u32 pm_hdl;
	struct ipa_ntn_conn_in_params conn;
};

/* One context slot per offload protocol, lazily allocated on reg_intf. */
static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];

static int ipa_uc_ntn_cons_release(void);
static int ipa_uc_ntn_cons_request(void);
static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long);
  66. static int ipa_commit_partial_hdr(
  67. struct ipa_ioc_add_hdr *hdr,
  68. const char *netdev_name,
  69. struct ipa_hdr_info *hdr_info)
  70. {
  71. int i;
  72. if (hdr == NULL || hdr_info == NULL) {
  73. IPA_UC_OFFLOAD_ERR("Invalid input\n");
  74. return -EINVAL;
  75. }
  76. hdr->commit = 1;
  77. hdr->num_hdrs = 2;
  78. snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
  79. "%s_ipv4", netdev_name);
  80. snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
  81. "%s_ipv6", netdev_name);
  82. for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
  83. hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
  84. memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
  85. hdr->hdr[i].type = hdr_info[i].hdr_type;
  86. hdr->hdr[i].is_partial = 1;
  87. hdr->hdr[i].is_eth2_ofst_valid = 1;
  88. hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
  89. }
  90. if (ipa_add_hdr(hdr)) {
  91. IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
  92. return -EFAULT;
  93. }
  94. return 0;
  95. }
  96. static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event)
  97. {
  98. /* suspend/resume is not supported */
  99. IPA_UC_OFFLOAD_DBG("event = %d\n", event);
  100. }
  101. static int ipa_uc_offload_ntn_register_pm_client(
  102. struct ipa_uc_offload_ctx *ntn_ctx)
  103. {
  104. int res;
  105. struct ipa_pm_register_params params;
  106. memset(&params, 0, sizeof(params));
  107. params.name = "ETH";
  108. params.callback = ipa_uc_offload_ntn_pm_cb;
  109. params.user_data = ntn_ctx;
  110. params.group = IPA_PM_GROUP_DEFAULT;
  111. res = ipa_pm_register(&params, &ntn_ctx->pm_hdl);
  112. if (res) {
  113. IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res);
  114. return res;
  115. }
  116. res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
  117. IPA_CLIENT_ETHERNET_CONS);
  118. if (res) {
  119. IPA_UC_OFFLOAD_ERR("fail to associate cons with PM %d\n", res);
  120. ipa_pm_deregister(ntn_ctx->pm_hdl);
  121. ntn_ctx->pm_hdl = ~0;
  122. return res;
  123. }
  124. return 0;
  125. }
  126. static void ipa_uc_offload_ntn_deregister_pm_client(
  127. struct ipa_uc_offload_ctx *ntn_ctx)
  128. {
  129. ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
  130. ipa_pm_deregister(ntn_ctx->pm_hdl);
  131. }
  132. static int ipa_uc_offload_ntn_create_rm_resources(
  133. struct ipa_uc_offload_ctx *ntn_ctx)
  134. {
  135. int ret;
  136. struct ipa_rm_create_params param;
  137. memset(&param, 0, sizeof(param));
  138. param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
  139. param.reg_params.user_data = ntn_ctx;
  140. param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
  141. param.floor_voltage = IPA_VOLTAGE_SVS;
  142. ret = ipa_rm_create_resource(&param);
  143. if (ret) {
  144. IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n");
  145. return -EFAULT;
  146. }
  147. memset(&param, 0, sizeof(param));
  148. param.name = IPA_RM_RESOURCE_ETHERNET_CONS;
  149. param.request_resource = ipa_uc_ntn_cons_request;
  150. param.release_resource = ipa_uc_ntn_cons_release;
  151. ret = ipa_rm_create_resource(&param);
  152. if (ret) {
  153. IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
  154. ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
  155. return -EFAULT;
  156. }
  157. return 0;
  158. }
  159. static int ipa_uc_offload_ntn_reg_intf(
  160. struct ipa_uc_offload_intf_params *inp,
  161. struct ipa_uc_offload_out_params *outp,
  162. struct ipa_uc_offload_ctx *ntn_ctx)
  163. {
  164. struct ipa_ioc_add_hdr *hdr = NULL;
  165. struct ipa_tx_intf tx;
  166. struct ipa_rx_intf rx;
  167. struct ipa_ioc_tx_intf_prop tx_prop[2];
  168. struct ipa_ioc_rx_intf_prop rx_prop[2];
  169. int ret = 0;
  170. u32 len;
  171. IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
  172. inp->netdev_name);
  173. if (ipa_pm_is_used())
  174. ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
  175. else
  176. ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx);
  177. if (ret) {
  178. IPA_UC_OFFLOAD_ERR("fail to create rm resource\n");
  179. return -EFAULT;
  180. }
  181. memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
  182. ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
  183. ntn_ctx->notify = inp->notify;
  184. ntn_ctx->priv = inp->priv;
  185. /* add partial header */
  186. len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
  187. hdr = kzalloc(len, GFP_KERNEL);
  188. if (hdr == NULL) {
  189. ret = -ENOMEM;
  190. goto fail_alloc;
  191. }
  192. if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
  193. IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
  194. ret = -EFAULT;
  195. goto fail;
  196. }
  197. /* populate tx prop */
  198. tx.num_props = 2;
  199. tx.prop = tx_prop;
  200. memset(tx_prop, 0, sizeof(tx_prop));
  201. tx_prop[0].ip = IPA_IP_v4;
  202. tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
  203. tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
  204. memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
  205. sizeof(tx_prop[0].hdr_name));
  206. tx_prop[1].ip = IPA_IP_v6;
  207. tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
  208. tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
  209. memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
  210. sizeof(tx_prop[1].hdr_name));
  211. /* populate rx prop */
  212. rx.num_props = 2;
  213. rx.prop = rx_prop;
  214. memset(rx_prop, 0, sizeof(rx_prop));
  215. rx_prop[0].ip = IPA_IP_v4;
  216. rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD;
  217. rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
  218. if (inp->is_meta_data_valid) {
  219. rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
  220. rx_prop[0].attrib.meta_data = inp->meta_data;
  221. rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
  222. }
  223. rx_prop[1].ip = IPA_IP_v6;
  224. rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD;
  225. rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
  226. if (inp->is_meta_data_valid) {
  227. rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
  228. rx_prop[1].attrib.meta_data = inp->meta_data;
  229. rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
  230. }
  231. if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
  232. IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
  233. memset(ntn_ctx, 0, sizeof(*ntn_ctx));
  234. ret = -EFAULT;
  235. goto fail;
  236. }
  237. ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
  238. ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
  239. init_completion(&ntn_ctx->ntn_completion);
  240. ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
  241. kfree(hdr);
  242. return ret;
  243. fail:
  244. kfree(hdr);
  245. fail_alloc:
  246. if (ipa_pm_is_used()) {
  247. ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
  248. } else {
  249. ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
  250. ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
  251. }
  252. return ret;
  253. }
  254. int ipa_uc_offload_reg_intf(
  255. struct ipa_uc_offload_intf_params *inp,
  256. struct ipa_uc_offload_out_params *outp)
  257. {
  258. struct ipa_uc_offload_ctx *ctx;
  259. int ret = 0;
  260. if (inp == NULL || outp == NULL) {
  261. IPA_UC_OFFLOAD_ERR("invalid params in=%pK out=%pK\n",
  262. inp, outp);
  263. return -EINVAL;
  264. }
  265. if (inp->proto <= IPA_UC_INVALID ||
  266. inp->proto >= IPA_UC_MAX_PROT_SIZE) {
  267. IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
  268. return -EINVAL;
  269. }
  270. if (!ipa_uc_offload_ctx[inp->proto]) {
  271. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  272. if (ctx == NULL) {
  273. IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
  274. return -EFAULT;
  275. }
  276. ipa_uc_offload_ctx[inp->proto] = ctx;
  277. ctx->proto = inp->proto;
  278. } else
  279. ctx = ipa_uc_offload_ctx[inp->proto];
  280. if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
  281. IPA_UC_OFFLOAD_ERR("Already Initialized\n");
  282. return -EINVAL;
  283. }
  284. if (ctx->proto == IPA_UC_NTN) {
  285. ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
  286. if (!ret)
  287. outp->clnt_hndl = IPA_UC_NTN;
  288. }
  289. return ret;
  290. }
  291. EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
  292. static int ipa_uc_ntn_cons_release(void)
  293. {
  294. return 0;
  295. }
  296. static int ipa_uc_ntn_cons_request(void)
  297. {
  298. int ret = 0;
  299. struct ipa_uc_offload_ctx *ntn_ctx;
  300. ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
  301. if (!ntn_ctx) {
  302. IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
  303. ret = -EFAULT;
  304. } else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
  305. IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
  306. ret = -EFAULT;
  307. }
  308. return ret;
  309. }
  310. static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
  311. unsigned long data)
  312. {
  313. struct ipa_uc_offload_ctx *offload_ctx;
  314. offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
  315. if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
  316. offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
  317. IPA_UC_OFFLOAD_ERR("Invalid user data\n");
  318. return;
  319. }
  320. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
  321. IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
  322. switch (event) {
  323. case IPA_RM_RESOURCE_GRANTED:
  324. complete_all(&offload_ctx->ntn_completion);
  325. break;
  326. case IPA_RM_RESOURCE_RELEASED:
  327. break;
  328. default:
  329. IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
  330. break;
  331. }
  332. }
  333. static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
  334. struct ipa_ntn_setup_info *source)
  335. {
  336. int result;
  337. IPA_UC_OFFLOAD_DBG("Allocating smmu info\n");
  338. memcpy(dest, source, sizeof(struct ipa_ntn_setup_info));
  339. dest->data_buff_list =
  340. kcalloc(dest->num_buffers, sizeof(struct ntn_buff_smmu_map),
  341. GFP_KERNEL);
  342. if (dest->data_buff_list == NULL) {
  343. IPA_UC_OFFLOAD_ERR("failed to alloc smmu info\n");
  344. return -ENOMEM;
  345. }
  346. memcpy(dest->data_buff_list, source->data_buff_list,
  347. sizeof(struct ntn_buff_smmu_map) * dest->num_buffers);
  348. result = ipa_smmu_store_sgt(&dest->buff_pool_base_sgt,
  349. source->buff_pool_base_sgt);
  350. if (result) {
  351. kfree(dest->data_buff_list);
  352. return result;
  353. }
  354. result = ipa_smmu_store_sgt(&dest->ring_base_sgt,
  355. source->ring_base_sgt);
  356. if (result) {
  357. kfree(dest->data_buff_list);
  358. ipa_smmu_free_sgt(&dest->buff_pool_base_sgt);
  359. return result;
  360. }
  361. return 0;
  362. }
  363. static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params)
  364. {
  365. kfree(params->data_buff_list);
  366. ipa_smmu_free_sgt(&params->buff_pool_base_sgt);
  367. ipa_smmu_free_sgt(&params->ring_base_sgt);
  368. }
/*
 * ipa_uc_ntn_conn_pipes() - connect the NTN UL/DL uC offload pipes
 * @inp: connection parameters from the client (ring/buffer bases, etc.)
 * @outp: filled by ipa_setup_uc_ntn_pipes() on success
 * @ntn_ctx: the NTN offload context (must be INITIALIZED)
 *
 * Validates DMA alignment, takes the power vote (PM or legacy RM),
 * brings up the uC pipes, and - in the SMMU case - caches a deep copy
 * of the connection info in @ntn_ctx->conn for later teardown.
 *
 * Returns 0 on success, negative errno on failure.
 */
int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
	struct ipa_ntn_conn_out_params *outp,
	struct ipa_uc_offload_ctx *ntn_ctx)
{
	int result = 0;
	enum ipa_uc_offload_state prev_state;

	/*
	 * NOTE(review): this compares the cached ctx copy, not inp->dl/ul -
	 * presumably intended to validate the incoming params; confirm.
	 */
	if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
		IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
		return -EINVAL;
	}

	prev_state = ntn_ctx->state;

	/* both ring and buffer-pool bases must be 8-byte aligned for uC DMA */
	if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
		inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
		IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
		return -EINVAL;
	}
	if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
		inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
		IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
		return -EINVAL;
	}

	if (ipa_pm_is_used()) {
		/* PM mode: a synchronous activate is the whole power vote */
		result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
		if (result) {
			IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
			return result;
		}
	} else {
		/* legacy RM mode: add ETH->APPS dependency, then request
		 * the producer; -EINPROGRESS means wait (up to 10s) for
		 * the GRANTED event via ntn_completion
		 */
		result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
		if (result) {
			IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n",
				result);
			return result;
		}
		result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
		if (result == -EINPROGRESS) {
			if (wait_for_completion_timeout(&ntn_ctx->ntn_completion
				, 10*HZ) == 0) {
				IPA_UC_OFFLOAD_ERR("ETH_PROD req timeout\n");
				result = -EFAULT;
				goto fail;
			}
		} else if (result != 0) {
			IPA_UC_OFFLOAD_ERR("fail to request resource\n");
			result = -EFAULT;
			goto fail;
		}
	}

	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
	result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
		ntn_ctx->priv, ntn_ctx->hdr_len, outp);
	if (result) {
		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n",
			result);
		ntn_ctx->state = prev_state;
		result = -EFAULT;
		goto fail;
	}

	if (ntn_ctx->conn.dl.smmu_enabled) {
		result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl,
			&inp->dl);
		if (result) {
			/* NOTE(review): state stays UP here even though the
			 * pipes' smmu bookkeeping failed - verify intent
			 */
			IPA_UC_OFFLOAD_ERR("alloc failure on TX\n");
			goto fail;
		}
		result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul,
			&inp->ul);
		if (result) {
			ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
			IPA_UC_OFFLOAD_ERR("alloc failure on RX\n");
			goto fail;
		}
	}

	/*
	 * Success also falls through to "fail": in legacy RM mode the
	 * ETH->APPS dependency is deleted unconditionally once setup is
	 * done. NOTE(review): looks intentional (dependency only needed
	 * during bring-up) but worth confirming against the RM design.
	 */
fail:
	if (!ipa_pm_is_used())
		ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	return result;
}
  449. int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
  450. struct ipa_uc_offload_conn_out_params *outp)
  451. {
  452. int ret = 0;
  453. struct ipa_uc_offload_ctx *offload_ctx;
  454. if (!(inp && outp)) {
  455. IPA_UC_OFFLOAD_ERR("bad parm. in=%pK out=%pK\n", inp, outp);
  456. return -EINVAL;
  457. }
  458. if (inp->clnt_hndl <= IPA_UC_INVALID ||
  459. inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
  460. IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
  461. inp->clnt_hndl);
  462. return -EINVAL;
  463. }
  464. offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
  465. if (!offload_ctx) {
  466. IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
  467. return -EINVAL;
  468. }
  469. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
  470. IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
  471. return -EPERM;
  472. }
  473. switch (offload_ctx->proto) {
  474. case IPA_UC_NTN:
  475. ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
  476. offload_ctx);
  477. break;
  478. default:
  479. IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
  480. ret = -EINVAL;
  481. break;
  482. }
  483. return ret;
  484. }
  485. EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
  486. int ipa_set_perf_profile(struct ipa_perf_profile *profile)
  487. {
  488. struct ipa_rm_perf_profile rm_profile;
  489. enum ipa_rm_resource_name resource_name;
  490. if (profile == NULL) {
  491. IPA_UC_OFFLOAD_ERR("Invalid input\n");
  492. return -EINVAL;
  493. }
  494. rm_profile.max_supported_bandwidth_mbps =
  495. profile->max_supported_bw_mbps;
  496. if (profile->client == IPA_CLIENT_ETHERNET_PROD) {
  497. resource_name = IPA_RM_RESOURCE_ETHERNET_PROD;
  498. } else if (profile->client == IPA_CLIENT_ETHERNET_CONS) {
  499. resource_name = IPA_RM_RESOURCE_ETHERNET_CONS;
  500. } else {
  501. IPA_UC_OFFLOAD_ERR("not supported\n");
  502. return -EINVAL;
  503. }
  504. if (ipa_pm_is_used())
  505. return ipa_pm_set_throughput(
  506. ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
  507. profile->max_supported_bw_mbps);
  508. if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
  509. IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
  510. return -EFAULT;
  511. }
  512. return 0;
  513. }
  514. EXPORT_SYMBOL(ipa_set_perf_profile);
  515. static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
  516. {
  517. int ipa_ep_idx_ul, ipa_ep_idx_dl;
  518. int ret = 0;
  519. if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
  520. IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
  521. return -EINVAL;
  522. }
  523. ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
  524. if (ipa_pm_is_used()) {
  525. ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
  526. if (ret) {
  527. IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
  528. ret);
  529. return -EFAULT;
  530. }
  531. } else {
  532. ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
  533. if (ret) {
  534. IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n",
  535. ret);
  536. return -EFAULT;
  537. }
  538. ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
  539. IPA_RM_RESOURCE_APPS_CONS);
  540. if (ret) {
  541. IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret);
  542. return -EFAULT;
  543. }
  544. }
  545. ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
  546. ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
  547. ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl,
  548. &ntn_ctx->conn);
  549. if (ret) {
  550. IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
  551. ret);
  552. return -EFAULT;
  553. }
  554. if (ntn_ctx->conn.dl.smmu_enabled) {
  555. ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
  556. ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.ul);
  557. }
  558. return ret;
  559. }
  560. int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
  561. {
  562. struct ipa_uc_offload_ctx *offload_ctx;
  563. int ret = 0;
  564. if (clnt_hdl <= IPA_UC_INVALID ||
  565. clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
  566. IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
  567. return -EINVAL;
  568. }
  569. offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
  570. if (!offload_ctx) {
  571. IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
  572. return -EINVAL;
  573. }
  574. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
  575. IPA_UC_OFFLOAD_ERR("Invalid state\n");
  576. return -EINVAL;
  577. }
  578. switch (offload_ctx->proto) {
  579. case IPA_UC_NTN:
  580. ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
  581. break;
  582. default:
  583. IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
  584. ret = -EINVAL;
  585. break;
  586. }
  587. return ret;
  588. }
  589. EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
  590. static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
  591. {
  592. int len, result = 0;
  593. struct ipa_ioc_del_hdr *hdr;
  594. if (ipa_pm_is_used()) {
  595. ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
  596. } else {
  597. if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
  598. IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n");
  599. return -EFAULT;
  600. }
  601. if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
  602. IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n");
  603. return -EFAULT;
  604. }
  605. }
  606. len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
  607. hdr = kzalloc(len, GFP_KERNEL);
  608. if (hdr == NULL)
  609. return -ENOMEM;
  610. hdr->commit = 1;
  611. hdr->num_hdls = 2;
  612. hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
  613. hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
  614. if (ipa_del_hdr(hdr)) {
  615. IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
  616. result = -EFAULT;
  617. goto fail;
  618. }
  619. if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
  620. IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
  621. result = -EFAULT;
  622. goto fail;
  623. }
  624. fail:
  625. kfree(hdr);
  626. return result;
  627. }
  628. int ipa_uc_offload_cleanup(u32 clnt_hdl)
  629. {
  630. struct ipa_uc_offload_ctx *offload_ctx;
  631. int ret = 0;
  632. if (clnt_hdl <= IPA_UC_INVALID ||
  633. clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
  634. IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
  635. return -EINVAL;
  636. }
  637. offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
  638. if (!offload_ctx) {
  639. IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
  640. return -EINVAL;
  641. }
  642. if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
  643. IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
  644. return -EINVAL;
  645. }
  646. switch (offload_ctx->proto) {
  647. case IPA_UC_NTN:
  648. ret = ipa_uc_ntn_cleanup(offload_ctx);
  649. break;
  650. default:
  651. IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
  652. ret = -EINVAL;
  653. break;
  654. }
  655. if (!ret) {
  656. kfree(offload_ctx);
  657. offload_ctx = NULL;
  658. ipa_uc_offload_ctx[clnt_hdl] = NULL;
  659. }
  660. return ret;
  661. }
  662. EXPORT_SYMBOL(ipa_uc_offload_cleanup);
  663. /**
  664. * ipa_uc_offload_uc_rdyCB() - To register uC ready CB if uC not
  665. * ready
  666. * @inout: [in/out] input/output parameters
  667. * from/to client
  668. *
  669. * Returns: 0 on success, negative on failure
  670. *
  671. */
  672. int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
  673. {
  674. int ret = 0;
  675. if (!inp) {
  676. IPA_UC_OFFLOAD_ERR("Invalid input\n");
  677. return -EINVAL;
  678. }
  679. if (inp->proto == IPA_UC_NTN)
  680. ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
  681. if (ret == -EEXIST) {
  682. inp->is_uC_ready = true;
  683. ret = 0;
  684. } else
  685. inp->is_uC_ready = false;
  686. return ret;
  687. }
  688. EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
  689. void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
  690. {
  691. if (proto == IPA_UC_NTN)
  692. ipa_ntn_uc_dereg_rdyCB();
  693. }
  694. EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);