ipa_dma.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/export.h>
  7. #include <linux/delay.h>
  8. #include <linux/kernel.h>
  9. #include <linux/msm_ipa.h>
  10. #include <linux/mutex.h>
  11. #include <linux/ipa.h>
  12. #include "linux/msm_gsi.h"
  13. #include <linux/dmapool.h>
  14. #include "ipa_i.h"
/* usleep_range() bounds (microseconds) between destination-channel polls */
#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
/* descriptor FIFO size used for the async memcpy pipes */
#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
/* largest number of bytes a single memcpy transfer may carry */
#define IPA_DMA_MAX_PKT_SZ 0xFFFF
/* size of each dummy buffer chained for the IPA v3.5 prefetch WA */
#define IPA_DMA_DUMMY_BUFF_SZ 8
/* copies shorter than this need the prefetch WA dummy transfer (v3.5) */
#define IPA_DMA_PREFETCH_WA_THRESHOLD 9
#define IPADMA_DRV_NAME "ipa_dma"
/* debug print: console (pr_debug) plus both regular and low IPC log buffers */
#define IPADMA_DBG(fmt, args...) \
	do { \
		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
/* low-priority debug print: console plus the low IPC log buffer only */
#define IPADMA_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
/* error print: console (pr_err) plus both IPC log buffers */
#define IPADMA_ERR(fmt, args...) \
	do { \
		pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)
#define IPADMA_FUNC_ENTRY() \
	IPADMA_DBG_LOW("ENTRY\n")
#define IPADMA_FUNC_EXIT() \
	IPADMA_DBG_LOW("EXIT\n")
/* debugfs hooks compile to no-ops when CONFIG_DEBUG_FS is off */
#ifdef CONFIG_DEBUG_FS
#define IPADMA_MAX_MSG_LEN 1024
static char dbg_buff[IPADMA_MAX_MSG_LEN];
static void ipa3_dma_debugfs_init(void);
static void ipa3_dma_debugfs_destroy(void);
#else
static void ipa3_dma_debugfs_init(void) {}
static void ipa3_dma_debugfs_destroy(void) {}
#endif
/**
 * struct ipa3_dma_ctx -IPADMA driver context information
 * @enable_ref_cnt: ipa dma enable reference count
 * @destroy_pending: destroy ipa_dma after handling all pending memcpy
 * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs
 * @sync_lock: lock for synchronisation in sync_memcpy
 * @async_lock: lock for synchronisation in async_memcpy
 * @enable_lock: lock for is_enabled
 * @pending_lock: lock for synchronize is_enable and pending_cnt
 * @done: no pending works-ipadma can be destroyed
 * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
 * @ipa_dma_async_prod_hdl:handle of async memcpy producer
 * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
 * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
 * @sync_memcpy_pending_cnt: number of pending sync memcopy operations
 * @async_memcpy_pending_cnt: number of pending async memcopy operations
 * @uc_memcpy_pending_cnt: number of pending uc memcopy operations
 * @total_sync_memcpy: total number of sync memcpy (statistics)
 * @total_async_memcpy: total number of async memcpy (statistics)
 * @total_uc_memcpy: total number of uc memcpy (statistics)
 * @ipa_dma_dummy_src_sync: dummy source buffer for the sync-path
 *	IPA v3.5 prefetch workaround
 * @ipa_dma_dummy_dst_sync: dummy destination buffer for the sync-path
 *	IPA v3.5 prefetch workaround
 * @ipa_dma_dummy_src_async: dummy source buffer for the async-path
 *	IPA v3.5 prefetch workaround
 * @ipa_dma_dummy_dst_async: dummy destination buffer for the async-path
 *	IPA v3.5 prefetch workaround
 */
struct ipa3_dma_ctx {
	unsigned int enable_ref_cnt;
	bool destroy_pending;
	struct kmem_cache *ipa_dma_xfer_wrapper_cache;
	struct mutex sync_lock;
	spinlock_t async_lock;
	struct mutex enable_lock;
	spinlock_t pending_lock;
	struct completion done;
	u32 ipa_dma_sync_prod_hdl;
	u32 ipa_dma_async_prod_hdl;
	u32 ipa_dma_sync_cons_hdl;
	u32 ipa_dma_async_cons_hdl;
	atomic_t sync_memcpy_pending_cnt;
	atomic_t async_memcpy_pending_cnt;
	atomic_t uc_memcpy_pending_cnt;
	atomic_t total_sync_memcpy;
	atomic_t total_async_memcpy;
	atomic_t total_uc_memcpy;
	struct ipa_mem_buffer ipa_dma_dummy_src_sync;
	struct ipa_mem_buffer ipa_dma_dummy_dst_sync;
	struct ipa_mem_buffer ipa_dma_dummy_src_async;
	struct ipa_mem_buffer ipa_dma_dummy_dst_async;
};
/* Singleton driver context; NULL until ipa3_dma_init() succeeds */
static struct ipa3_dma_ctx *ipa3_dma_ctx;
/**
 * struct ipa3_dma_init_refcnt_ctrl -IPADMA driver init control information
 * @ref_cnt: reference count for initialization operations
 * @lock: lock for the reference count
 */
struct ipa3_dma_init_refcnt_ctrl {
	unsigned int ref_cnt;
	struct mutex lock;
};
/* Allocated once in ipa3_dma_setup(); tracks re-entrant init/destroy calls */
static struct ipa3_dma_init_refcnt_ctrl *ipa3_dma_init_refcnt_ctrl;
  115. /**
  116. * ipa3_dma_setup() - One time setup for IPA DMA
  117. *
  118. * This function should be called once to setup ipa dma
  119. * by creating the init reference count controller
  120. *
  121. * Return codes: 0: success
  122. * Negative value: failure
  123. */
  124. int ipa3_dma_setup(void)
  125. {
  126. IPADMA_FUNC_ENTRY();
  127. if (ipa3_dma_init_refcnt_ctrl) {
  128. IPADMA_ERR("Setup already done\n");
  129. return -EFAULT;
  130. }
  131. ipa3_dma_init_refcnt_ctrl =
  132. kzalloc(sizeof(*(ipa3_dma_init_refcnt_ctrl)), GFP_KERNEL);
  133. if (!ipa3_dma_init_refcnt_ctrl) {
  134. IPADMA_ERR("kzalloc error.\n");
  135. return -ENOMEM;
  136. }
  137. mutex_init(&ipa3_dma_init_refcnt_ctrl->lock);
  138. IPADMA_FUNC_EXIT();
  139. return 0;
  140. }
  141. /**
  142. * ipa3_dma_shutdown() - Clear setup operations.
  143. *
  144. * Cleanup for the setup function.
  145. * Should be called during IPA driver unloading.
  146. * It assumes all ipa_dma operations are done and ipa_dma is destroyed.
  147. *
  148. * Return codes: None.
  149. */
  150. void ipa3_dma_shutdown(void)
  151. {
  152. IPADMA_FUNC_ENTRY();
  153. if (!ipa3_dma_init_refcnt_ctrl)
  154. return;
  155. kfree(ipa3_dma_init_refcnt_ctrl);
  156. ipa3_dma_init_refcnt_ctrl = NULL;
  157. IPADMA_FUNC_EXIT();
  158. }
  159. /**
  160. * ipa3_dma_init() -Initialize IPADMA.
  161. *
  162. * This function initialize all IPADMA internal data and connect in dma:
  163. * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
  164. * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS
  165. *
  166. * Can be executed several times (re-entrant)
  167. *
  168. * Return codes: 0: success
  169. * -EFAULT: Mismatch between context existence and init ref_cnt
  170. * -EINVAL: IPA driver is not initialized
  171. * -ENOMEM: allocating memory error
  172. * -EPERM: pipe connection failed
  173. */
  174. int ipa3_dma_init(void)
  175. {
  176. struct ipa3_dma_ctx *ipa_dma_ctx_t;
  177. struct ipa_sys_connect_params sys_in;
  178. int res = 0;
  179. int sync_sz;
  180. int async_sz;
  181. IPADMA_FUNC_ENTRY();
  182. if (!ipa3_dma_init_refcnt_ctrl) {
  183. IPADMA_ERR("Setup isn't done yet!\n");
  184. return -EINVAL;
  185. }
  186. mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
  187. if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 0) {
  188. IPADMA_DBG("Already initialized refcnt=%d\n",
  189. ipa3_dma_init_refcnt_ctrl->ref_cnt);
  190. if (!ipa3_dma_ctx) {
  191. IPADMA_ERR("Context missing. refcnt=%d\n",
  192. ipa3_dma_init_refcnt_ctrl->ref_cnt);
  193. res = -EFAULT;
  194. } else {
  195. ipa3_dma_init_refcnt_ctrl->ref_cnt++;
  196. }
  197. goto init_unlock;
  198. }
  199. if (ipa3_dma_ctx) {
  200. IPADMA_ERR("Context already exist\n");
  201. res = -EFAULT;
  202. goto init_unlock;
  203. }
  204. if (!ipa3_is_ready()) {
  205. IPADMA_ERR("IPA is not ready yet\n");
  206. res = -EINVAL;
  207. goto init_unlock;
  208. }
  209. ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL);
  210. if (!ipa_dma_ctx_t) {
  211. res = -ENOMEM;
  212. goto init_unlock;
  213. }
  214. ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
  215. kmem_cache_create("IPA DMA XFER WRAPPER",
  216. sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL);
  217. if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
  218. IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
  219. res = -ENOMEM;
  220. goto fail_mem_ctrl;
  221. }
  222. mutex_init(&ipa_dma_ctx_t->enable_lock);
  223. spin_lock_init(&ipa_dma_ctx_t->async_lock);
  224. mutex_init(&ipa_dma_ctx_t->sync_lock);
  225. spin_lock_init(&ipa_dma_ctx_t->pending_lock);
  226. init_completion(&ipa_dma_ctx_t->done);
  227. ipa_dma_ctx_t->enable_ref_cnt = 0;
  228. ipa_dma_ctx_t->destroy_pending = false;
  229. atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
  230. atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
  231. atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
  232. atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
  233. atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
  234. atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
  235. sync_sz = IPA_SYS_DESC_FIFO_SZ;
  236. async_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
  237. /*
  238. * for ipav3.5 we need to double the rings and allocate dummy buffers
  239. * in order to apply the prefetch WA
  240. */
  241. if (ipa_get_hw_type() == IPA_HW_v3_5) {
  242. sync_sz *= 2;
  243. async_sz *= 2;
  244. ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base =
  245. dma_alloc_coherent(ipa3_ctx->pdev,
  246. IPA_DMA_DUMMY_BUFF_SZ * 4,
  247. &ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base,
  248. GFP_KERNEL);
  249. if (!ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base) {
  250. IPAERR("DMA alloc fail %d bytes for prefetch WA\n",
  251. IPA_DMA_DUMMY_BUFF_SZ);
  252. res = -ENOMEM;
  253. goto fail_alloc_dummy;
  254. }
  255. ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base =
  256. ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base +
  257. IPA_DMA_DUMMY_BUFF_SZ;
  258. ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base =
  259. ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base +
  260. IPA_DMA_DUMMY_BUFF_SZ;
  261. ipa_dma_ctx_t->ipa_dma_dummy_src_async.base =
  262. ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base +
  263. IPA_DMA_DUMMY_BUFF_SZ;
  264. ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base =
  265. ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base +
  266. IPA_DMA_DUMMY_BUFF_SZ;
  267. ipa_dma_ctx_t->ipa_dma_dummy_dst_async.base =
  268. ipa_dma_ctx_t->ipa_dma_dummy_src_async.base +
  269. IPA_DMA_DUMMY_BUFF_SZ;
  270. ipa_dma_ctx_t->ipa_dma_dummy_dst_async.phys_base =
  271. ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base +
  272. IPA_DMA_DUMMY_BUFF_SZ;
  273. }
  274. /* IPADMA SYNC PROD-source for sync memcpy */
  275. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  276. sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
  277. sys_in.desc_fifo_sz = sync_sz;
  278. sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
  279. sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
  280. sys_in.skip_ep_cfg = false;
  281. if (ipa3_setup_sys_pipe(&sys_in,
  282. &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
  283. IPADMA_ERR(":setup sync prod pipe failed\n");
  284. res = -EPERM;
  285. goto fail_sync_prod;
  286. }
  287. /* IPADMA SYNC CONS-destination for sync memcpy */
  288. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  289. sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
  290. sys_in.desc_fifo_sz = sync_sz;
  291. sys_in.skip_ep_cfg = false;
  292. sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
  293. sys_in.notify = NULL;
  294. sys_in.priv = NULL;
  295. if (ipa3_setup_sys_pipe(&sys_in,
  296. &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
  297. IPADMA_ERR(":setup sync cons pipe failed.\n");
  298. res = -EPERM;
  299. goto fail_sync_cons;
  300. }
  301. IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
  302. /* IPADMA ASYNC PROD-source for sync memcpy */
  303. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  304. sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
  305. sys_in.desc_fifo_sz = async_sz;
  306. sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
  307. sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
  308. sys_in.skip_ep_cfg = false;
  309. sys_in.notify = NULL;
  310. if (ipa3_setup_sys_pipe(&sys_in,
  311. &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
  312. IPADMA_ERR(":setup async prod pipe failed.\n");
  313. res = -EPERM;
  314. goto fail_async_prod;
  315. }
  316. /* IPADMA ASYNC CONS-destination for sync memcpy */
  317. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  318. sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
  319. sys_in.desc_fifo_sz = async_sz;
  320. sys_in.skip_ep_cfg = false;
  321. sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
  322. sys_in.notify = ipa3_dma_async_memcpy_notify_cb;
  323. sys_in.priv = NULL;
  324. if (ipa3_setup_sys_pipe(&sys_in,
  325. &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
  326. IPADMA_ERR(":setup async cons pipe failed.\n");
  327. res = -EPERM;
  328. goto fail_async_cons;
  329. }
  330. ipa3_dma_debugfs_init();
  331. ipa3_dma_ctx = ipa_dma_ctx_t;
  332. ipa3_dma_init_refcnt_ctrl->ref_cnt = 1;
  333. IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
  334. IPADMA_FUNC_EXIT();
  335. goto init_unlock;
  336. fail_async_cons:
  337. ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
  338. fail_async_prod:
  339. ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
  340. fail_sync_cons:
  341. ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
  342. fail_sync_prod:
  343. dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4,
  344. ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base,
  345. ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base);
  346. fail_alloc_dummy:
  347. kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
  348. fail_mem_ctrl:
  349. kfree(ipa_dma_ctx_t);
  350. ipa3_dma_ctx = NULL;
  351. init_unlock:
  352. mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
  353. return res;
  354. }
  355. /**
  356. * ipa3_dma_enable() -Vote for IPA clocks.
  357. *
  358. * Can be executed several times (re-entrant)
  359. *
  360. *Return codes: 0: success
  361. * -EINVAL: IPADMA is not initialized
  362. */
  363. int ipa3_dma_enable(void)
  364. {
  365. IPADMA_FUNC_ENTRY();
  366. if ((ipa3_dma_ctx == NULL) ||
  367. (ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
  368. IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
  369. return -EINVAL;
  370. }
  371. mutex_lock(&ipa3_dma_ctx->enable_lock);
  372. if (ipa3_dma_ctx->enable_ref_cnt > 0) {
  373. IPADMA_ERR("Already enabled refcnt=%d\n",
  374. ipa3_dma_ctx->enable_ref_cnt);
  375. ipa3_dma_ctx->enable_ref_cnt++;
  376. mutex_unlock(&ipa3_dma_ctx->enable_lock);
  377. return 0;
  378. }
  379. IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
  380. ipa3_dma_ctx->enable_ref_cnt = 1;
  381. mutex_unlock(&ipa3_dma_ctx->enable_lock);
  382. IPADMA_FUNC_EXIT();
  383. return 0;
  384. }
  385. static bool ipa3_dma_work_pending(void)
  386. {
  387. if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) {
  388. IPADMA_DBG("pending sync\n");
  389. return true;
  390. }
  391. if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) {
  392. IPADMA_DBG("pending async\n");
  393. return true;
  394. }
  395. if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) {
  396. IPADMA_DBG("pending uc\n");
  397. return true;
  398. }
  399. IPADMA_DBG_LOW("no pending work\n");
  400. return false;
  401. }
/**
 * ipa3_dma_disable()- Unvote for IPA clocks.
 *
 * enter to power save mode.
 *
 * Return codes: 0: success
 *		-EINVAL: IPADMA is not initialized
 *		-EPERM: Operation not permitted as ipa_dma is already
 *			disabled
 *		-EFAULT: can not disable ipa_dma as there are pending
 *			memcopy works
 */
int ipa3_dma_disable(void)
{
	unsigned long flags;
	int res = 0;
	bool dec_clks = false;

	IPADMA_FUNC_ENTRY();
	if ((ipa3_dma_ctx == NULL) ||
		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
		return -EINVAL;
	}
	/*
	 * enable_lock serializes against ipa3_dma_enable(); pending_lock
	 * (irqsave) keeps enable_ref_cnt coherent with the pending-counter
	 * checks in the memcpy paths.
	 */
	mutex_lock(&ipa3_dma_ctx->enable_lock);
	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
	if (ipa3_dma_ctx->enable_ref_cnt > 1) {
		/* not the last disable: just drop one reference */
		IPADMA_DBG("Multiple enablement done. refcnt=%d\n",
			ipa3_dma_ctx->enable_ref_cnt);
		ipa3_dma_ctx->enable_ref_cnt--;
		goto completed;
	}
	if (ipa3_dma_ctx->enable_ref_cnt == 0) {
		IPADMA_ERR("Already disabled\n");
		res = -EPERM;
		goto completed;
	}
	if (ipa3_dma_work_pending()) {
		IPADMA_ERR("There is pending work, can't disable.\n");
		res = -EFAULT;
		goto completed;
	}
	ipa3_dma_ctx->enable_ref_cnt = 0;
	/* defer the clock unvote until after the spinlock is released */
	dec_clks = true;
	IPADMA_FUNC_EXIT();
completed:
	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
	if (dec_clks)
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
	mutex_unlock(&ipa3_dma_ctx->enable_lock);
	return res;
}
  453. /**
  454. * ipa3_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
  455. *
  456. * @dest: physical address to store the copied data.
  457. * @src: physical address of the source data to copy.
  458. * @len: number of bytes to copy.
  459. *
  460. * Return codes: 0: success
  461. * -EINVAL: invalid params
  462. * -EPERM: operation not permitted as ipa_dma isn't enable or
  463. * initialized
  464. * -gsi_status : on GSI failures
  465. * -EFAULT: other
  466. */
  467. int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
  468. {
  469. int ep_idx;
  470. int res;
  471. int i = 0;
  472. struct ipa3_sys_context *cons_sys;
  473. struct ipa3_sys_context *prod_sys;
  474. struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
  475. struct ipa3_dma_xfer_wrapper *head_descr = NULL;
  476. struct gsi_xfer_elem prod_xfer_elem;
  477. struct gsi_xfer_elem cons_xfer_elem;
  478. struct gsi_chan_xfer_notify gsi_notify;
  479. unsigned long flags;
  480. bool stop_polling = false;
  481. bool prefetch_wa = false;
  482. IPADMA_FUNC_ENTRY();
  483. IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n",
  484. dest, src, len);
  485. if (ipa3_dma_ctx == NULL) {
  486. IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
  487. return -EPERM;
  488. }
  489. if ((max(src, dest) - min(src, dest)) < len) {
  490. IPADMA_ERR("invalid addresses - overlapping buffers\n");
  491. return -EINVAL;
  492. }
  493. if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
  494. IPADMA_ERR("invalid len, %d\n", len);
  495. return -EINVAL;
  496. }
  497. spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
  498. if (!ipa3_dma_ctx->enable_ref_cnt) {
  499. IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
  500. spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
  501. return -EPERM;
  502. }
  503. atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
  504. spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
  505. ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
  506. if (-1 == ep_idx) {
  507. IPADMA_ERR("Client %u is not mapped\n",
  508. IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
  509. return -EFAULT;
  510. }
  511. cons_sys = ipa3_ctx->ep[ep_idx].sys;
  512. ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
  513. if (-1 == ep_idx) {
  514. IPADMA_ERR("Client %u is not mapped\n",
  515. IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
  516. return -EFAULT;
  517. }
  518. prod_sys = ipa3_ctx->ep[ep_idx].sys;
  519. xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
  520. GFP_KERNEL);
  521. if (!xfer_descr) {
  522. IPADMA_ERR("failed to alloc xfer descr wrapper\n");
  523. res = -ENOMEM;
  524. goto fail_mem_alloc;
  525. }
  526. xfer_descr->phys_addr_dest = dest;
  527. xfer_descr->phys_addr_src = src;
  528. xfer_descr->len = len;
  529. init_completion(&xfer_descr->xfer_done);
  530. mutex_lock(&ipa3_dma_ctx->sync_lock);
  531. list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
  532. cons_sys->len++;
  533. cons_xfer_elem.addr = dest;
  534. cons_xfer_elem.len = len;
  535. cons_xfer_elem.type = GSI_XFER_ELEM_DATA;
  536. cons_xfer_elem.flags = GSI_XFER_FLAG_EOT;
  537. prod_xfer_elem.addr = src;
  538. prod_xfer_elem.len = len;
  539. prod_xfer_elem.type = GSI_XFER_ELEM_DATA;
  540. prod_xfer_elem.xfer_user_data = NULL;
  541. /*
  542. * when copy is less than 9B we need to chain another dummy
  543. * copy so the total size will be larger (for ipav3.5)
  544. * for the consumer we have to prepare an additional credit
  545. */
  546. prefetch_wa = ((ipa_get_hw_type() == IPA_HW_v3_5) &&
  547. len < IPA_DMA_PREFETCH_WA_THRESHOLD);
  548. if (prefetch_wa) {
  549. cons_xfer_elem.xfer_user_data = NULL;
  550. res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
  551. &cons_xfer_elem, false);
  552. if (res) {
  553. IPADMA_ERR(
  554. "Failed: gsi_queue_xfer dest descr res:%d\n",
  555. res);
  556. goto fail_send;
  557. }
  558. cons_xfer_elem.addr =
  559. ipa3_dma_ctx->ipa_dma_dummy_dst_sync.phys_base;
  560. cons_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ;
  561. cons_xfer_elem.type = GSI_XFER_ELEM_DATA;
  562. cons_xfer_elem.flags = GSI_XFER_FLAG_EOT;
  563. cons_xfer_elem.xfer_user_data = xfer_descr;
  564. res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
  565. &cons_xfer_elem, true);
  566. if (res) {
  567. IPADMA_ERR(
  568. "Failed: gsi_queue_xfer dummy dest descr res:%d\n",
  569. res);
  570. goto fail_send;
  571. }
  572. prod_xfer_elem.flags = GSI_XFER_FLAG_CHAIN;
  573. res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
  574. &prod_xfer_elem, false);
  575. if (res) {
  576. IPADMA_ERR(
  577. "Failed: gsi_queue_xfer src descr res:%d\n",
  578. res);
  579. ipa_assert();
  580. goto fail_send;
  581. }
  582. prod_xfer_elem.addr =
  583. ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base;
  584. prod_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ;
  585. prod_xfer_elem.type = GSI_XFER_ELEM_DATA;
  586. prod_xfer_elem.flags = GSI_XFER_FLAG_EOT;
  587. prod_xfer_elem.xfer_user_data = NULL;
  588. res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
  589. &prod_xfer_elem, true);
  590. if (res) {
  591. IPADMA_ERR(
  592. "Failed: gsi_queue_xfer dummy src descr res:%d\n",
  593. res);
  594. ipa_assert();
  595. goto fail_send;
  596. }
  597. } else {
  598. cons_xfer_elem.xfer_user_data = xfer_descr;
  599. res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
  600. &cons_xfer_elem, true);
  601. if (res) {
  602. IPADMA_ERR(
  603. "Failed: gsi_queue_xfer dest descr res:%d\n",
  604. res);
  605. goto fail_send;
  606. }
  607. prod_xfer_elem.flags = GSI_XFER_FLAG_EOT;
  608. res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
  609. &prod_xfer_elem, true);
  610. if (res) {
  611. IPADMA_ERR(
  612. "Failed: gsi_queue_xfer src descr res:%d\n",
  613. res);
  614. ipa_assert();
  615. goto fail_send;
  616. }
  617. }
  618. head_descr = list_first_entry(&cons_sys->head_desc_list,
  619. struct ipa3_dma_xfer_wrapper, link);
  620. /* in case we are not the head of the list, wait for head to wake us */
  621. if (xfer_descr != head_descr) {
  622. mutex_unlock(&ipa3_dma_ctx->sync_lock);
  623. wait_for_completion(&xfer_descr->xfer_done);
  624. mutex_lock(&ipa3_dma_ctx->sync_lock);
  625. head_descr = list_first_entry(&cons_sys->head_desc_list,
  626. struct ipa3_dma_xfer_wrapper, link);
  627. /* Unexpected transfer sent from HW */
  628. ipa_assert_on(xfer_descr != head_descr);
  629. }
  630. mutex_unlock(&ipa3_dma_ctx->sync_lock);
  631. do {
  632. /* wait for transfer to complete */
  633. res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
  634. &gsi_notify);
  635. if (res == GSI_STATUS_SUCCESS)
  636. stop_polling = true;
  637. else if (res != GSI_STATUS_POLL_EMPTY)
  638. IPADMA_ERR(
  639. "Failed: gsi_poll_chanel, returned %d loop#:%d\n",
  640. res, i);
  641. usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
  642. IPA_DMA_POLLING_MAX_SLEEP_RX);
  643. i++;
  644. } while (!stop_polling);
  645. /* for prefetch WA we will receive the length of the dummy
  646. * transfer in the event (because it is the second element)
  647. */
  648. if (prefetch_wa)
  649. ipa_assert_on(gsi_notify.bytes_xfered !=
  650. IPA_DMA_DUMMY_BUFF_SZ);
  651. else
  652. ipa_assert_on(len != gsi_notify.bytes_xfered);
  653. ipa_assert_on(dest != ((struct ipa3_dma_xfer_wrapper *)
  654. (gsi_notify.xfer_user_data))->phys_addr_dest);
  655. mutex_lock(&ipa3_dma_ctx->sync_lock);
  656. list_del(&head_descr->link);
  657. cons_sys->len--;
  658. kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
  659. /* wake the head of the list */
  660. if (!list_empty(&cons_sys->head_desc_list)) {
  661. head_descr = list_first_entry(&cons_sys->head_desc_list,
  662. struct ipa3_dma_xfer_wrapper, link);
  663. complete(&head_descr->xfer_done);
  664. }
  665. mutex_unlock(&ipa3_dma_ctx->sync_lock);
  666. atomic_inc(&ipa3_dma_ctx->total_sync_memcpy);
  667. atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
  668. if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
  669. complete(&ipa3_dma_ctx->done);
  670. IPADMA_FUNC_EXIT();
  671. return res;
  672. fail_send:
  673. list_del(&xfer_descr->link);
  674. cons_sys->len--;
  675. mutex_unlock(&ipa3_dma_ctx->sync_lock);
  676. kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
  677. fail_mem_alloc:
  678. atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
  679. if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
  680. complete(&ipa3_dma_ctx->done);
  681. return res;
  682. }
  683. /**
  684. * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
  685. *
  686. * @dest: physical address to store the copied data.
  687. * @src: physical address of the source data to copy.
  688. * @len: number of bytes to copy.
  689. * @user_cb: callback function to notify the client when the copy was done.
  690. * @user_param: cookie for user_cb.
  691. *
  692. * Return codes: 0: success
  693. * -EINVAL: invalid params
  694. * -EPERM: operation not permitted as ipa_dma isn't enable or
  695. * initialized
  696. * -gsi_status : on GSI failures
  697. * -EFAULT: descr fifo is full.
  698. */
  699. int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
  700. void (*user_cb)(void *user1), void *user_param)
  701. {
  702. int ep_idx;
  703. int res = 0;
  704. struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
  705. struct ipa3_sys_context *prod_sys;
  706. struct ipa3_sys_context *cons_sys;
  707. struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod;
  708. unsigned long flags;
  709. IPADMA_FUNC_ENTRY();
  710. IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n",
  711. dest, src, len);
  712. if (ipa3_dma_ctx == NULL) {
  713. IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
  714. return -EPERM;
  715. }
  716. if ((max(src, dest) - min(src, dest)) < len) {
  717. IPADMA_ERR("invalid addresses - overlapping buffers\n");
  718. return -EINVAL;
  719. }
  720. if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
  721. IPADMA_ERR("invalid len, %d\n", len);
  722. return -EINVAL;
  723. }
  724. if (!user_cb) {
  725. IPADMA_ERR("null pointer: user_cb\n");
  726. return -EINVAL;
  727. }
  728. spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
  729. if (!ipa3_dma_ctx->enable_ref_cnt) {
  730. IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
  731. spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
  732. return -EPERM;
  733. }
  734. atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
  735. spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
  736. ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
  737. if (-1 == ep_idx) {
  738. IPADMA_ERR("Client %u is not mapped\n",
  739. IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
  740. return -EFAULT;
  741. }
  742. cons_sys = ipa3_ctx->ep[ep_idx].sys;
  743. ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
  744. if (-1 == ep_idx) {
  745. IPADMA_ERR("Client %u is not mapped\n",
  746. IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
  747. return -EFAULT;
  748. }
  749. prod_sys = ipa3_ctx->ep[ep_idx].sys;
  750. xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
  751. GFP_KERNEL);
  752. if (!xfer_descr) {
  753. res = -ENOMEM;
  754. goto fail_mem_alloc;
  755. }
  756. xfer_descr->phys_addr_dest = dest;
  757. xfer_descr->phys_addr_src = src;
  758. xfer_descr->len = len;
  759. xfer_descr->callback = user_cb;
  760. xfer_descr->user1 = user_param;
  761. spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
  762. list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
  763. cons_sys->len++;
  764. /*
  765. * when copy is less than 9B we need to chain another dummy
  766. * copy so the total size will be larger (for ipav3.5)
  767. */
  768. if ((ipa_get_hw_type() == IPA_HW_v3_5) && len <
  769. IPA_DMA_PREFETCH_WA_THRESHOLD) {
  770. xfer_elem_cons.addr = dest;
  771. xfer_elem_cons.len = len;
  772. xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
  773. xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
  774. xfer_elem_cons.xfer_user_data = NULL;
  775. res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
  776. &xfer_elem_cons, false);
  777. if (res) {
  778. IPADMA_ERR(
  779. "Failed: gsi_queue_xfer on dest descr res: %d\n",
  780. res);
  781. goto fail_send;
  782. }
  783. xfer_elem_cons.addr =
  784. ipa3_dma_ctx->ipa_dma_dummy_dst_async.phys_base;
  785. xfer_elem_cons.len = IPA_DMA_DUMMY_BUFF_SZ;
  786. xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
  787. xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
  788. xfer_elem_cons.xfer_user_data = xfer_descr;
  789. res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
  790. &xfer_elem_cons, true);
  791. if (res) {
  792. IPADMA_ERR(
  793. "Failed: gsi_queue_xfer on dummy dest descr res: %d\n",
  794. res);
  795. goto fail_send;
  796. }
  797. xfer_elem_prod.addr = src;
  798. xfer_elem_prod.len = len;
  799. xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
  800. xfer_elem_prod.flags = GSI_XFER_FLAG_CHAIN;
  801. xfer_elem_prod.xfer_user_data = NULL;
  802. res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
  803. &xfer_elem_prod, false);
  804. if (res) {
  805. IPADMA_ERR(
  806. "Failed: gsi_queue_xfer on src descr res: %d\n",
  807. res);
  808. ipa_assert();
  809. goto fail_send;
  810. }
  811. xfer_elem_prod.addr =
  812. ipa3_dma_ctx->ipa_dma_dummy_src_async.phys_base;
  813. xfer_elem_prod.len = IPA_DMA_DUMMY_BUFF_SZ;
  814. xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
  815. xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
  816. xfer_elem_prod.xfer_user_data = NULL;
  817. res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
  818. &xfer_elem_prod, true);
  819. if (res) {
  820. IPADMA_ERR(
  821. "Failed: gsi_queue_xfer on dummy src descr res: %d\n",
  822. res);
  823. ipa_assert();
  824. goto fail_send;
  825. }
  826. } else {
  827. xfer_elem_cons.addr = dest;
  828. xfer_elem_cons.len = len;
  829. xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
  830. xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
  831. xfer_elem_cons.xfer_user_data = xfer_descr;
  832. res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
  833. &xfer_elem_cons, true);
  834. if (res) {
  835. IPADMA_ERR(
  836. "Failed: gsi_queue_xfer on dummy dest descr res: %d\n",
  837. res);
  838. ipa_assert();
  839. goto fail_send;
  840. }
  841. xfer_elem_prod.addr = src;
  842. xfer_elem_prod.len = len;
  843. xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
  844. xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
  845. xfer_elem_prod.xfer_user_data = NULL;
  846. res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
  847. &xfer_elem_prod, true);
  848. if (res) {
  849. IPADMA_ERR(
  850. "Failed: gsi_queue_xfer on dummy src descr res: %d\n",
  851. res);
  852. ipa_assert();
  853. goto fail_send;
  854. }
  855. }
  856. spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
  857. IPADMA_FUNC_EXIT();
  858. return res;
  859. fail_send:
  860. list_del(&xfer_descr->link);
  861. spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
  862. kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
  863. fail_mem_alloc:
  864. atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
  865. if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
  866. complete(&ipa3_dma_ctx->done);
  867. return res;
  868. }
  869. /**
  870. * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC
  871. * @dest: physical address to store the copied data.
  872. * @src: physical address of the source data to copy.
  873. * @len: number of bytes to copy.
  874. *
  875. * Return codes: 0: success
  876. * -EINVAL: invalid params
  877. * -EPERM: operation not permitted as ipa_dma isn't enable or
  878. * initialized
  879. * -EBADF: IPA uC is not loaded
  880. */
  881. int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
  882. {
  883. int res;
  884. unsigned long flags;
  885. IPADMA_FUNC_ENTRY();
  886. if (ipa3_dma_ctx == NULL) {
  887. IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
  888. return -EPERM;
  889. }
  890. if ((max(src, dest) - min(src, dest)) < len) {
  891. IPADMA_ERR("invalid addresses - overlapping buffers\n");
  892. return -EINVAL;
  893. }
  894. if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
  895. IPADMA_ERR("invalid len, %d\n", len);
  896. return -EINVAL;
  897. }
  898. spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
  899. if (!ipa3_dma_ctx->enable_ref_cnt) {
  900. IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
  901. spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
  902. return -EPERM;
  903. }
  904. atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
  905. spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
  906. res = ipa3_uc_memcpy(dest, src, len);
  907. if (res) {
  908. IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res);
  909. goto dec_and_exit;
  910. }
  911. atomic_inc(&ipa3_dma_ctx->total_uc_memcpy);
  912. res = 0;
  913. dec_and_exit:
  914. atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
  915. if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
  916. complete(&ipa3_dma_ctx->done);
  917. IPADMA_FUNC_EXIT();
  918. return res;
  919. }
/**
 * ipa3_dma_destroy() -teardown IPADMA pipes and release ipadma.
 *
 * this is a blocking function, returns just after destroying IPADMA.
 */
void ipa3_dma_destroy(void)
{
	int res = 0;

	IPADMA_FUNC_ENTRY();
	if (!ipa3_dma_init_refcnt_ctrl) {
		IPADMA_ERR("Setup isn't done\n");
		return;
	}
	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
	/*
	 * Multiple ipa3_dma_init() calls are refcounted; only the last
	 * destroy (ref_cnt == 1) actually tears the pipes down.
	 */
	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 1) {
		IPADMA_DBG("Multiple initialization done. refcnt=%d\n",
			ipa3_dma_init_refcnt_ctrl->ref_cnt);
		ipa3_dma_init_refcnt_ctrl->ref_cnt--;
		goto completed;
	}
	if ((!ipa3_dma_ctx) || (ipa3_dma_init_refcnt_ctrl->ref_cnt == 0)) {
		IPADMA_ERR("IPADMA isn't initialized ctx=%pK\n", ipa3_dma_ctx);
		goto completed;
	}
	/*
	 * Block until all in-flight sync/async/uC memcpy jobs have
	 * completed; the last finishing job signals ->done.
	 */
	if (ipa3_dma_work_pending()) {
		ipa3_dma_ctx->destroy_pending = true;
		IPADMA_DBG("There are pending memcpy, wait for completion\n");
		wait_for_completion(&ipa3_dma_ctx->done);
	}
	/* Refuse to destroy while a client still holds an enable ref. */
	if (ipa3_dma_ctx->enable_ref_cnt > 0) {
		IPADMA_ERR("IPADMA still enabled\n");
		goto completed;
	}
	/* Tear down all four pipes; log-and-continue on per-pipe failure. */
	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
	if (res)
		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
	ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0;
	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
	if (res)
		IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
	ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0;
	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
	if (res)
		IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
	ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0;
	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
	if (res)
		IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
	ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0;
	ipa3_dma_debugfs_destroy();
	kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache);
	/*
	 * The four dummy buffers appear to be one coherent allocation of
	 * 4 * IPA_DMA_DUMMY_BUFF_SZ anchored at ipa_dma_dummy_src_sync —
	 * freed here as a single chunk (allocation site not in view).
	 */
	dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4,
		ipa3_dma_ctx->ipa_dma_dummy_src_sync.base,
		ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base);
	kfree(ipa3_dma_ctx);
	ipa3_dma_ctx = NULL;
	ipa3_dma_init_refcnt_ctrl->ref_cnt = 0;
	IPADMA_FUNC_EXIT();
completed:
	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
}
  981. /**
  982. * ipa3_dma_async_memcpy_notify_cb() - Callback function which will be called
  983. * by IPA driver after getting notify on Rx operation is completed (data was
  984. * written to dest descriptor on async_cons ep).
  985. *
  986. * @priv -not in use.
  987. * @evt - event name - IPA_RECIVE.
  988. * @data -the ipa_mem_buffer.
  989. */
  990. void ipa3_dma_async_memcpy_notify_cb(void *priv
  991. , enum ipa_dp_evt_type evt, unsigned long data)
  992. {
  993. int ep_idx = 0;
  994. struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
  995. struct ipa3_sys_context *sys;
  996. unsigned long flags;
  997. IPADMA_FUNC_ENTRY();
  998. ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
  999. if (ep_idx < 0) {
  1000. IPADMA_ERR("IPA Client mapping failed\n");
  1001. return;
  1002. }
  1003. sys = ipa3_ctx->ep[ep_idx].sys;
  1004. spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
  1005. xfer_descr_expected = list_first_entry(&sys->head_desc_list,
  1006. struct ipa3_dma_xfer_wrapper, link);
  1007. list_del(&xfer_descr_expected->link);
  1008. sys->len--;
  1009. spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
  1010. atomic_inc(&ipa3_dma_ctx->total_async_memcpy);
  1011. atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
  1012. xfer_descr_expected->callback(xfer_descr_expected->user1);
  1013. kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
  1014. xfer_descr_expected);
  1015. if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
  1016. complete(&ipa3_dma_ctx->done);
  1017. IPADMA_FUNC_EXIT();
  1018. }
#ifdef CONFIG_DEBUG_FS
/* debugfs handles: the "ipa_dma" directory and its "info" file */
static struct dentry *dent;
static struct dentry *dfile_info;
  1022. static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
  1023. size_t count, loff_t *ppos)
  1024. {
  1025. int nbytes = 0;
  1026. if (!ipa3_dma_init_refcnt_ctrl) {
  1027. nbytes += scnprintf(&dbg_buff[nbytes],
  1028. IPADMA_MAX_MSG_LEN - nbytes,
  1029. "Setup was not done\n");
  1030. goto completed;
  1031. }
  1032. if (!ipa3_dma_ctx) {
  1033. nbytes += scnprintf(&dbg_buff[nbytes],
  1034. IPADMA_MAX_MSG_LEN - nbytes,
  1035. "Status:\n Not initialized (ref_cnt=%d)\n",
  1036. ipa3_dma_init_refcnt_ctrl->ref_cnt);
  1037. } else {
  1038. nbytes += scnprintf(&dbg_buff[nbytes],
  1039. IPADMA_MAX_MSG_LEN - nbytes,
  1040. "Status:\n Initialized (ref_cnt=%d)\n",
  1041. ipa3_dma_init_refcnt_ctrl->ref_cnt);
  1042. nbytes += scnprintf(&dbg_buff[nbytes],
  1043. IPADMA_MAX_MSG_LEN - nbytes,
  1044. " %s (ref_cnt=%d)\n",
  1045. (ipa3_dma_ctx->enable_ref_cnt > 0) ?
  1046. "Enabled" : "Disabled",
  1047. ipa3_dma_ctx->enable_ref_cnt);
  1048. nbytes += scnprintf(&dbg_buff[nbytes],
  1049. IPADMA_MAX_MSG_LEN - nbytes,
  1050. "Statistics:\n total sync memcpy: %d\n ",
  1051. atomic_read(&ipa3_dma_ctx->total_sync_memcpy));
  1052. nbytes += scnprintf(&dbg_buff[nbytes],
  1053. IPADMA_MAX_MSG_LEN - nbytes,
  1054. "total async memcpy: %d\n ",
  1055. atomic_read(&ipa3_dma_ctx->total_async_memcpy));
  1056. nbytes += scnprintf(&dbg_buff[nbytes],
  1057. IPADMA_MAX_MSG_LEN - nbytes,
  1058. "total uc memcpy: %d\n ",
  1059. atomic_read(&ipa3_dma_ctx->total_uc_memcpy));
  1060. nbytes += scnprintf(&dbg_buff[nbytes],
  1061. IPADMA_MAX_MSG_LEN - nbytes,
  1062. "pending sync memcpy jobs: %d\n ",
  1063. atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt));
  1064. nbytes += scnprintf(&dbg_buff[nbytes],
  1065. IPADMA_MAX_MSG_LEN - nbytes,
  1066. "pending async memcpy jobs: %d\n ",
  1067. atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt));
  1068. nbytes += scnprintf(&dbg_buff[nbytes],
  1069. IPADMA_MAX_MSG_LEN - nbytes,
  1070. "pending uc memcpy jobs: %d\n",
  1071. atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt));
  1072. }
  1073. completed:
  1074. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  1075. }
  1076. static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file,
  1077. const char __user *ubuf,
  1078. size_t count,
  1079. loff_t *ppos)
  1080. {
  1081. s8 in_num = 0;
  1082. int ret;
  1083. ret = kstrtos8_from_user(ubuf, count, 0, &in_num);
  1084. if (ret)
  1085. return ret;
  1086. switch (in_num) {
  1087. case 0:
  1088. if (ipa3_dma_work_pending())
  1089. IPADMA_ERR("Note, there are pending memcpy\n");
  1090. atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0);
  1091. atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0);
  1092. break;
  1093. default:
  1094. IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
  1095. break;
  1096. }
  1097. return count;
  1098. }
/*
 * File operations for the debugfs "info" node: read dumps status and
 * statistics, write resets the memcpy counters.
 */
const struct file_operations ipa3_ipadma_stats_ops = {
	.read = ipa3_dma_debugfs_read,
	.write = ipa3_dma_debugfs_reset_statistics,
};
  1103. static void ipa3_dma_debugfs_init(void)
  1104. {
  1105. const mode_t read_write_mode = 0666;
  1106. dent = debugfs_create_dir("ipa_dma", 0);
  1107. if (IS_ERR(dent)) {
  1108. IPADMA_ERR("fail to create folder ipa_dma\n");
  1109. return;
  1110. }
  1111. dfile_info =
  1112. debugfs_create_file("info", read_write_mode, dent,
  1113. 0, &ipa3_ipadma_stats_ops);
  1114. if (!dfile_info || IS_ERR(dfile_info)) {
  1115. IPADMA_ERR("fail to create file stats\n");
  1116. goto fail;
  1117. }
  1118. return;
  1119. fail:
  1120. debugfs_remove_recursive(dent);
  1121. }
  1122. static void ipa3_dma_debugfs_destroy(void)
  1123. {
  1124. debugfs_remove_recursive(dent);
  1125. }
#endif /* CONFIG_DEBUG_FS */