dpaa2-qdma.c

// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

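/*
 * Frame descriptors carry a "bypass memory translation" (BMT) hint when
 * the device is not behind an IOMMU; probe() clears this flag once an
 * IOMMU domain is found for the DPDMAI device.
 */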
static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

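/*
 * Per-channel DMA pools: one for frame descriptors, one for frame-list
 * entries, and one for the source/destination descriptor pair used by
 * long-format jobs.
 */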
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

	dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
					      sizeof(struct dpaa2_fl_entry),
					      sizeof(struct dpaa2_fl_entry), 0);
	if (!dpaa2_chan->fl_pool)
		goto err_fd;

	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d),
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;
err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}

static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;

	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}

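/*
 * Fill the enqueue frame descriptor: point it at the frame list, apply
 * the BMT bypass when no IOMMU is in use, and OR QDMA_SER_CTX into the
 * frame context along with the requested format.
 */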
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	/* fd populated */
	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length disable;
	 * BMT must not be set when fsl-mc uses an IOVA address.
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_final(f_list, QDMA_FL_F);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

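/*
 * A long-format memcpy job uses three frame-list entries: the first
 * points at the source/destination descriptor pair, the next two
 * describe the source and destination buffers.
 */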
static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate Frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer (long format) */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}

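/*
 * Move the next issued descriptor onto the in-flight (comp_used) list
 * and enqueue its frame descriptor on the channel's request frame queue.
 */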
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_move_tail(&dpaa2_comp->list,
				       &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}

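/*
 * Open the DPDMAI object, validate its API version and record the
 * per-priority rx/tx frame queue IDs.
 */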
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;
exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}

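/*
 * Frame-queue notification callback: pull completions from the response
 * queue, match each returned frame descriptor against the per-channel
 * in-flight list by frame-list address, and complete the corresponding
 * virt-dma cookie.
 */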
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");

		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}

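/*
 * Register a DPIO notification context and allocate a dequeue store for
 * each priority's response frame queue.
 */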
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			err = -ENOMEM;
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}

static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}

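/*
 * Point each DPDMAI rx queue at its DPIO notification context so that
 * completions trigger dpaa2_qdma_fqdan_cb() above.
 */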
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);
	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}

static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}

static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}

static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
		dpaa2_chan->fqid = priv->tx_fqid[i % num];
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}

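/*
 * SoC match table used by probe() below; it is not part of this listing,
 * so this entry is an assumed reconstruction based on the LX2160A
 * write-type fixup (LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT) handled above.
 */
static const struct soc_device_attribute soc_fixup_tuning[] = {
	{ .family = "QorIQ LX2160A" },
	{ /* sentinel */ }
};
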
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
	struct device *dev = &dpdmai_dev->dev;
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	priv->dpdmai_dev = dpdmai_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);
	if (priv->iommu_domain)
		smmu_disable = false;

	/* obtain a MC portal */
	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_mcportal;
	}

	/* DPDMAI initialization */
	err = dpaa2_qdma_setup(dpdmai_dev);
	if (err) {
		dev_err(dev, "dpaa2_qdma_setup() failed\n");
		goto err_dpdmai_setup;
	}

	/* DPIO */
	err = dpaa2_qdma_dpio_setup(priv);
	if (err) {
		dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPDMAI binding to DPIO */
	err = dpaa2_dpdmai_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
		goto err_bind;
	}

	/* DPDMAI enable */
	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_enable() failed\n");
		goto err_enable;
	}

	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
	if (!dpaa2_qdma) {
		err = -ENOMEM;
		goto err_eng;
	}

	priv->dpaa2_qdma = dpaa2_qdma;
	dpaa2_qdma->priv = priv;

	dpaa2_qdma->desc_allocated = 0;
	dpaa2_qdma->n_chans = NUM_CH;

	dpaa2_dpdmai_init_channels(dpaa2_qdma);

	if (soc_device_match(soc_fixup_tuning))
		dpaa2_qdma->qdma_wrtype_fixup = true;
	else
		dpaa2_qdma->qdma_wrtype_fixup = false;

	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

	dpaa2_qdma->dma_dev.dev = dev;
	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
		dpaa2_qdma_alloc_chan_resources;
	dpaa2_qdma->dma_dev.device_free_chan_resources =
		dpaa2_qdma_free_chan_resources;
	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
	if (err) {
		dev_err(dev, "Can't register NXP QDMA engine.\n");
		goto err_dpaa2_qdma;
	}

	return 0;

err_dpaa2_qdma:
	kfree(dpaa2_qdma);
err_eng:
	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
	dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
	kfree(priv->ppriv);
	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
	fsl_mc_portal_free(priv->mc_io);
err_mcportal:
	kfree(priv);
	dev_set_drvdata(dev, NULL);
	return err;
}

static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);

	dpaa2_dpdmai_free_channels(dpaa2_qdma);
	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);

	return 0;
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver = {
		.name = "dpaa2-qdma",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_qdma_probe,
	.remove = dpaa2_qdma_remove,
	.shutdown = dpaa2_qdma_shutdown,
	.match_id_table = dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");