// SPDX-License-Identifier: GPL-2.0-only
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

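/* Set up the control register values used when (re)starting each channel,
 * initialize the pending buffer lists, and map the transmit and receive
 * descriptor memory for streaming DMA.
 */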
int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
			  SGDMA_CTRLREG_INTEN;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
			  SGDMA_CTRLREG_INTEN |
			  SGDMA_CTRLREG_ILASTD;

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	priv->rxdescphys = dma_map_single(priv->device,
					  (void __force *)priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	priv->txdescphys = dma_map_single(priv->device,
					  (void __force *)priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	/* Initialize descriptor memory to all 0's, sync memory to cache */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	dma_sync_single_for_device(priv->device, priv->rxdescphys,
				   priv->rxdescmem, DMA_TO_DEVICE);

	return 0;
}

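/* Unmap the descriptor memory mapped in sgdma_initialize(). Safe to call on a
 * partially initialized instance; unmapped channels are skipped.
 */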
void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	/* Initialize descriptor memory to 0 */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so no need to provide implementations for abstract enable
 * and disable
 */
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

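/* Writing the CLRINT bit to a channel's control register acknowledges its
 * pending interrupt.
 */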
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

/* transmits buffer through SGDMA. Returns number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,			/* current descriptor */
			    ndesc,			/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,		/* address of packet to xmit */
			    0,				/* write addr 0 for tx dma */
			    buffer->len,		/* length of packet */
			    SGDMA_CONTROL_EOP,		/* Generate EOP */
			    0,				/* read fixed */
			    SGDMA_CONTROL_WR_FIXED);	/* write fixed */

	sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}

/* Returns 1 if the last transmit completed (controller idle and descriptor
 * no longer hardware owned) and a buffer was dequeued from the pending list;
 * 0 otherwise.
 *
 * tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;

	if (!sgdma_txbusy(priv) &&
	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv))) {
		ready = 1;
	}

	return ready;
}

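/* Start (or restart) the receive SGDMA by issuing an asynchronous read for
 * the buffer at the head of the receive queue. Buffers are queued beforehand
 * with sgdma_add_rx_desc().
 */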
void sgdma_start_rxdma(struct altera_tse_private *priv)
{
	sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}

/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *base =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
	struct sgdma_descrip __iomem *desc = NULL;
	struct tse_buffer *rxbuffer = NULL;
	unsigned int rxstatus = 0;

	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

	desc = &base[0];
	if (sts & SGDMA_STSREG_EOP) {
		unsigned int pktlength = 0;
		unsigned int pktstatus = 0;
		dma_sync_single_for_cpu(priv->device,
					priv->rxdescphys,
					SGDMA_DESC_LEN,
					DMA_FROM_DEVICE);

		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
		pktstatus = csrrd8(desc, sgdma_descroffs(status));
		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		if (rxstatus) {
			csrwr8(0, desc, sgdma_descroffs(status));

			rxbuffer = dequeue_rx(priv);
			if (rxbuffer == NULL)
				netdev_info(priv->dev,
					    "sgdma rx and rx queue empty!\n");

			/* Clear control */
			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
			/* clear status */
			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

			/* kick the rx sgdma after reaping this descriptor */
			sgdma_async_read(priv);

		} else {
			/* If the SGDMA indicated an end of packet on receive,
			 * then the rxstatus from the descriptor is expected to
			 * be non-zero - meaning a valid packet with a nonzero
			 * length, or an error has been indicated. If not, all
			 * we can do is signal an error and return no packet
			 * received. Most likely there is a system design
			 * error, or an error in the underlying kernel (cache
			 * or cache management problem).
			 */
			netdev_err(priv->dev,
				   "SGDMA RX Error Info: %x, %x, %x\n",
				   sts, csrrd8(desc, sgdma_descroffs(status)),
				   rxstatus);
		}
	} else if (sts == 0) {
		sgdma_async_read(priv);
	}

	return rxstatus;
}

/* Private functions */

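/* Build a single descriptor for the controller: the "next" descriptor is
 * marked as not hardware owned so the SGDMA stops after this transfer, then
 * the current descriptor is filled in and handed to hardware. Callers must
 * sync the descriptor memory to the device before starting the channel.
 */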
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */
	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));

	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	csrwr8(ctrl, ndesc, sgdma_descroffs(control));

	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* Channel is implicitly zero, initialized to 0 by default */
	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

	csrwr32(0, desc, sgdma_descroffs(pad1));
	csrwr32(0, desc, sgdma_descroffs(pad2));
	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

	csrwr8(ctrl, desc, sgdma_descroffs(control));
	csrwr8(0, desc, sgdma_descroffs(status));
	csrwr8(0, desc, sgdma_descroffs(wburst));
	csrwr8(0, desc, sgdma_descroffs(rburst));
	csrwr16(length, desc, sgdma_descroffs(bytes));
	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}

/* If hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning initial state - restart the async
 * read, probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and a status, restart the async
 * DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr,	/* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* SOP: NA for rx DMA */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   SGDMA_DESC_LEN,
					   DMA_TO_DEVICE);

		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			priv->rx_dma_csr,
			sgdma_csroffs(next_descrip));

		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			priv->rx_dma_csr,
			sgdma_csroffs(control));

		return 1;
	}

	return 0;
}

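/* Hand a prepared transmit descriptor to the controller: clear control and
 * status, sync the descriptor to the device, point the controller at the
 * descriptor, and set the START bit. Returns 1 if started, 0 if the
 * controller is still busy.
 */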
static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc)
{
	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   SGDMA_DESC_LEN, DMA_TO_DEVICE);

	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		priv->tx_dma_csr,
		sgdma_csroffs(next_descrip));

	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
		priv->tx_dma_csr,
		sgdma_csroffs(control));

	return 1;
}

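/* Translate a CPU (ioremapped) descriptor pointer into the bus address the
 * SGDMA master uses, by applying the descriptor's offset within descriptor
 * memory to the descriptor memory's bus base address.
 */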
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

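/* Simple FIFO helpers built on the kernel's struct list_head. Callers are
 * responsible for any locking around these operations.
 */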
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)

#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}

/* adds a tse_buffer to the tail of an rx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, or returns NULL
 * if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, or returns NULL
 * if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* returns (without removing) the head of the receive buffer list, or NULL
 * if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
		       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if DMA is busy, wait for current transaction to finish */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}