// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Rafał Miłecki <[email protected]>
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "bcm4908_enet.h"
#include "unimac.h"

#define ENET_DMA_CH_RX_CFG			ENET_DMA_CH0_CFG
#define ENET_DMA_CH_TX_CFG			ENET_DMA_CH1_CFG
#define ENET_DMA_CH_RX_STATE_RAM		ENET_DMA_CH0_STATE_RAM
#define ENET_DMA_CH_TX_STATE_RAM		ENET_DMA_CH1_STATE_RAM

#define ENET_TX_BDS_NUM				200
#define ENET_RX_BDS_NUM				200
#define ENET_RX_BDS_NUM_MAX			8192

#define ENET_DMA_INT_DEFAULTS			(ENET_DMA_CH_CFG_INT_DONE | \
						 ENET_DMA_CH_CFG_INT_NO_DESC | \
						 ENET_DMA_CH_CFG_INT_BUFF_DONE)
#define ENET_DMA_MAX_BURST_LEN			8 /* in 64 bit words */

#define ENET_MTU_MAX				ETH_DATA_LEN /* Is it possible to support 2044? */
#define BRCM_MAX_TAG_LEN			6
#define ENET_MAX_ETH_OVERHEAD			(ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
						 ETH_FCS_LEN + 4) /* 32 */
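
/*
 * Hardware buffer descriptor: "ctl" carries the DMA_CTL_STATUS_* flags plus
 * the buffer length, "addr" the DMA address of the packet buffer. Both fields
 * are little-endian; a descriptor is handed to the controller by setting
 * DMA_CTL_STATUS_OWN.
 */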
struct bcm4908_enet_dma_ring_bd {
	__le32 ctl;
	__le32 addr;
} __packed;

struct bcm4908_enet_dma_ring_slot {
	struct sk_buff *skb;
	unsigned int len;
	dma_addr_t dma_addr;
};
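
/*
 * Software ring state: write_idx is the next descriptor the driver fills on
 * TX, read_idx the next one to reclaim on TX completion or to receive from on
 * RX. The descriptor array itself lives in coherent DMA memory reachable via
 * cpu_addr/dma_addr.
 */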
struct bcm4908_enet_dma_ring {
	int is_tx;
	int read_idx;
	int write_idx;
	int length;
	u16 cfg_block;
	u16 st_ram_block;
	struct napi_struct napi;

	union {
		void *cpu_addr;
		struct bcm4908_enet_dma_ring_bd *buf_desc;
	};
	dma_addr_t dma_addr;

	struct bcm4908_enet_dma_ring_slot *slots;
};

struct bcm4908_enet {
	struct device *dev;
	struct net_device *netdev;
	void __iomem *base;
	int irq_tx;

	struct bcm4908_enet_dma_ring tx_ring;
	struct bcm4908_enet_dma_ring rx_ring;
};
/***
 * R/W ops
 */

static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
{
	return readl(enet->base + offset);
}

static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
{
	writel(value, enet->base + offset);
}

static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
{
	u32 val;

	WARN_ON(set & ~mask);

	val = enet_read(enet, offset);
	val = (val & ~mask) | (set & mask);
	enet_write(enet, offset, val);
}

static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
{
	enet_maskset(enet, offset, set, set);
}

static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
{
	return enet_read(enet, ENET_UNIMAC + offset);
}

static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
{
	enet_write(enet, ENET_UNIMAC + offset, value);
}

static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
{
	enet_set(enet, ENET_UNIMAC + offset, set);
}
/***
 * Helpers
 */

static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
{
	enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
}
/***
 * DMA ring ops
 */
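
/*
 * Each DMA channel has an INT_MASK register that enables the DONE / NO_DESC /
 * BUFF_DONE interrupt sources and an INT_STAT register that is written back
 * to acknowledge them (assumed to be write-to-clear, matching how the driver
 * uses it below).
 */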
static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
					   struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
}

static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
}

static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
}
/***
 * DMA
 */

static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
				       struct bcm4908_enet_dma_ring *ring)
{
	int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	struct device *dev = enet->dev;

	ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
	if (!ring->cpu_addr)
		return -ENOMEM;

	if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
		dev_err(dev, "Invalid DMA ring alignment\n");
		goto err_free_buf_descs;
	}

	ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
	if (!ring->slots)
		goto err_free_buf_descs;

	return 0;

err_free_buf_descs:
	dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
	ring->cpu_addr = NULL;
	return -ENOMEM;
}
static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int size;

	size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	if (rx_ring->cpu_addr)
		dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
	kfree(rx_ring->slots);

	size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	if (tx_ring->cpu_addr)
		dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
	kfree(tx_ring->slots);
}
static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;

	tx_ring->length = ENET_TX_BDS_NUM;
	tx_ring->is_tx = 1;
	tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
	tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
	err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
	if (err) {
		dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
		return err;
	}

	rx_ring->length = ENET_RX_BDS_NUM;
	rx_ring->is_tx = 0;
	rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
	rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
	err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
	if (err) {
		dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
		bcm4908_enet_dma_free(enet);
		return err;
	}

	return 0;
}
static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
	int i;

	/* Disable the DMA controller and channel */
	for (i = 0; i < ARRAY_SIZE(rings); i++)
		enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);

	/* Reset channels state */
	for (i = 0; i < ARRAY_SIZE(rings); i++) {
		struct bcm4908_enet_dma_ring *ring = rings[i];

		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
	}
}
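
/*
 * Attach a fresh max-sized skb to RX descriptor "idx": map it for device
 * writes and hand the descriptor back to the hardware by setting the OWN bit
 * (plus WRAP on the last descriptor of the ring).
 */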
static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
{
	struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
	struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
	struct device *dev = enet->dev;
	u32 tmp;
	int err;

	slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;

	slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
	if (!slot->skb)
		return -ENOMEM;

	slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
	err = dma_mapping_error(dev, slot->dma_addr);
	if (err) {
		dev_err(dev, "Failed to map DMA buffer: %d\n", err);
		kfree_skb(slot->skb);
		slot->skb = NULL;
		return err;
	}

	tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
	tmp |= DMA_CTL_STATUS_OWN;
	if (idx == enet->rx_ring.length - 1)
		tmp |= DMA_CTL_STATUS_WRAP;
	buf_desc->ctl = cpu_to_le32(tmp);
	buf_desc->addr = cpu_to_le32(slot->dma_addr);

	return 0;
}
static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
				       struct bcm4908_enet_dma_ring *ring)
{
	int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
	int reset_subch = ring->is_tx ? 1 : 0;

	/* Reset the DMA channel */
	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);

	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);

	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
		   (uint32_t)ring->dma_addr);

	ring->read_idx = 0;
	ring->write_idx = 0;
}
static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	int i;

	for (i = rx_ring->length - 1; i >= 0; i--) {
		slot = &rx_ring->slots[i];

		if (!slot->skb)
			continue;

		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
		kfree_skb(slot->skb);
		slot->skb = NULL;
	}
}
static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;
	int i;

	for (i = 0; i < rx_ring->length; i++) {
		err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
		if (err) {
			dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
			bcm4908_enet_dma_uninit(enet);
			return err;
		}
	}

	bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
	bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);

	return 0;
}
static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
}

static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
					     struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
}

static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
}
static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
					     struct bcm4908_enet_dma_ring *ring)
{
	unsigned long deadline;
	u32 tmp;

	enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);

	deadline = jiffies + usecs_to_jiffies(2000);
	do {
		tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
		if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
			return;
		enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
		usleep_range(10, 30);
	} while (!time_after_eq(jiffies, deadline));

	dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
}
/***
 * Ethernet driver
 */
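
/*
 * Basic MAC bring-up: program the max frame length, soft-reset the UniMAC,
 * flush both FIFOs, clear the MIB counters and force 1000 Mb/s full duplex
 * with the link reported as up (there is no PHY/phylink handling in this
 * driver).
 */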
static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
{
	u32 cmd;

	bcm4908_enet_set_mtu(enet, enet->netdev->mtu);

	cmd = enet_umac_read(enet, UMAC_CMD);
	enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
	enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);

	enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
	enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);

	enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
	enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);

	cmd = enet_umac_read(enet, UMAC_CMD);
	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
	cmd &= ~CMD_TX_EN;
	cmd &= ~CMD_RX_EN;
	cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
	enet_umac_write(enet, UMAC_CMD, cmd);

	enet_maskset(enet, ENET_GMAC_STATUS,
		     ENET_GMAC_STATUS_ETH_SPEED_MASK |
		     ENET_GMAC_STATUS_HD |
		     ENET_GMAC_STATUS_AUTO_CFG_EN |
		     ENET_GMAC_STATUS_LINK_UP,
		     ENET_GMAC_STATUS_ETH_SPEED_1000 |
		     ENET_GMAC_STATUS_AUTO_CFG_EN |
		     ENET_GMAC_STATUS_LINK_UP);
}
static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
{
	struct bcm4908_enet *enet = dev_id;
	struct bcm4908_enet_dma_ring *ring;

	ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;

	bcm4908_enet_dma_ring_intrs_off(enet, ring);
	bcm4908_enet_dma_ring_intrs_ack(enet, ring);

	napi_schedule(&ring->napi);

	return IRQ_HANDLED;
}
static int bcm4908_enet_open(struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;

	err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
	if (err) {
		dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
		return err;
	}

	if (enet->irq_tx > 0) {
		err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
				  "tx", enet);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d: %d\n",
				enet->irq_tx, err);
			free_irq(netdev->irq, enet);
			return err;
		}
	}

	bcm4908_enet_gmac_init(enet);
	bcm4908_enet_dma_reset(enet);
	bcm4908_enet_dma_init(enet);

	enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);

	if (enet->irq_tx > 0) {
		napi_enable(&tx_ring->napi);
		bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
	}

	bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
	napi_enable(&rx_ring->napi);
	netif_carrier_on(netdev);
	netif_start_queue(netdev);
	bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
	bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);

	return 0;
}
static int bcm4908_enet_stop(struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	napi_disable(&rx_ring->napi);
	napi_disable(&tx_ring->napi);

	bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
	bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);

	bcm4908_enet_dma_uninit(enet);

	free_irq(enet->irq_tx, enet);
	free_irq(enet->netdev->irq, enet);

	return 0;
}
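
/*
 * TX path: one descriptor per skb. The frame is mapped for device reads and
 * described with SOP + EOP + APPEND_CRC; setting OWN hands it to the DMA
 * engine, which clears OWN once the frame has been sent. The ring wraps one
 * entry early (at length - 1), so the final descriptor is never used for TX.
 * Without a dedicated TX interrupt, completed skbs are reclaimed by
 * scheduling the TX NAPI from here.
 */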
static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	struct bcm4908_enet_dma_ring_bd *buf_desc;
	int free_buf_descs;
	u32 tmp;

	/* Free transmitted skbs */
	if (enet->irq_tx < 0 &&
	    !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN))
		napi_schedule(&enet->tx_ring.napi);

	/* Don't use the last empty buf descriptor */
	if (ring->read_idx <= ring->write_idx)
		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
	else
		free_buf_descs = ring->read_idx - ring->write_idx;
	if (free_buf_descs < 2) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	/* Hardware removes OWN bit after sending data */
	buf_desc = &ring->buf_desc[ring->write_idx];
	if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->write_idx];
	slot->skb = skb;
	slot->len = skb->len;
	slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
		return NETDEV_TX_BUSY;

	tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
	tmp |= DMA_CTL_STATUS_OWN;
	tmp |= DMA_CTL_STATUS_SOP;
	tmp |= DMA_CTL_STATUS_EOP;
	tmp |= DMA_CTL_STATUS_APPEND_CRC;

	if (ring->write_idx + 1 == ring->length - 1)
		tmp |= DMA_CTL_STATUS_WRAP;

	buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
	buf_desc->ctl = cpu_to_le32(tmp);

	bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);

	if (++ring->write_idx == ring->length - 1)
		ring->write_idx = 0;

	return NETDEV_TX_OK;
}
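
/*
 * RX NAPI poll: consume descriptors the hardware has released (OWN cleared),
 * refill each slot with a new buffer before handing the old skb up the stack,
 * and drop runt frames or frames that don't carry both SOP and EOP.
 */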
static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
{
	struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
	struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
	struct device *dev = enet->dev;
	int handled = 0;

	while (handled < weight) {
		struct bcm4908_enet_dma_ring_bd *buf_desc;
		struct bcm4908_enet_dma_ring_slot slot;
		u32 ctl;
		int len;
		int err;

		buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
		ctl = le32_to_cpu(buf_desc->ctl);
		if (ctl & DMA_CTL_STATUS_OWN)
			break;
		slot = enet->rx_ring.slots[enet->rx_ring.read_idx];

		/* Provide new buffer before unpinning the old one */
		err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
		if (err)
			break;

		if (++enet->rx_ring.read_idx == enet->rx_ring.length)
			enet->rx_ring.read_idx = 0;

		len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

		if (len < ETH_ZLEN ||
		    (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
			kfree_skb(slot.skb);
			enet->netdev->stats.rx_dropped++;
			break;
		}

		dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);

		skb_put(slot.skb, len - ETH_FCS_LEN);
		slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
		netif_receive_skb(slot.skb);

		enet->netdev->stats.rx_packets++;
		enet->netdev->stats.rx_bytes += len;

		handled++;
	}

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
	}

	/* Hardware could disable the ring if it runs out of descriptors */
	bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);

	return handled;
}
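
/*
 * TX NAPI poll: reclaim descriptors the hardware has finished with (OWN
 * cleared), unmap and free the transmitted skbs, update stats and wake the
 * queue if it was stopped for lack of descriptors.
 */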
static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
{
	struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
	struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
	struct bcm4908_enet_dma_ring_bd *buf_desc;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	unsigned int bytes = 0;
	int handled = 0;

	while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
		buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
			break;
		slot = &tx_ring->slots[tx_ring->read_idx];

		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
		dev_kfree_skb(slot->skb);

		handled++;
		bytes += slot->len;

		if (++tx_ring->read_idx == tx_ring->length)
			tx_ring->read_idx = 0;
	}

	enet->netdev->stats.tx_packets += handled;
	enet->netdev->stats.tx_bytes += bytes;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
	}

	if (netif_queue_stopped(enet->netdev))
		netif_wake_queue(enet->netdev);

	return handled;
}
static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);

	bcm4908_enet_set_mtu(enet, new_mtu);
	/* The core doesn't update dev->mtu when ndo_change_mtu is provided */
	netdev->mtu = new_mtu;

	return 0;
}
static const struct net_device_ops bcm4908_enet_netdev_ops = {
	.ndo_open = bcm4908_enet_open,
	.ndo_stop = bcm4908_enet_stop,
	.ndo_start_xmit = bcm4908_enet_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = bcm4908_enet_change_mtu,
};
static int bcm4908_enet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct bcm4908_enet *enet;
	int err;

	netdev = devm_alloc_etherdev(dev, sizeof(*enet));
	if (!netdev)
		return -ENOMEM;

	enet = netdev_priv(netdev);
	enet->dev = dev;
	enet->netdev = netdev;

	enet->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(enet->base)) {
		dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
		return PTR_ERR(enet->base);
	}

	netdev->irq = platform_get_irq_byname(pdev, "rx");
	if (netdev->irq < 0)
		return netdev->irq;

	enet->irq_tx = platform_get_irq_byname(pdev, "tx");

	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = bcm4908_enet_dma_alloc(enet);
	if (err)
		return err;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = of_get_ethdev_address(dev->of_node, netdev);
	if (err == -EPROBE_DEFER)
		goto err_dma_free;
	if (err)
		eth_hw_addr_random(netdev);
	netdev->netdev_ops = &bcm4908_enet_netdev_ops;
	netdev->min_mtu = ETH_ZLEN;
	netdev->mtu = ETH_DATA_LEN;
	netdev->max_mtu = ENET_MTU_MAX;
	netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
	netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);

	err = register_netdev(netdev);
	if (err)
		goto err_dma_free;

	platform_set_drvdata(pdev, enet);

	return 0;

err_dma_free:
	bcm4908_enet_dma_free(enet);

	return err;
}
static int bcm4908_enet_remove(struct platform_device *pdev)
{
	struct bcm4908_enet *enet = platform_get_drvdata(pdev);

	unregister_netdev(enet->netdev);
	netif_napi_del(&enet->rx_ring.napi);
	netif_napi_del(&enet->tx_ring.napi);
	bcm4908_enet_dma_free(enet);

	return 0;
}
static const struct of_device_id bcm4908_enet_of_match[] = {
	{ .compatible = "brcm,bcm4908-enet"},
	{},
};
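
/*
 * Illustrative sketch (not copied from the DT binding document) of the node
 * this driver matches: a single register range plus "rx" (and optionally
 * "tx") named interrupts, as looked up via platform_get_irq_byname() above.
 * The unit address and interrupt numbers below are placeholders.
 *
 *	ethernet@80002000 {
 *		compatible = "brcm,bcm4908-enet";
 *		reg = <0x80002000 0x1000>;
 *		interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
 *		interrupt-names = "rx", "tx";
 *	};
 */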
static struct platform_driver bcm4908_enet_driver = {
	.driver = {
		.name = "bcm4908_enet",
		.of_match_table = bcm4908_enet_of_match,
	},
	.probe = bcm4908_enet_probe,
	.remove = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);

MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);