lantiq_xrx200.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <[email protected]>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <[email protected]>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to Packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}
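
/* The two helpers below size the RX buffers: the DMA buffer covers the
 * largest VLAN frame for the given MTU, rounded up to the 32-byte burst
 * length (4 * XRX200_DMA_BURST_LEN); the backing fragment additionally
 * reserves NET_SKB_PAD + NET_IP_ALIGN headroom and the skb_shared_info
 * needed by build_skb().
 */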
static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}
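
/* ndo_open: enable NAPI and open the TX/RX DMA channels. The RX interrupt is
 * only enabled after any stale frames left behind by the boot loader have
 * been flushed from the ring.
 */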
static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}
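
/* Replace the RX buffer at the current descriptor with a freshly allocated
 * page fragment and map it for DMA. On allocation or mapping failure the old
 * buffer is kept so the ring stays populated; in either case the descriptor
 * is handed back to the hardware.
 */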
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
	/* Make sure the address is written before we give it to HW */
	wmb();

skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}
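
/* Take one completed RX descriptor, wrap its buffer in an skb via build_skb()
 * and refill the slot. Frames spanning several descriptors are chained
 * through skb->frag_list: the SOP buffer becomes the head and later buffers
 * are appended until EOP, when the assembled skb is passed up the stack.
 */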
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to skb via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}
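
/* NAPI RX poll: consume descriptors owned by the CPU until the budget is
 * spent. Only frames completed with EOP count against the budget;
 * descriptors belonging to a frame still in progress are processed without
 * being counted. The RX interrupt is re-enabled once polling completes
 * below budget.
 */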
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				break;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}
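
/* NAPI TX completion: under netif_tx_lock, reclaim descriptors the DMA
 * controller has finished with, free the associated skbs, update the
 * statistics and BQL state, and wake the queue if it was stopped.
 */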
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}
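
/* Transmit one skb on the single TX DMA channel. The mapped address is
 * aligned down to the 32-byte burst boundary and the remainder is carried
 * in the descriptor via LTQ_DMA_TX_OFFSET(). The queue is stopped as soon
 * as the next descriptor would catch up with the completion index
 * (tx_free).
 */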
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a burst length value aligned address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
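
/* Growing the MTU requires larger RX buffers: drain the RX ring, then walk
 * every descriptor and replace its buffer with one sized for the new MTU,
 * saving and restoring the current descriptor index around the walk. If an
 * allocation fails, the old MTU and buffer sizes are restored and the
 * resize is abandoned. Shrinking the MTU needs no re-allocation.
 */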
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open = xrx200_open,
	.ndo_stop = xrx200_close,
	.ndo_start_xmit = xrx200_start_xmit,
	.ndo_change_mtu = xrx200_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
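
/* Shared handler for the RX and TX DMA interrupts: mask the channel's
 * interrupt, schedule its NAPI instance and acknowledge the IRQ. The poll
 * functions re-enable the interrupt once their work is done.
 */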
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}
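
/* Set up the ETOP DMA port, allocate the RX and TX descriptor rings,
 * populate the RX ring with buffers and request both channel interrupts.
 * On failure, everything allocated so far is torn down again.
 */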
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}
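
/* Probe: map the PMAC register block, fetch the DMA IRQs and the clock from
 * the device tree, initialize the DMA channels, configure the PMAC (IPG,
 * status header, CRC handling) and register the net_device with its RX and
 * TX NAPI contexts.
 */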
static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");