
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <[email protected]>
 *	      Keyur Chudgar <[email protected]>
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

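/*
 * Gather the per-port resources from the platform device: the ENET CSR
 * MMIO region, the MAC address (falling back to a random one), the
 * phy-connection-type (RGMII only) and the interrupt line.
 */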
static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;
	struct resource *res;
	struct device *dev;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (device_get_ethdev_address(dev, ndev))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	pdata->resources.irq = ret;

	return 0;
}

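/*
 * Post 'nbuf' empty receive buffers starting at the ring tail: allocate an
 * skb, map it for DMA, program the packet address into the descriptor and
 * set the E (empty) bit so the hardware can fill it.
 */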
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

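/* Interrupt handler: mask device interrupts and defer all work to NAPI. */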
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}

static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

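/*
 * Transmit path: if the tail slot is free, copy the linear skb data into a
 * coherent packet buffer, fill the descriptor with the buffer address and
 * length, clear the E bit and kick the TX DMA engine via DMATXCTRL.
 */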
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				     GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

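/*
 * TX completion: walk the ring from the head and, for every descriptor the
 * hardware has marked done, update stats, release the coherent buffer and
 * skb, re-mark the slot empty and acknowledge it in DMATXSTATUS.
 */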
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

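/*
 * RX poll: consume up to 'budget' filled descriptors, unmap and hand each
 * good frame to the stack via GRO, count errored frames, and refill one
 * buffer per processed slot.
 */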
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

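/*
 * Allocate one descriptor ring: the ring bookkeeping structure, the coherent
 * DMA region holding the descriptors and the per-slot pkt_info array, then
 * chain the descriptors with xge_setup_desc().
 */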
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret)
		return ret;

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);

	phy_stop(ndev->phydev);
	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

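/*
 * NAPI poll callback: reclaim finished TX descriptors, process RX up to the
 * budget, and re-enable interrupts once the ring has been drained.
 */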
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

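/*
 * TX watchdog: quiesce the interface, stop TX DMA, reclaim completed and
 * pending descriptors, reset the TX ring and MAC, then restart everything.
 */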
static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

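/*
 * Probe: allocate the net_device, pick up platform resources, set a 64-bit
 * DMA mask, reset and initialize the port, set up MDIO and NAPI, and
 * register the netdev.
 */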
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_mdio_remove;
	}

	return 0;

err_mdio_remove:
	xge_mdio_remove(ndev);
err:
	free_netdev(ndev);

	return ret;
}

static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		.name = "xgene-enet-v2",
		.acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <[email protected]>");
MODULE_LICENSE("GPL");