/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}
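
/* Stop the Tx subqueue when the ring cannot hold 'count' more descriptors.
 * If a previous skb was held back for xmit_more coalescing, kick the
 * hardware now so the pending descriptors are not left stranded.
 */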
static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}
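
/* Extract the TSO header length, TCP payload length and MSS from the skb
 * and account for the header bytes that are replicated in each segment.
 */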
static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_tcp_all_headers(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}
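
/* Walk the skb once up front to work out how many descriptors it needs
 * (context descriptor, TSO header, linear data and page fragments) and
 * record the checksum/TSO/VLAN attributes for the descriptor setup code.
 */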
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}
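
/* Derive the Rx buffer size from the MTU: add room for the Ethernet
 * header, FCS and a VLAN tag, clamp to the supported range and round
 * up to the buffer alignment.
 */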
static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}
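
/* Device-level interrupt handler. Walks the per-channel DMA status,
 * schedules NAPI for Tx/Rx work when per-channel interrupts are not in
 * use, records error counters and services MAC/MMC interrupts.
 */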
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}
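
/* Per-channel DMA interrupt handler, used when each channel has its own
 * IRQ line: mask the channel's IRQ and hand the work to NAPI.
 */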
static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}
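
/* Tx timer callback: disable the relevant interrupts and schedule NAPI so
 * the rings are serviced even if no Tx interrupt has arrived.
 */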
static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
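
/* Request the device IRQ and, when per-channel interrupts are enabled,
 * one DMA IRQ per channel; unwind everything on failure.
 */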
static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}
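
/* Bring the hardware up: program the MAC/DMA, enable NAPI and request the
 * interrupts, then enable the Tx/Rx paths and start the Tx queues.
 */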
static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channels_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}
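
/* ndo_start_xmit handler: size the packet, reserve ring descriptors, set
 * up the TSO/VLAN state, map the skb and hand it to the hardware.
 */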
static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, saddr->sa_data);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open		= xlgmac_open,
	.ndo_stop		= xlgmac_close,
	.ndo_start_xmit		= xlgmac_xmit,
	.ndo_tx_timeout		= xlgmac_tx_timeout,
	.ndo_get_stats64	= xlgmac_get_stats64,
	.ndo_change_mtu		= xlgmac_change_mtu,
	.ndo_set_mac_address	= xlgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= xlgmac_ioctl,
	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xlgmac_poll_controller,
#endif
	.ndo_set_features	= xlgmac_set_features,
	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}
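
/* Re-arm consumed Rx descriptors with fresh buffers and advance the Rx
 * tail pointer so the DMA engine can use them again.
 */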
static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}
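
/* Build an skb for a received packet: copy the header (or the whole packet
 * if it fits) into the linear area and attach any remaining data as a page
 * fragment.
 */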
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}
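
/* Reclaim completed Tx descriptors, update BQL accounting and wake the
 * queue once enough descriptors are free again.
 */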
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}
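
/* NAPI Rx processing for one channel: read completed descriptors, stitch
 * multi-descriptor packets together, apply checksum/VLAN/RSS metadata and
 * pass the skbs up via GRO. Partial packet state is saved across calls.
 */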
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;

	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}
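
/* NAPI poll routine used with per-channel interrupts: service one
 * channel's Tx and Rx rings, then re-enable its IRQ when done.
 */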
static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						      struct xlgmac_channel,
						      napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}
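
/* NAPI poll routine used with a single device interrupt: service every
 * channel, splitting the budget across the Rx rings.
 */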
static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						  struct xlgmac_pdata,
						  napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}