xgbe-desc.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
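
/* Release everything attached to one descriptor ring: unmap and free each
 * ring data entry, drop any cached Rx header/buffer page allocations, and
 * free the coherent descriptor memory itself.
 */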
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
                           struct xgbe_ring *ring)
{
        struct xgbe_ring_data *rdata;
        unsigned int i;

        if (!ring)
                return;

        if (ring->rdata) {
                for (i = 0; i < ring->rdesc_count; i++) {
                        rdata = XGBE_GET_DESC_DATA(ring, i);
                        xgbe_unmap_rdata(pdata, rdata);
                }

                kfree(ring->rdata);
                ring->rdata = NULL;
        }

        if (ring->rx_hdr_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
                               ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_hdr_pa.pages);

                ring->rx_hdr_pa.pages = NULL;
                ring->rx_hdr_pa.pages_len = 0;
                ring->rx_hdr_pa.pages_offset = 0;
                ring->rx_hdr_pa.pages_dma = 0;
        }

        if (ring->rx_buf_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
                               ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_buf_pa.pages);

                ring->rx_buf_pa.pages = NULL;
                ring->rx_buf_pa.pages_len = 0;
                ring->rx_buf_pa.pages_offset = 0;
                ring->rx_buf_pa.pages_dma = 0;
        }

        if (ring->rdesc) {
                dma_free_coherent(pdata->dev,
                                  (sizeof(struct xgbe_ring_desc) *
                                   ring->rdesc_count),
                                  ring->rdesc, ring->rdesc_dma);
                ring->rdesc = NULL;
        }
}
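
/* Free the Tx and Rx descriptor rings of every DMA channel. */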
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_free_ring_resources\n");

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                xgbe_free_ring(pdata, channel->tx_ring);
                xgbe_free_ring(pdata, channel->rx_ring);
        }

        DBGPR("<--xgbe_free_ring_resources\n");
}
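
/* Allocate zeroed memory, preferring the requested NUMA node and falling
 * back to any node if the node-local allocation fails.
 */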
static void *xgbe_alloc_node(size_t size, int node)
{
        void *mem;

        mem = kzalloc_node(size, GFP_KERNEL, node);
        if (!mem)
                mem = kzalloc(size, GFP_KERNEL);

        return mem;
}
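
/* Allocate coherent DMA memory while the device is temporarily retargeted to
 * the requested NUMA node; retry without the node preference on failure.
 */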
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
                                 dma_addr_t *dma, int node)
{
        void *mem;
        int cur_node = dev_to_node(dev);

        set_dev_node(dev, node);
        mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
        set_dev_node(dev, cur_node);

        if (!mem)
                mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

        return mem;
}
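
/* Allocate one ring: the hardware descriptor array in coherent DMA memory
 * plus the matching software ring data array, both on the ring's NUMA node.
 */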
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
                          struct xgbe_ring *ring, unsigned int rdesc_count)
{
        size_t size;

        if (!ring)
                return 0;

        /* Descriptors */
        size = rdesc_count * sizeof(struct xgbe_ring_desc);

        ring->rdesc_count = rdesc_count;
        ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
                                          ring->node);
        if (!ring->rdesc)
                return -ENOMEM;

        /* Descriptor information */
        size = rdesc_count * sizeof(struct xgbe_ring_data);

        ring->rdata = xgbe_alloc_node(size, ring->node);
        if (!ring->rdata)
                return -ENOMEM;

        netif_dbg(pdata, drv, pdata->netdev,
                  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
                  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

        return 0;
}
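
/* Allocate the Tx and Rx rings for every channel; on any failure, tear down
 * whatever was already allocated.
 */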
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;
        int ret;

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];

                netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
                          channel->name);
                ret = xgbe_init_ring(pdata, channel->tx_ring,
                                     pdata->tx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Tx ring\n");
                        goto err_ring;
                }

                netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
                          channel->name);
                ret = xgbe_init_ring(pdata, channel->rx_ring,
                                     pdata->rx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Rx ring\n");
                        goto err_ring;
                }
        }

        return 0;

err_ring:
        xgbe_free_ring_resources(pdata);

        return ret;
}
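
/* Allocate a compound page allocation for Rx buffers and map it for device
 * DMA.  The allocation order is reduced until pages are obtained, and a
 * node-local attempt is followed by a node-agnostic retry.
 */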
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
                            struct xgbe_page_alloc *pa, int alloc_order,
                            int node)
{
        struct page *pages = NULL;
        dma_addr_t pages_dma;
        gfp_t gfp;
        int order;

again:
        order = alloc_order;

        /* Try to obtain pages, decreasing order if necessary */
        gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
        while (order >= 0) {
                pages = alloc_pages_node(node, gfp, order);
                if (pages)
                        break;

                order--;
        }

        /* If we couldn't get local pages, try getting from anywhere */
        if (!pages && (node != NUMA_NO_NODE)) {
                node = NUMA_NO_NODE;
                goto again;
        }

        if (!pages)
                return -ENOMEM;

        /* Map the pages */
        pages_dma = dma_map_page(pdata->dev, pages, 0,
                                 PAGE_SIZE << order, DMA_FROM_DEVICE);
        if (dma_mapping_error(pdata->dev, pages_dma)) {
                put_page(pages);
                return -ENOMEM;
        }

        pa->pages = pages;
        pa->pages_len = PAGE_SIZE << order;
        pa->pages_offset = 0;
        pa->pages_dma = pages_dma;

        return 0;
}
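
/* Carve a buffer of 'len' bytes out of the current page allocation and record
 * it in the buffer descriptor.  If the allocation cannot satisfy another
 * buffer of this size, responsibility for unmapping the pages is handed to
 * this descriptor and the allocator state is cleared so a fresh allocation is
 * made next time.
 */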
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
                                 struct xgbe_page_alloc *pa,
                                 unsigned int len)
{
        get_page(pa->pages);
        bd->pa = *pa;

        bd->dma_base = pa->pages_dma;
        bd->dma_off = pa->pages_offset;
        bd->dma_len = len;

        pa->pages_offset += len;
        if ((pa->pages_offset + len) > pa->pages_len) {
                /* This data descriptor is responsible for unmapping page(s) */
                bd->pa_unmap = *pa;

                /* Get a new allocation next time */
                pa->pages = NULL;
                pa->pages_len = 0;
                pa->pages_offset = 0;
                pa->pages_dma = 0;
        }
}
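
/* Attach Rx header and data buffers to a ring entry, replenishing the ring's
 * cached page allocations when they have been exhausted.
 */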
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
{
        int ret;

        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
                if (ret)
                        return ret;
        }

        if (!ring->rx_buf_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
                                       PAGE_ALLOC_COSTLY_ORDER, ring->node);
                if (ret)
                        return ret;
        }

        /* Set up the header page info */
        xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
                             XGBE_SKB_ALLOC_SIZE);

        /* Set up the buffer page info */
        xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
                             pdata->rx_buf_size);

        return 0;
}
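
/* Point each Tx ring data entry at its hardware descriptor (virtual and DMA
 * address), reset the ring state, and let the hardware layer program the
 * descriptors.
 */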
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                ring = channel->tx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;
                memset(&ring->tx, 0, sizeof(ring->tx));

                hw_if->tx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}
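
/* As for the Tx wrapper above, but each Rx entry also gets its header and
 * data buffers mapped before the hardware layer initializes the descriptors.
 */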
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_ring_data *rdata;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                ring = channel->rx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        if (xgbe_map_rx_buffer(pdata, ring, rdata))
                                break;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;

                hw_if->rx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
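
/* Undo whatever was set up for one ring entry: DMA-unmap the skb data, free
 * the skb, drop any Rx page references (unmapping pages this entry owns), and
 * clear the saved Tx/Rx state.
 */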
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
                             struct xgbe_ring_data *rdata)
{
        if (rdata->skb_dma) {
                if (rdata->mapped_as_page) {
                        dma_unmap_page(pdata->dev, rdata->skb_dma,
                                       rdata->skb_dma_len, DMA_TO_DEVICE);
                } else {
                        dma_unmap_single(pdata->dev, rdata->skb_dma,
                                         rdata->skb_dma_len, DMA_TO_DEVICE);
                }
                rdata->skb_dma = 0;
                rdata->skb_dma_len = 0;
        }

        if (rdata->skb) {
                dev_kfree_skb_any(rdata->skb);
                rdata->skb = NULL;
        }

        if (rdata->rx.hdr.pa.pages)
                put_page(rdata->rx.hdr.pa.pages);

        if (rdata->rx.hdr.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
                               rdata->rx.hdr.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.hdr.pa_unmap.pages);
        }

        if (rdata->rx.buf.pa.pages)
                put_page(rdata->rx.buf.pa.pages);

        if (rdata->rx.buf.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
                               rdata->rx.buf.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.buf.pa_unmap.pages);
        }

        memset(&rdata->tx, 0, sizeof(rdata->tx));
        memset(&rdata->rx, 0, sizeof(rdata->rx));

        rdata->mapped_as_page = 0;

        if (rdata->state_saved) {
                rdata->state_saved = 0;
                rdata->state.skb = NULL;
                rdata->state.len = 0;
                rdata->state.error = 0;
        }
}
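
/* DMA-map an outgoing skb across one or more ring entries: optionally leave
 * room for a context descriptor, map the TSO header separately, then map the
 * linear data and every fragment in chunks of at most XGBE_TX_MAX_BUF_SIZE.
 * Returns the number of descriptors used, or 0 on failure after unmapping any
 * partial work.
 */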
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
        skb_frag_t *frag;
        dma_addr_t skb_dma;
        unsigned int start_index, cur_index;
        unsigned int offset, tso, vlan, datalen, len;
        unsigned int i;

        DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

        offset = 0;
        start_index = ring->cur;
        cur_index = ring->cur;

        packet = &ring->packet_data;
        packet->rdesc_count = 0;
        packet->length = 0;

        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        /* Save space for a context descriptor if needed */
        if ((tso && (packet->mss != ring->tx.cur_mss)) ||
            (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);

        if (tso) {
                /* Map the TSO header */
                skb_dma = dma_map_single(pdata->dev, skb->data,
                                         packet->header_len, DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = packet->header_len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb header: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, packet->header_len);

                offset = packet->header_len;

                packet->length += packet->header_len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
                len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb data: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, len);

                datalen -= len;
                offset += len;

                packet->length += len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "mapping frag %u\n", i);

                frag = &skb_shinfo(skb)->frags[i];
                offset = 0;

                for (datalen = skb_frag_size(frag); datalen; ) {
                        len = min_t(unsigned int, datalen,
                                    XGBE_TX_MAX_BUF_SIZE);

                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
                        if (dma_mapping_error(pdata->dev, skb_dma)) {
                                netdev_alert(pdata->netdev,
                                             "skb_frag_dma_map failed\n");
                                goto err_out;
                        }
                        rdata->skb_dma = skb_dma;
                        rdata->skb_dma_len = len;
                        rdata->mapped_as_page = 1;
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "skb frag: index=%u, dma=%pad, len=%u\n",
                                  cur_index, &skb_dma, len);

                        datalen -= len;
                        offset += len;

                        packet->length += len;

                        cur_index++;
                        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                }
        }

        /* Save the skb address in the last entry. We always have some data
         * that has been mapped so rdata is always advanced past the last
         * piece of mapped data - use the entry pointed to by cur_index - 1.
         */
        rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
        rdata->skb = skb;

        /* Save the number of descriptor entries used */
        packet->rdesc_count = cur_index - start_index;

        DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

        return packet->rdesc_count;

err_out:
        while (start_index < cur_index) {
                rdata = XGBE_GET_DESC_DATA(ring, start_index++);
                xgbe_unmap_rdata(pdata, rdata);
        }

        DBGPR("<--xgbe_map_tx_skb: count=0\n");

        return 0;
}
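
/* Populate the descriptor interface used by the rest of the driver with the
 * ring management routines defined in this file.
 */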
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
        DBGPR("-->xgbe_init_function_ptrs_desc\n");

        desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
        desc_if->free_ring_resources = xgbe_free_ring_resources;
        desc_if->map_tx_skb = xgbe_map_tx_skb;
        desc_if->map_rx_buffer = xgbe_map_rx_buffer;
        desc_if->unmap_rdata = xgbe_unmap_rdata;
        desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
        desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

        DBGPR("<--xgbe_init_function_ptrs_desc\n");
}