netdev.c 77 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
2944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2009 - 2018 Intel Corporation. */
  3. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  4. #include <linux/module.h>
  5. #include <linux/types.h>
  6. #include <linux/init.h>
  7. #include <linux/pci.h>
  8. #include <linux/vmalloc.h>
  9. #include <linux/pagemap.h>
  10. #include <linux/delay.h>
  11. #include <linux/netdevice.h>
  12. #include <linux/tcp.h>
  13. #include <linux/ipv6.h>
  14. #include <linux/slab.h>
  15. #include <net/checksum.h>
  16. #include <net/ip6_checksum.h>
  17. #include <linux/mii.h>
  18. #include <linux/ethtool.h>
  19. #include <linux/if_vlan.h>
  20. #include <linux/prefetch.h>
  21. #include <linux/sctp.h>
  22. #include "igbvf.h"
  23. char igbvf_driver_name[] = "igbvf";
  24. static const char igbvf_driver_string[] =
  25. "Intel(R) Gigabit Virtual Function Network Driver";
  26. static const char igbvf_copyright[] =
  27. "Copyright (c) 2009 - 2012 Intel Corporation.";
  28. #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
  29. static int debug = -1;
  30. module_param(debug, int, 0);
  31. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  32. static int igbvf_poll(struct napi_struct *napi, int budget);
  33. static void igbvf_reset(struct igbvf_adapter *);
  34. static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
  35. static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
  36. static struct igbvf_info igbvf_vf_info = {
  37. .mac = e1000_vfadapt,
  38. .flags = 0,
  39. .pba = 10,
  40. .init_ops = e1000_init_function_pointers_vf,
  41. };
  42. static struct igbvf_info igbvf_i350_vf_info = {
  43. .mac = e1000_vfadapt_i350,
  44. .flags = 0,
  45. .pba = 10,
  46. .init_ops = e1000_init_function_pointers_vf,
  47. };
  48. static const struct igbvf_info *igbvf_info_tbl[] = {
  49. [board_vf] = &igbvf_vf_info,
  50. [board_i350_vf] = &igbvf_i350_vf_info,
  51. };
  52. /**
  53. * igbvf_desc_unused - calculate if we have unused descriptors
  54. * @ring: address of receive ring structure
  55. **/
  56. static int igbvf_desc_unused(struct igbvf_ring *ring)
  57. {
  58. if (ring->next_to_clean > ring->next_to_use)
  59. return ring->next_to_clean - ring->next_to_use - 1;
  60. return ring->count + ring->next_to_clean - ring->next_to_use - 1;
  61. }
  62. /**
  63. * igbvf_receive_skb - helper function to handle Rx indications
  64. * @adapter: board private structure
  65. * @netdev: pointer to netdev struct
  66. * @skb: skb to indicate to stack
  67. * @status: descriptor status field as written by hardware
  68. * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
  69. * @skb: pointer to sk_buff to be indicated to stack
  70. **/
  71. static void igbvf_receive_skb(struct igbvf_adapter *adapter,
  72. struct net_device *netdev,
  73. struct sk_buff *skb,
  74. u32 status, __le16 vlan)
  75. {
  76. u16 vid;
  77. if (status & E1000_RXD_STAT_VP) {
  78. if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
  79. (status & E1000_RXDEXT_STATERR_LB))
  80. vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
  81. else
  82. vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
  83. if (test_bit(vid, adapter->active_vlans))
  84. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
  85. }
  86. napi_gro_receive(&adapter->rx_ring->napi, skb);
  87. }
  88. static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
  89. u32 status_err, struct sk_buff *skb)
  90. {
  91. skb_checksum_none_assert(skb);
  92. /* Ignore Checksum bit is set or checksum is disabled through ethtool */
  93. if ((status_err & E1000_RXD_STAT_IXSM) ||
  94. (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
  95. return;
  96. /* TCP/UDP checksum error bit is set */
  97. if (status_err &
  98. (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
  99. /* let the stack verify checksum errors */
  100. adapter->hw_csum_err++;
  101. return;
  102. }
  103. /* It must be a TCP or UDP packet with a valid checksum */
  104. if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
  105. skb->ip_summed = CHECKSUM_UNNECESSARY;
  106. adapter->hw_csum_good++;
  107. }
  108. /**
  109. * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
  110. * @rx_ring: address of ring structure to repopulate
  111. * @cleaned_count: number of buffers to repopulate
  112. **/
  113. static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
  114. int cleaned_count)
  115. {
  116. struct igbvf_adapter *adapter = rx_ring->adapter;
  117. struct net_device *netdev = adapter->netdev;
  118. struct pci_dev *pdev = adapter->pdev;
  119. union e1000_adv_rx_desc *rx_desc;
  120. struct igbvf_buffer *buffer_info;
  121. struct sk_buff *skb;
  122. unsigned int i;
  123. int bufsz;
  124. i = rx_ring->next_to_use;
  125. buffer_info = &rx_ring->buffer_info[i];
  126. if (adapter->rx_ps_hdr_size)
  127. bufsz = adapter->rx_ps_hdr_size;
  128. else
  129. bufsz = adapter->rx_buffer_len;
  130. while (cleaned_count--) {
  131. rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
  132. if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
  133. if (!buffer_info->page) {
  134. buffer_info->page = alloc_page(GFP_ATOMIC);
  135. if (!buffer_info->page) {
  136. adapter->alloc_rx_buff_failed++;
  137. goto no_buffers;
  138. }
  139. buffer_info->page_offset = 0;
  140. } else {
  141. buffer_info->page_offset ^= PAGE_SIZE / 2;
  142. }
  143. buffer_info->page_dma =
  144. dma_map_page(&pdev->dev, buffer_info->page,
  145. buffer_info->page_offset,
  146. PAGE_SIZE / 2,
  147. DMA_FROM_DEVICE);
  148. if (dma_mapping_error(&pdev->dev,
  149. buffer_info->page_dma)) {
  150. __free_page(buffer_info->page);
  151. buffer_info->page = NULL;
  152. dev_err(&pdev->dev, "RX DMA map failed\n");
  153. break;
  154. }
  155. }
  156. if (!buffer_info->skb) {
  157. skb = netdev_alloc_skb_ip_align(netdev, bufsz);
  158. if (!skb) {
  159. adapter->alloc_rx_buff_failed++;
  160. goto no_buffers;
  161. }
  162. buffer_info->skb = skb;
  163. buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
  164. bufsz,
  165. DMA_FROM_DEVICE);
  166. if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
  167. dev_kfree_skb(buffer_info->skb);
  168. buffer_info->skb = NULL;
  169. dev_err(&pdev->dev, "RX DMA map failed\n");
  170. goto no_buffers;
  171. }
  172. }
  173. /* Refresh the desc even if buffer_addrs didn't change because
  174. * each write-back erases this info.
  175. */
  176. if (adapter->rx_ps_hdr_size) {
  177. rx_desc->read.pkt_addr =
  178. cpu_to_le64(buffer_info->page_dma);
  179. rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
  180. } else {
  181. rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
  182. rx_desc->read.hdr_addr = 0;
  183. }
  184. i++;
  185. if (i == rx_ring->count)
  186. i = 0;
  187. buffer_info = &rx_ring->buffer_info[i];
  188. }
  189. no_buffers:
  190. if (rx_ring->next_to_use != i) {
  191. rx_ring->next_to_use = i;
  192. if (i == 0)
  193. i = (rx_ring->count - 1);
  194. else
  195. i--;
  196. /* Force memory writes to complete before letting h/w
  197. * know there are new descriptors to fetch. (Only
  198. * applicable for weak-ordered memory model archs,
  199. * such as IA-64).
  200. */
  201. wmb();
  202. writel(i, adapter->hw.hw_addr + rx_ring->tail);
  203. }
  204. }
  205. /**
  206. * igbvf_clean_rx_irq - Send received data up the network stack; legacy
  207. * @adapter: board private structure
  208. * @work_done: output parameter used to indicate completed work
  209. * @work_to_do: input parameter setting limit of work
  210. *
  211. * the return value indicates whether actual cleaning was done, there
  212. * is no guarantee that everything was cleaned
  213. **/
  214. static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
  215. int *work_done, int work_to_do)
  216. {
  217. struct igbvf_ring *rx_ring = adapter->rx_ring;
  218. struct net_device *netdev = adapter->netdev;
  219. struct pci_dev *pdev = adapter->pdev;
  220. union e1000_adv_rx_desc *rx_desc, *next_rxd;
  221. struct igbvf_buffer *buffer_info, *next_buffer;
  222. struct sk_buff *skb;
  223. bool cleaned = false;
  224. int cleaned_count = 0;
  225. unsigned int total_bytes = 0, total_packets = 0;
  226. unsigned int i;
  227. u32 length, hlen, staterr;
  228. i = rx_ring->next_to_clean;
  229. rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
  230. staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
  231. while (staterr & E1000_RXD_STAT_DD) {
  232. if (*work_done >= work_to_do)
  233. break;
  234. (*work_done)++;
  235. rmb(); /* read descriptor and rx_buffer_info after status DD */
  236. buffer_info = &rx_ring->buffer_info[i];
  237. /* HW will not DMA in data larger than the given buffer, even
  238. * if it parses the (NFS, of course) header to be larger. In
  239. * that case, it fills the header buffer and spills the rest
  240. * into the page.
  241. */
  242. hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
  243. & E1000_RXDADV_HDRBUFLEN_MASK) >>
  244. E1000_RXDADV_HDRBUFLEN_SHIFT;
  245. if (hlen > adapter->rx_ps_hdr_size)
  246. hlen = adapter->rx_ps_hdr_size;
  247. length = le16_to_cpu(rx_desc->wb.upper.length);
  248. cleaned = true;
  249. cleaned_count++;
  250. skb = buffer_info->skb;
  251. prefetch(skb->data - NET_IP_ALIGN);
  252. buffer_info->skb = NULL;
  253. if (!adapter->rx_ps_hdr_size) {
  254. dma_unmap_single(&pdev->dev, buffer_info->dma,
  255. adapter->rx_buffer_len,
  256. DMA_FROM_DEVICE);
  257. buffer_info->dma = 0;
  258. skb_put(skb, length);
  259. goto send_up;
  260. }
  261. if (!skb_shinfo(skb)->nr_frags) {
  262. dma_unmap_single(&pdev->dev, buffer_info->dma,
  263. adapter->rx_ps_hdr_size,
  264. DMA_FROM_DEVICE);
  265. buffer_info->dma = 0;
  266. skb_put(skb, hlen);
  267. }
  268. if (length) {
  269. dma_unmap_page(&pdev->dev, buffer_info->page_dma,
  270. PAGE_SIZE / 2,
  271. DMA_FROM_DEVICE);
  272. buffer_info->page_dma = 0;
  273. skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
  274. buffer_info->page,
  275. buffer_info->page_offset,
  276. length);
  277. if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
  278. (page_count(buffer_info->page) != 1))
  279. buffer_info->page = NULL;
  280. else
  281. get_page(buffer_info->page);
  282. skb->len += length;
  283. skb->data_len += length;
  284. skb->truesize += PAGE_SIZE / 2;
  285. }
  286. send_up:
  287. i++;
  288. if (i == rx_ring->count)
  289. i = 0;
  290. next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
  291. prefetch(next_rxd);
  292. next_buffer = &rx_ring->buffer_info[i];
  293. if (!(staterr & E1000_RXD_STAT_EOP)) {
  294. buffer_info->skb = next_buffer->skb;
  295. buffer_info->dma = next_buffer->dma;
  296. next_buffer->skb = skb;
  297. next_buffer->dma = 0;
  298. goto next_desc;
  299. }
  300. if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
  301. dev_kfree_skb_irq(skb);
  302. goto next_desc;
  303. }
  304. total_bytes += skb->len;
  305. total_packets++;
  306. igbvf_rx_checksum_adv(adapter, staterr, skb);
  307. skb->protocol = eth_type_trans(skb, netdev);
  308. igbvf_receive_skb(adapter, netdev, skb, staterr,
  309. rx_desc->wb.upper.vlan);
  310. next_desc:
  311. rx_desc->wb.upper.status_error = 0;
  312. /* return some buffers to hardware, one at a time is too slow */
  313. if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
  314. igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
  315. cleaned_count = 0;
  316. }
  317. /* use prefetched values */
  318. rx_desc = next_rxd;
  319. buffer_info = next_buffer;
  320. staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
  321. }
  322. rx_ring->next_to_clean = i;
  323. cleaned_count = igbvf_desc_unused(rx_ring);
  324. if (cleaned_count)
  325. igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
  326. adapter->total_rx_packets += total_packets;
  327. adapter->total_rx_bytes += total_bytes;
  328. netdev->stats.rx_bytes += total_bytes;
  329. netdev->stats.rx_packets += total_packets;
  330. return cleaned;
  331. }
  332. static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
  333. struct igbvf_buffer *buffer_info)
  334. {
  335. if (buffer_info->dma) {
  336. if (buffer_info->mapped_as_page)
  337. dma_unmap_page(&adapter->pdev->dev,
  338. buffer_info->dma,
  339. buffer_info->length,
  340. DMA_TO_DEVICE);
  341. else
  342. dma_unmap_single(&adapter->pdev->dev,
  343. buffer_info->dma,
  344. buffer_info->length,
  345. DMA_TO_DEVICE);
  346. buffer_info->dma = 0;
  347. }
  348. if (buffer_info->skb) {
  349. dev_kfree_skb_any(buffer_info->skb);
  350. buffer_info->skb = NULL;
  351. }
  352. buffer_info->time_stamp = 0;
  353. }
  354. /**
  355. * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
  356. * @adapter: board private structure
  357. * @tx_ring: ring being initialized
  358. *
  359. * Return 0 on success, negative on failure
  360. **/
  361. int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
  362. struct igbvf_ring *tx_ring)
  363. {
  364. struct pci_dev *pdev = adapter->pdev;
  365. int size;
  366. size = sizeof(struct igbvf_buffer) * tx_ring->count;
  367. tx_ring->buffer_info = vzalloc(size);
  368. if (!tx_ring->buffer_info)
  369. goto err;
  370. /* round up to nearest 4K */
  371. tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
  372. tx_ring->size = ALIGN(tx_ring->size, 4096);
  373. tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
  374. &tx_ring->dma, GFP_KERNEL);
  375. if (!tx_ring->desc)
  376. goto err;
  377. tx_ring->adapter = adapter;
  378. tx_ring->next_to_use = 0;
  379. tx_ring->next_to_clean = 0;
  380. return 0;
  381. err:
  382. vfree(tx_ring->buffer_info);
  383. dev_err(&adapter->pdev->dev,
  384. "Unable to allocate memory for the transmit descriptor ring\n");
  385. return -ENOMEM;
  386. }
  387. /**
  388. * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
  389. * @adapter: board private structure
  390. * @rx_ring: ring being initialized
  391. *
  392. * Returns 0 on success, negative on failure
  393. **/
  394. int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
  395. struct igbvf_ring *rx_ring)
  396. {
  397. struct pci_dev *pdev = adapter->pdev;
  398. int size, desc_len;
  399. size = sizeof(struct igbvf_buffer) * rx_ring->count;
  400. rx_ring->buffer_info = vzalloc(size);
  401. if (!rx_ring->buffer_info)
  402. goto err;
  403. desc_len = sizeof(union e1000_adv_rx_desc);
  404. /* Round up to nearest 4K */
  405. rx_ring->size = rx_ring->count * desc_len;
  406. rx_ring->size = ALIGN(rx_ring->size, 4096);
  407. rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
  408. &rx_ring->dma, GFP_KERNEL);
  409. if (!rx_ring->desc)
  410. goto err;
  411. rx_ring->next_to_clean = 0;
  412. rx_ring->next_to_use = 0;
  413. rx_ring->adapter = adapter;
  414. return 0;
  415. err:
  416. vfree(rx_ring->buffer_info);
  417. rx_ring->buffer_info = NULL;
  418. dev_err(&adapter->pdev->dev,
  419. "Unable to allocate memory for the receive descriptor ring\n");
  420. return -ENOMEM;
  421. }
  422. /**
  423. * igbvf_clean_tx_ring - Free Tx Buffers
  424. * @tx_ring: ring to be cleaned
  425. **/
  426. static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
  427. {
  428. struct igbvf_adapter *adapter = tx_ring->adapter;
  429. struct igbvf_buffer *buffer_info;
  430. unsigned long size;
  431. unsigned int i;
  432. if (!tx_ring->buffer_info)
  433. return;
  434. /* Free all the Tx ring sk_buffs */
  435. for (i = 0; i < tx_ring->count; i++) {
  436. buffer_info = &tx_ring->buffer_info[i];
  437. igbvf_put_txbuf(adapter, buffer_info);
  438. }
  439. size = sizeof(struct igbvf_buffer) * tx_ring->count;
  440. memset(tx_ring->buffer_info, 0, size);
  441. /* Zero out the descriptor ring */
  442. memset(tx_ring->desc, 0, tx_ring->size);
  443. tx_ring->next_to_use = 0;
  444. tx_ring->next_to_clean = 0;
  445. writel(0, adapter->hw.hw_addr + tx_ring->head);
  446. writel(0, adapter->hw.hw_addr + tx_ring->tail);
  447. }
  448. /**
  449. * igbvf_free_tx_resources - Free Tx Resources per Queue
  450. * @tx_ring: ring to free resources from
  451. *
  452. * Free all transmit software resources
  453. **/
  454. void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
  455. {
  456. struct pci_dev *pdev = tx_ring->adapter->pdev;
  457. igbvf_clean_tx_ring(tx_ring);
  458. vfree(tx_ring->buffer_info);
  459. tx_ring->buffer_info = NULL;
  460. dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
  461. tx_ring->dma);
  462. tx_ring->desc = NULL;
  463. }
  464. /**
  465. * igbvf_clean_rx_ring - Free Rx Buffers per Queue
  466. * @rx_ring: ring structure pointer to free buffers from
  467. **/
  468. static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
  469. {
  470. struct igbvf_adapter *adapter = rx_ring->adapter;
  471. struct igbvf_buffer *buffer_info;
  472. struct pci_dev *pdev = adapter->pdev;
  473. unsigned long size;
  474. unsigned int i;
  475. if (!rx_ring->buffer_info)
  476. return;
  477. /* Free all the Rx ring sk_buffs */
  478. for (i = 0; i < rx_ring->count; i++) {
  479. buffer_info = &rx_ring->buffer_info[i];
  480. if (buffer_info->dma) {
  481. if (adapter->rx_ps_hdr_size) {
  482. dma_unmap_single(&pdev->dev, buffer_info->dma,
  483. adapter->rx_ps_hdr_size,
  484. DMA_FROM_DEVICE);
  485. } else {
  486. dma_unmap_single(&pdev->dev, buffer_info->dma,
  487. adapter->rx_buffer_len,
  488. DMA_FROM_DEVICE);
  489. }
  490. buffer_info->dma = 0;
  491. }
  492. if (buffer_info->skb) {
  493. dev_kfree_skb(buffer_info->skb);
  494. buffer_info->skb = NULL;
  495. }
  496. if (buffer_info->page) {
  497. if (buffer_info->page_dma)
  498. dma_unmap_page(&pdev->dev,
  499. buffer_info->page_dma,
  500. PAGE_SIZE / 2,
  501. DMA_FROM_DEVICE);
  502. put_page(buffer_info->page);
  503. buffer_info->page = NULL;
  504. buffer_info->page_dma = 0;
  505. buffer_info->page_offset = 0;
  506. }
  507. }
  508. size = sizeof(struct igbvf_buffer) * rx_ring->count;
  509. memset(rx_ring->buffer_info, 0, size);
  510. /* Zero out the descriptor ring */
  511. memset(rx_ring->desc, 0, rx_ring->size);
  512. rx_ring->next_to_clean = 0;
  513. rx_ring->next_to_use = 0;
  514. writel(0, adapter->hw.hw_addr + rx_ring->head);
  515. writel(0, adapter->hw.hw_addr + rx_ring->tail);
  516. }
  517. /**
  518. * igbvf_free_rx_resources - Free Rx Resources
  519. * @rx_ring: ring to clean the resources from
  520. *
  521. * Free all receive software resources
  522. **/
  523. void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
  524. {
  525. struct pci_dev *pdev = rx_ring->adapter->pdev;
  526. igbvf_clean_rx_ring(rx_ring);
  527. vfree(rx_ring->buffer_info);
  528. rx_ring->buffer_info = NULL;
  529. dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
  530. rx_ring->dma);
  531. rx_ring->desc = NULL;
  532. }
  533. /**
  534. * igbvf_update_itr - update the dynamic ITR value based on statistics
  535. * @adapter: pointer to adapter
  536. * @itr_setting: current adapter->itr
  537. * @packets: the number of packets during this measurement interval
  538. * @bytes: the number of bytes during this measurement interval
  539. *
  540. * Stores a new ITR value based on packets and byte counts during the last
  541. * interrupt. The advantage of per interrupt computation is faster updates
  542. * and more accurate ITR for the current traffic pattern. Constants in this
  543. * function were computed based on theoretical maximum wire speed and thresholds
  544. * were set based on testing data as well as attempting to minimize response
  545. * time while increasing bulk throughput.
  546. **/
  547. static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
  548. enum latency_range itr_setting,
  549. int packets, int bytes)
  550. {
  551. enum latency_range retval = itr_setting;
  552. if (packets == 0)
  553. goto update_itr_done;
  554. switch (itr_setting) {
  555. case lowest_latency:
  556. /* handle TSO and jumbo frames */
  557. if (bytes/packets > 8000)
  558. retval = bulk_latency;
  559. else if ((packets < 5) && (bytes > 512))
  560. retval = low_latency;
  561. break;
  562. case low_latency: /* 50 usec aka 20000 ints/s */
  563. if (bytes > 10000) {
  564. /* this if handles the TSO accounting */
  565. if (bytes/packets > 8000)
  566. retval = bulk_latency;
  567. else if ((packets < 10) || ((bytes/packets) > 1200))
  568. retval = bulk_latency;
  569. else if ((packets > 35))
  570. retval = lowest_latency;
  571. } else if (bytes/packets > 2000) {
  572. retval = bulk_latency;
  573. } else if (packets <= 2 && bytes < 512) {
  574. retval = lowest_latency;
  575. }
  576. break;
  577. case bulk_latency: /* 250 usec aka 4000 ints/s */
  578. if (bytes > 25000) {
  579. if (packets > 35)
  580. retval = low_latency;
  581. } else if (bytes < 6000) {
  582. retval = low_latency;
  583. }
  584. break;
  585. default:
  586. break;
  587. }
  588. update_itr_done:
  589. return retval;
  590. }
  591. static int igbvf_range_to_itr(enum latency_range current_range)
  592. {
  593. int new_itr;
  594. switch (current_range) {
  595. /* counts and packets in update_itr are dependent on these numbers */
  596. case lowest_latency:
  597. new_itr = IGBVF_70K_ITR;
  598. break;
  599. case low_latency:
  600. new_itr = IGBVF_20K_ITR;
  601. break;
  602. case bulk_latency:
  603. new_itr = IGBVF_4K_ITR;
  604. break;
  605. default:
  606. new_itr = IGBVF_START_ITR;
  607. break;
  608. }
  609. return new_itr;
  610. }
  611. static void igbvf_set_itr(struct igbvf_adapter *adapter)
  612. {
  613. u32 new_itr;
  614. adapter->tx_ring->itr_range =
  615. igbvf_update_itr(adapter,
  616. adapter->tx_ring->itr_val,
  617. adapter->total_tx_packets,
  618. adapter->total_tx_bytes);
  619. /* conservative mode (itr 3) eliminates the lowest_latency setting */
  620. if (adapter->requested_itr == 3 &&
  621. adapter->tx_ring->itr_range == lowest_latency)
  622. adapter->tx_ring->itr_range = low_latency;
  623. new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
  624. if (new_itr != adapter->tx_ring->itr_val) {
  625. u32 current_itr = adapter->tx_ring->itr_val;
  626. /* this attempts to bias the interrupt rate towards Bulk
  627. * by adding intermediate steps when interrupt rate is
  628. * increasing
  629. */
  630. new_itr = new_itr > current_itr ?
  631. min(current_itr + (new_itr >> 2), new_itr) :
  632. new_itr;
  633. adapter->tx_ring->itr_val = new_itr;
  634. adapter->tx_ring->set_itr = 1;
  635. }
  636. adapter->rx_ring->itr_range =
  637. igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
  638. adapter->total_rx_packets,
  639. adapter->total_rx_bytes);
  640. if (adapter->requested_itr == 3 &&
  641. adapter->rx_ring->itr_range == lowest_latency)
  642. adapter->rx_ring->itr_range = low_latency;
  643. new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
  644. if (new_itr != adapter->rx_ring->itr_val) {
  645. u32 current_itr = adapter->rx_ring->itr_val;
  646. new_itr = new_itr > current_itr ?
  647. min(current_itr + (new_itr >> 2), new_itr) :
  648. new_itr;
  649. adapter->rx_ring->itr_val = new_itr;
  650. adapter->rx_ring->set_itr = 1;
  651. }
  652. }
  653. /**
  654. * igbvf_clean_tx_irq - Reclaim resources after transmit completes
  655. * @tx_ring: ring structure to clean descriptors from
  656. *
  657. * returns true if ring is completely cleaned
  658. **/
  659. static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
  660. {
  661. struct igbvf_adapter *adapter = tx_ring->adapter;
  662. struct net_device *netdev = adapter->netdev;
  663. struct igbvf_buffer *buffer_info;
  664. struct sk_buff *skb;
  665. union e1000_adv_tx_desc *tx_desc, *eop_desc;
  666. unsigned int total_bytes = 0, total_packets = 0;
  667. unsigned int i, count = 0;
  668. bool cleaned = false;
  669. i = tx_ring->next_to_clean;
  670. buffer_info = &tx_ring->buffer_info[i];
  671. eop_desc = buffer_info->next_to_watch;
  672. do {
  673. /* if next_to_watch is not set then there is no work pending */
  674. if (!eop_desc)
  675. break;
  676. /* prevent any other reads prior to eop_desc */
  677. smp_rmb();
  678. /* if DD is not set pending work has not been completed */
  679. if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
  680. break;
  681. /* clear next_to_watch to prevent false hangs */
  682. buffer_info->next_to_watch = NULL;
  683. for (cleaned = false; !cleaned; count++) {
  684. tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
  685. cleaned = (tx_desc == eop_desc);
  686. skb = buffer_info->skb;
  687. if (skb) {
  688. unsigned int segs, bytecount;
  689. /* gso_segs is currently only valid for tcp */
  690. segs = skb_shinfo(skb)->gso_segs ?: 1;
  691. /* multiply data chunks by size of headers */
  692. bytecount = ((segs - 1) * skb_headlen(skb)) +
  693. skb->len;
  694. total_packets += segs;
  695. total_bytes += bytecount;
  696. }
  697. igbvf_put_txbuf(adapter, buffer_info);
  698. tx_desc->wb.status = 0;
  699. i++;
  700. if (i == tx_ring->count)
  701. i = 0;
  702. buffer_info = &tx_ring->buffer_info[i];
  703. }
  704. eop_desc = buffer_info->next_to_watch;
  705. } while (count < tx_ring->count);
  706. tx_ring->next_to_clean = i;
  707. if (unlikely(count && netif_carrier_ok(netdev) &&
  708. igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
  709. /* Make sure that anybody stopping the queue after this
  710. * sees the new next_to_clean.
  711. */
  712. smp_mb();
  713. if (netif_queue_stopped(netdev) &&
  714. !(test_bit(__IGBVF_DOWN, &adapter->state))) {
  715. netif_wake_queue(netdev);
  716. ++adapter->restart_queue;
  717. }
  718. }
  719. netdev->stats.tx_bytes += total_bytes;
  720. netdev->stats.tx_packets += total_packets;
  721. return count < tx_ring->count;
  722. }
  723. static irqreturn_t igbvf_msix_other(int irq, void *data)
  724. {
  725. struct net_device *netdev = data;
  726. struct igbvf_adapter *adapter = netdev_priv(netdev);
  727. struct e1000_hw *hw = &adapter->hw;
  728. adapter->int_counter1++;
  729. hw->mac.get_link_status = 1;
  730. if (!test_bit(__IGBVF_DOWN, &adapter->state))
  731. mod_timer(&adapter->watchdog_timer, jiffies + 1);
  732. ew32(EIMS, adapter->eims_other);
  733. return IRQ_HANDLED;
  734. }
  735. static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
  736. {
  737. struct net_device *netdev = data;
  738. struct igbvf_adapter *adapter = netdev_priv(netdev);
  739. struct e1000_hw *hw = &adapter->hw;
  740. struct igbvf_ring *tx_ring = adapter->tx_ring;
  741. if (tx_ring->set_itr) {
  742. writel(tx_ring->itr_val,
  743. adapter->hw.hw_addr + tx_ring->itr_register);
  744. adapter->tx_ring->set_itr = 0;
  745. }
  746. adapter->total_tx_bytes = 0;
  747. adapter->total_tx_packets = 0;
  748. /* auto mask will automatically re-enable the interrupt when we write
  749. * EICS
  750. */
  751. if (!igbvf_clean_tx_irq(tx_ring))
  752. /* Ring was not completely cleaned, so fire another interrupt */
  753. ew32(EICS, tx_ring->eims_value);
  754. else
  755. ew32(EIMS, tx_ring->eims_value);
  756. return IRQ_HANDLED;
  757. }
  758. static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
  759. {
  760. struct net_device *netdev = data;
  761. struct igbvf_adapter *adapter = netdev_priv(netdev);
  762. adapter->int_counter0++;
  763. /* Write the ITR value calculated at the end of the
  764. * previous interrupt.
  765. */
  766. if (adapter->rx_ring->set_itr) {
  767. writel(adapter->rx_ring->itr_val,
  768. adapter->hw.hw_addr + adapter->rx_ring->itr_register);
  769. adapter->rx_ring->set_itr = 0;
  770. }
  771. if (napi_schedule_prep(&adapter->rx_ring->napi)) {
  772. adapter->total_rx_bytes = 0;
  773. adapter->total_rx_packets = 0;
  774. __napi_schedule(&adapter->rx_ring->napi);
  775. }
  776. return IRQ_HANDLED;
  777. }
  778. #define IGBVF_NO_QUEUE -1
  779. static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
  780. int tx_queue, int msix_vector)
  781. {
  782. struct e1000_hw *hw = &adapter->hw;
  783. u32 ivar, index;
  784. /* 82576 uses a table-based method for assigning vectors.
  785. * Each queue has a single entry in the table to which we write
  786. * a vector number along with a "valid" bit. Sadly, the layout
  787. * of the table is somewhat counterintuitive.
  788. */
  789. if (rx_queue > IGBVF_NO_QUEUE) {
  790. index = (rx_queue >> 1);
  791. ivar = array_er32(IVAR0, index);
  792. if (rx_queue & 0x1) {
  793. /* vector goes into third byte of register */
  794. ivar = ivar & 0xFF00FFFF;
  795. ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
  796. } else {
  797. /* vector goes into low byte of register */
  798. ivar = ivar & 0xFFFFFF00;
  799. ivar |= msix_vector | E1000_IVAR_VALID;
  800. }
  801. adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
  802. array_ew32(IVAR0, index, ivar);
  803. }
  804. if (tx_queue > IGBVF_NO_QUEUE) {
  805. index = (tx_queue >> 1);
  806. ivar = array_er32(IVAR0, index);
  807. if (tx_queue & 0x1) {
  808. /* vector goes into high byte of register */
  809. ivar = ivar & 0x00FFFFFF;
  810. ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
  811. } else {
  812. /* vector goes into second byte of register */
  813. ivar = ivar & 0xFFFF00FF;
  814. ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
  815. }
  816. adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
  817. array_ew32(IVAR0, index, ivar);
  818. }
  819. }
  820. /**
  821. * igbvf_configure_msix - Configure MSI-X hardware
  822. * @adapter: board private structure
  823. *
  824. * igbvf_configure_msix sets up the hardware to properly
  825. * generate MSI-X interrupts.
  826. **/
  827. static void igbvf_configure_msix(struct igbvf_adapter *adapter)
  828. {
  829. u32 tmp;
  830. struct e1000_hw *hw = &adapter->hw;
  831. struct igbvf_ring *tx_ring = adapter->tx_ring;
  832. struct igbvf_ring *rx_ring = adapter->rx_ring;
  833. int vector = 0;
  834. adapter->eims_enable_mask = 0;
  835. igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
  836. adapter->eims_enable_mask |= tx_ring->eims_value;
  837. writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
  838. igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
  839. adapter->eims_enable_mask |= rx_ring->eims_value;
  840. writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
  841. /* set vector for other causes, i.e. link changes */
  842. tmp = (vector++ | E1000_IVAR_VALID);
  843. ew32(IVAR_MISC, tmp);
  844. adapter->eims_enable_mask = GENMASK(vector - 1, 0);
  845. adapter->eims_other = BIT(vector - 1);
  846. e1e_flush();
  847. }
  848. static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
  849. {
  850. if (adapter->msix_entries) {
  851. pci_disable_msix(adapter->pdev);
  852. kfree(adapter->msix_entries);
  853. adapter->msix_entries = NULL;
  854. }
  855. }
  856. /**
  857. * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
  858. * @adapter: board private structure
  859. *
  860. * Attempt to configure interrupts using the best available
  861. * capabilities of the hardware and kernel.
  862. **/
  863. static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
  864. {
  865. int err = -ENOMEM;
  866. int i;
  867. /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
  868. adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
  869. GFP_KERNEL);
  870. if (adapter->msix_entries) {
  871. for (i = 0; i < 3; i++)
  872. adapter->msix_entries[i].entry = i;
  873. err = pci_enable_msix_range(adapter->pdev,
  874. adapter->msix_entries, 3, 3);
  875. }
  876. if (err < 0) {
  877. /* MSI-X failed */
  878. dev_err(&adapter->pdev->dev,
  879. "Failed to initialize MSI-X interrupts.\n");
  880. igbvf_reset_interrupt_capability(adapter);
  881. }
  882. }
  883. /**
  884. * igbvf_request_msix - Initialize MSI-X interrupts
  885. * @adapter: board private structure
  886. *
  887. * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
  888. * kernel.
  889. **/
  890. static int igbvf_request_msix(struct igbvf_adapter *adapter)
  891. {
  892. struct net_device *netdev = adapter->netdev;
  893. int err = 0, vector = 0;
  894. if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
  895. sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
  896. sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
  897. } else {
  898. memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
  899. memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
  900. }
  901. err = request_irq(adapter->msix_entries[vector].vector,
  902. igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
  903. netdev);
  904. if (err)
  905. goto out;
  906. adapter->tx_ring->itr_register = E1000_EITR(vector);
  907. adapter->tx_ring->itr_val = adapter->current_itr;
  908. vector++;
  909. err = request_irq(adapter->msix_entries[vector].vector,
  910. igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
  911. netdev);
  912. if (err)
  913. goto free_irq_tx;
  914. adapter->rx_ring->itr_register = E1000_EITR(vector);
  915. adapter->rx_ring->itr_val = adapter->current_itr;
  916. vector++;
  917. err = request_irq(adapter->msix_entries[vector].vector,
  918. igbvf_msix_other, 0, netdev->name, netdev);
  919. if (err)
  920. goto free_irq_rx;
  921. igbvf_configure_msix(adapter);
  922. return 0;
  923. free_irq_rx:
  924. free_irq(adapter->msix_entries[--vector].vector, netdev);
  925. free_irq_tx:
  926. free_irq(adapter->msix_entries[--vector].vector, netdev);
  927. out:
  928. return err;
  929. }
  930. /**
  931. * igbvf_alloc_queues - Allocate memory for all rings
  932. * @adapter: board private structure to initialize
  933. **/
  934. static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
  935. {
  936. struct net_device *netdev = adapter->netdev;
  937. adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
  938. if (!adapter->tx_ring)
  939. return -ENOMEM;
  940. adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
  941. if (!adapter->rx_ring) {
  942. kfree(adapter->tx_ring);
  943. return -ENOMEM;
  944. }
  945. netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll);
  946. return 0;
  947. }
  948. /**
  949. * igbvf_request_irq - initialize interrupts
  950. * @adapter: board private structure
  951. *
  952. * Attempts to configure interrupts using the best available
  953. * capabilities of the hardware and kernel.
  954. **/
  955. static int igbvf_request_irq(struct igbvf_adapter *adapter)
  956. {
  957. int err = -1;
  958. /* igbvf supports msi-x only */
  959. if (adapter->msix_entries)
  960. err = igbvf_request_msix(adapter);
  961. if (!err)
  962. return err;
  963. dev_err(&adapter->pdev->dev,
  964. "Unable to allocate interrupt, Error: %d\n", err);
  965. return err;
  966. }
  967. static void igbvf_free_irq(struct igbvf_adapter *adapter)
  968. {
  969. struct net_device *netdev = adapter->netdev;
  970. int vector;
  971. if (adapter->msix_entries) {
  972. for (vector = 0; vector < 3; vector++)
  973. free_irq(adapter->msix_entries[vector].vector, netdev);
  974. }
  975. }
  976. /**
  977. * igbvf_irq_disable - Mask off interrupt generation on the NIC
  978. * @adapter: board private structure
  979. **/
  980. static void igbvf_irq_disable(struct igbvf_adapter *adapter)
  981. {
  982. struct e1000_hw *hw = &adapter->hw;
  983. ew32(EIMC, ~0);
  984. if (adapter->msix_entries)
  985. ew32(EIAC, 0);
  986. }
  987. /**
  988. * igbvf_irq_enable - Enable default interrupt generation settings
  989. * @adapter: board private structure
  990. **/
  991. static void igbvf_irq_enable(struct igbvf_adapter *adapter)
  992. {
  993. struct e1000_hw *hw = &adapter->hw;
  994. ew32(EIAC, adapter->eims_enable_mask);
  995. ew32(EIAM, adapter->eims_enable_mask);
  996. ew32(EIMS, adapter->eims_enable_mask);
  997. }
  998. /**
  999. * igbvf_poll - NAPI Rx polling callback
  1000. * @napi: struct associated with this polling callback
  1001. * @budget: amount of packets driver is allowed to process this poll
  1002. **/
  1003. static int igbvf_poll(struct napi_struct *napi, int budget)
  1004. {
  1005. struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
  1006. struct igbvf_adapter *adapter = rx_ring->adapter;
  1007. struct e1000_hw *hw = &adapter->hw;
  1008. int work_done = 0;
  1009. igbvf_clean_rx_irq(adapter, &work_done, budget);
  1010. if (work_done == budget)
  1011. return budget;
  1012. /* Exit the polling mode, but don't re-enable interrupts if stack might
  1013. * poll us due to busy-polling
  1014. */
  1015. if (likely(napi_complete_done(napi, work_done))) {
  1016. if (adapter->requested_itr & 3)
  1017. igbvf_set_itr(adapter);
  1018. if (!test_bit(__IGBVF_DOWN, &adapter->state))
  1019. ew32(EIMS, adapter->rx_ring->eims_value);
  1020. }
  1021. return work_done;
  1022. }
  1023. /**
  1024. * igbvf_set_rlpml - set receive large packet maximum length
  1025. * @adapter: board private structure
  1026. *
  1027. * Configure the maximum size of packets that will be received
  1028. */
  1029. static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
  1030. {
  1031. int max_frame_size;
  1032. struct e1000_hw *hw = &adapter->hw;
  1033. max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
  1034. spin_lock_bh(&hw->mbx_lock);
  1035. e1000_rlpml_set_vf(hw, max_frame_size);
  1036. spin_unlock_bh(&hw->mbx_lock);
  1037. }
  1038. static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
  1039. __be16 proto, u16 vid)
  1040. {
  1041. struct igbvf_adapter *adapter = netdev_priv(netdev);
  1042. struct e1000_hw *hw = &adapter->hw;
  1043. spin_lock_bh(&hw->mbx_lock);
  1044. if (hw->mac.ops.set_vfta(hw, vid, true)) {
  1045. dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid);
  1046. spin_unlock_bh(&hw->mbx_lock);
  1047. return -EINVAL;
  1048. }
  1049. spin_unlock_bh(&hw->mbx_lock);
  1050. set_bit(vid, adapter->active_vlans);
  1051. return 0;
  1052. }
  1053. static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
  1054. __be16 proto, u16 vid)
  1055. {
  1056. struct igbvf_adapter *adapter = netdev_priv(netdev);
  1057. struct e1000_hw *hw = &adapter->hw;
  1058. spin_lock_bh(&hw->mbx_lock);
  1059. if (hw->mac.ops.set_vfta(hw, vid, false)) {
  1060. dev_err(&adapter->pdev->dev,
  1061. "Failed to remove vlan id %d\n", vid);
  1062. spin_unlock_bh(&hw->mbx_lock);
  1063. return -EINVAL;
  1064. }
  1065. spin_unlock_bh(&hw->mbx_lock);
  1066. clear_bit(vid, adapter->active_vlans);
  1067. return 0;
  1068. }
  1069. static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
  1070. {
  1071. u16 vid;
  1072. for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
  1073. igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
  1074. }
  1075. /**
  1076. * igbvf_configure_tx - Configure Transmit Unit after Reset
  1077. * @adapter: board private structure
  1078. *
  1079. * Configure the Tx unit of the MAC after a reset.
  1080. **/
  1081. static void igbvf_configure_tx(struct igbvf_adapter *adapter)
  1082. {
  1083. struct e1000_hw *hw = &adapter->hw;
  1084. struct igbvf_ring *tx_ring = adapter->tx_ring;
  1085. u64 tdba;
  1086. u32 txdctl, dca_txctrl;
  1087. /* disable transmits */
  1088. txdctl = er32(TXDCTL(0));
  1089. ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
  1090. e1e_flush();
  1091. msleep(10);
  1092. /* Setup the HW Tx Head and Tail descriptor pointers */
  1093. ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
  1094. tdba = tx_ring->dma;
  1095. ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
  1096. ew32(TDBAH(0), (tdba >> 32));
  1097. ew32(TDH(0), 0);
  1098. ew32(TDT(0), 0);
  1099. tx_ring->head = E1000_TDH(0);
  1100. tx_ring->tail = E1000_TDT(0);
  1101. /* Turn off Relaxed Ordering on head write-backs. The writebacks
  1102. * MUST be delivered in order or it will completely screw up
  1103. * our bookkeeping.
  1104. */
  1105. dca_txctrl = er32(DCA_TXCTRL(0));
  1106. dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
  1107. ew32(DCA_TXCTRL(0), dca_txctrl);
  1108. /* enable transmits */
  1109. txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
  1110. ew32(TXDCTL(0), txdctl);
  1111. /* Setup Transmit Descriptor Settings for eop descriptor */
  1112. adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
  1113. /* enable Report Status bit */
  1114. adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
  1115. }
  1116. /**
  1117. * igbvf_setup_srrctl - configure the receive control registers
  1118. * @adapter: Board private structure
  1119. **/
  1120. static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
  1121. {
  1122. struct e1000_hw *hw = &adapter->hw;
  1123. u32 srrctl = 0;
  1124. srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
  1125. E1000_SRRCTL_BSIZEHDR_MASK |
  1126. E1000_SRRCTL_BSIZEPKT_MASK);
  1127. /* Enable queue drop to avoid head of line blocking */
  1128. srrctl |= E1000_SRRCTL_DROP_EN;
  1129. /* Setup buffer sizes */
  1130. srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
  1131. E1000_SRRCTL_BSIZEPKT_SHIFT;
  1132. if (adapter->rx_buffer_len < 2048) {
  1133. adapter->rx_ps_hdr_size = 0;
  1134. srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
  1135. } else {
  1136. adapter->rx_ps_hdr_size = 128;
  1137. srrctl |= adapter->rx_ps_hdr_size <<
  1138. E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
  1139. srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
  1140. }
  1141. ew32(SRRCTL(0), srrctl);
  1142. }
  1143. /**
  1144. * igbvf_configure_rx - Configure Receive Unit after Reset
  1145. * @adapter: board private structure
  1146. *
  1147. * Configure the Rx unit of the MAC after a reset.
  1148. **/
  1149. static void igbvf_configure_rx(struct igbvf_adapter *adapter)
  1150. {
  1151. struct e1000_hw *hw = &adapter->hw;
  1152. struct igbvf_ring *rx_ring = adapter->rx_ring;
  1153. u64 rdba;
  1154. u32 rxdctl;
  1155. /* disable receives */
  1156. rxdctl = er32(RXDCTL(0));
  1157. ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
  1158. e1e_flush();
  1159. msleep(10);
  1160. /* Setup the HW Rx Head and Tail Descriptor Pointers and
  1161. * the Base and Length of the Rx Descriptor Ring
  1162. */
  1163. rdba = rx_ring->dma;
  1164. ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
  1165. ew32(RDBAH(0), (rdba >> 32));
  1166. ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
  1167. rx_ring->head = E1000_RDH(0);
  1168. rx_ring->tail = E1000_RDT(0);
  1169. ew32(RDH(0), 0);
  1170. ew32(RDT(0), 0);
  1171. rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
  1172. rxdctl &= 0xFFF00000;
  1173. rxdctl |= IGBVF_RX_PTHRESH;
  1174. rxdctl |= IGBVF_RX_HTHRESH << 8;
  1175. rxdctl |= IGBVF_RX_WTHRESH << 16;
  1176. igbvf_set_rlpml(adapter);
  1177. /* enable receives */
  1178. ew32(RXDCTL(0), rxdctl);
  1179. }
  1180. /**
  1181. * igbvf_set_multi - Multicast and Promiscuous mode set
  1182. * @netdev: network interface device structure
  1183. *
  1184. * The set_multi entry point is called whenever the multicast address
  1185. * list or the network interface flags are updated. This routine is
  1186. * responsible for configuring the hardware for proper multicast,
  1187. * promiscuous mode, and all-multi behavior.
  1188. **/
  1189. static void igbvf_set_multi(struct net_device *netdev)
  1190. {
  1191. struct igbvf_adapter *adapter = netdev_priv(netdev);
  1192. struct e1000_hw *hw = &adapter->hw;
  1193. struct netdev_hw_addr *ha;
  1194. u8 *mta_list = NULL;
  1195. int i;
  1196. if (!netdev_mc_empty(netdev)) {
  1197. mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
  1198. GFP_ATOMIC);
  1199. if (!mta_list)
  1200. return;
  1201. }
  1202. /* prepare a packed array of only addresses. */
  1203. i = 0;
  1204. netdev_for_each_mc_addr(ha, netdev)
  1205. memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
  1206. spin_lock_bh(&hw->mbx_lock);
  1207. hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
  1208. spin_unlock_bh(&hw->mbx_lock);
  1209. kfree(mta_list);
  1210. }
  1211. /**
  1212. * igbvf_set_uni - Configure unicast MAC filters
  1213. * @netdev: network interface device structure
  1214. *
  1215. * This routine is responsible for configuring the hardware for proper
  1216. * unicast filters.
  1217. **/
  1218. static int igbvf_set_uni(struct net_device *netdev)
  1219. {
  1220. struct igbvf_adapter *adapter = netdev_priv(netdev);
  1221. struct e1000_hw *hw = &adapter->hw;
  1222. if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) {
  1223. pr_err("Too many unicast filters - No Space\n");
  1224. return -ENOSPC;
  1225. }
  1226. spin_lock_bh(&hw->mbx_lock);
  1227. /* Clear all unicast MAC filters */
  1228. hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL);
  1229. spin_unlock_bh(&hw->mbx_lock);
  1230. if (!netdev_uc_empty(netdev)) {
  1231. struct netdev_hw_addr *ha;
  1232. /* Add MAC filters one by one */
  1233. netdev_for_each_uc_addr(ha, netdev) {
  1234. spin_lock_bh(&hw->mbx_lock);
  1235. hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD,
  1236. ha->addr);
  1237. spin_unlock_bh(&hw->mbx_lock);
  1238. udelay(200);
  1239. }
  1240. }
  1241. return 0;
  1242. }
  1243. static void igbvf_set_rx_mode(struct net_device *netdev)
  1244. {
  1245. igbvf_set_multi(netdev);
  1246. igbvf_set_uni(netdev);
  1247. }
/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_rx_mode(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}
/* igbvf_reset - bring the hardware into a known good state
 * @adapter: private board structure
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_info(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	spin_unlock_bh(&hw->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}
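
/* igbvf_up - bring a configured interface to the running state
 * @adapter: board private structure
 *
 * Reprograms the hardware after a reset, re-enables NAPI and interrupts,
 * and kicks the watchdog so link state gets re-evaluated.
 */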
int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}
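
/* igbvf_down - take the interface out of the running state
 * @adapter: board private structure
 *
 * Disables Rx/Tx in hardware, stops NAPI, interrupts and the watchdog,
 * records final statistics and cleans both descriptor rings.
 */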
void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}
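
/* igbvf_reinit_locked - perform a full down/up cycle
 * @adapter: board private structure
 *
 * Serializes against concurrent resets via the __IGBVF_RESETTING bit;
 * may sleep.
 */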
void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	igbvf_down(adapter);
	igbvf_up(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);
}
/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->hw.mbx_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}
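
/* Snapshot the VF statistics registers so that the deltas computed later in
 * igbvf_update_stats() start from a known baseline.
 */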
static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}
/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}
/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}
/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&hw->mbx_lock);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	spin_unlock_bh(&hw->mbx_lock);

	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	return 0;
}
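
/* The VF statistics registers are only 32 bits wide and wrap.  This macro
 * folds the current reading into the 64-bit software counter: if the register
 * has rolled over since the last read, carry 2^32 into the upper half, then
 * replace the lower 32 bits with the current value.
 */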
#define UPDATE_VF_COUNTER(reg, name) \
{ \
	u32 current_counter = er32(reg); \
	if (current_counter < adapter->stats.last_##name) \
		adapter->stats.name += 0x100000000LL; \
	adapter->stats.last_##name = current_counter; \
	adapter->stats.name &= 0xFFFFFFFF00000000LL; \
	adapter->stats.name |= current_counter; \
}
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->netdev->stats.multicast = adapter->stats.mprc;
}
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}
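
/* igbvf_has_link - query link state via the PF mailbox
 * @adapter: board private structure
 *
 * Returns true if link is up.  If the mailbox check keeps failing more than
 * ten seconds after the last reset, a reset is scheduled.
 */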
static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	spin_lock_bh(&hw->mbx_lock);

	ret_val = hw->mac.ops.check_for_link(hw);

	spin_unlock_bh(&hw->mbx_lock);

	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}
/**
 * igbvf_watchdog - Timer Call-back
 * @t: timer list pointer containing private struct
 **/
static void igbvf_watchdog(struct timer_list *t)
{
	struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
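
/* igbvf_watchdog_task - periodic link and Tx supervision (process context)
 * @work: watchdog work item
 *
 * Updates carrier state and statistics, schedules a reset if Tx work is
 * stranded while link is down, and re-arms the watchdog timer.
 */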
static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}
#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16
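
/* igbvf_tx_ctxtdesc - write an advanced Tx context descriptor
 *
 * The context descriptor carries the VLAN tag, header lengths and
 * checksum/TSO offload parameters that the following data descriptors
 * refer to.
 */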
static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
			      u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u16 i = tx_ring->next_to_use;

	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
}
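
/* igbvf_tso - set up a TSO context descriptor if the skb needs one
 *
 * Returns 1 if a TSO context was queued, 0 if no TSO is required, or a
 * negative errno if the headers could not be made writable.
 */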
static int igbvf_tso(struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;

	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}
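
/* igbvf_tx_csum - set up a checksum-offload context descriptor
 *
 * Returns true if a context descriptor was queued (checksum offload and/or
 * VLAN insertion), false if the hardware has nothing to do for this skb.
 */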
static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
			  u32 tx_flags, __be16 protocol)
{
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(tx_flags & IGBVF_TX_FLAGS_VLAN))
			return false;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;

	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
	return true;
}
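
/* igbvf_maybe_stop_tx - stop the Tx queue if descriptor space is low
 *
 * Returns 0 if at least @size descriptors are free (possibly after waking
 * the queue back up), or -EBUSY if the queue had to stay stopped.
 */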
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough free descriptors we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1u << IGBVF_MAX_TXD_PWR)
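
/* igbvf_tx_map_adv - DMA-map the skb head and all of its fragments
 *
 * Returns the number of buffers mapped, or 0 on a DMA mapping error after
 * unwinding any mappings that had already been made.
 */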
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const skb_frag_t *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}
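
/* igbvf_tx_queue_adv - build the data descriptors and notify hardware
 *
 * Writes one advanced data descriptor per mapped buffer, applies the
 * per-packet command and offload bits, then bumps the ring tail register
 * to start transmission.
 */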
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count,
				      unsigned int first, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
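
/* igbvf_xmit_frame_ring_adv - queue an skb on a specific Tx ring
 *
 * Checks descriptor availability, sets up VLAN/TSO/checksum contexts and DMA
 * mappings, then posts the data descriptors.  Consumes the skb in all cases
 * except NETDEV_TX_BUSY.
 */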
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	__be16 protocol = vlan_get_protocol(skb);

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: one descriptor per fragment,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for a context descriptor,
	 *       + 2 desc gap to keep tail from touching head;
	 * otherwise stop the queue and try again next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/* count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   first, skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue timing out (unused)
 **/
static void igbvf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}
/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}
static int igbvf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

	return 0;
}
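
/* igbvf_resume - restore the device after a suspend
 *
 * Re-requests the IRQ if the interface was running, resets the VF to a known
 * state, brings it back up and re-attaches it to the stack.
 */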
static int __maybe_unused igbvf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(&pdev->dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}

static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}

#define IGBVF_MAX_MAC_HDR_LEN		127
#define IGBVF_MAX_NETWORK_HDR_LEN	511
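
/* igbvf_features_check - drop offloads the hardware cannot honor for this skb
 *
 * If the MAC or network headers are too long to be described by a single
 * context descriptor, mask out the checksum, VLAN-insertion and TSO features
 * so the stack falls back to software for this packet.
 */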
static netdev_features_t
igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
		     netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_set_rx_mode	= igbvf_set_rx_mode,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_eth_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
	.ndo_features_check	= igbvf_features_check,
};
/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
	static int cards_found;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_dma;
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_get_variants;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;

#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IGBVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	spin_lock_bh(&hw->mbx_lock);

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address.\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
	}

	spin_unlock_bh(&hw->mbx_lock);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address.\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	}

	timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* set hardware-specific flags */
	if (adapter->hw.mac.type == e1000_vfadapt_i350)
		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
err_get_variants:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/* it is important to delete the NAPI struct prior to freeing the
	 * Rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static const struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= igbvf_remove,
	.driver.pm	= &igbvf_pm_ops,
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s\n", igbvf_driver_string);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

/* netdev.c */