bcmgenet.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Broadcom GENET (Gigabit Ethernet) controller driver
  4. *
  5. * Copyright (c) 2014-2020 Broadcom
  6. */
  7. #define pr_fmt(fmt) "bcmgenet: " fmt
  8. #include <linux/acpi.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/sched.h>
  12. #include <linux/types.h>
  13. #include <linux/fcntl.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/string.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/init.h>
  18. #include <linux/errno.h>
  19. #include <linux/delay.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/pm.h>
  23. #include <linux/clk.h>
  24. #include <net/arp.h>
  25. #include <linux/mii.h>
  26. #include <linux/ethtool.h>
  27. #include <linux/netdevice.h>
  28. #include <linux/inetdevice.h>
  29. #include <linux/etherdevice.h>
  30. #include <linux/skbuff.h>
  31. #include <linux/in.h>
  32. #include <linux/ip.h>
  33. #include <linux/ipv6.h>
  34. #include <linux/phy.h>
  35. #include <linux/platform_data/bcmgenet.h>
  36. #include <asm/unaligned.h>
  37. #include "bcmgenet.h"
  38. /* Maximum number of hardware queues, downsized if needed */
  39. #define GENET_MAX_MQ_CNT 4
  40. /* Default highest priority queue for multi queue support */
  41. #define GENET_Q0_PRIORITY 0
  42. #define GENET_Q16_RX_BD_CNT \
  43. (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
  44. #define GENET_Q16_TX_BD_CNT \
  45. (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
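/*
 * Illustrative sketch, not part of the driver: how the Q16 BD count macros
 * above work out. The example_* helper name and all numbers are assumptions
 * for illustration only; e.g. with TOTAL_DESC = 256, 4 priority queues and
 * 32 BDs per queue, the default queue keeps 256 - 4 * 32 = 128 descriptors.
 */
static inline unsigned int example_default_q_bd_cnt(unsigned int total_desc,
                                                    unsigned int queues,
                                                    unsigned int bds_per_q)
{
        /* mirrors GENET_Q16_RX/TX_BD_CNT with explicit parameters */
        return total_desc - queues * bds_per_q;
}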
  46. #define RX_BUF_LENGTH 2048
  47. #define SKB_ALIGNMENT 32
  48. /* Tx/Rx DMA register offset, skip 256 descriptors */
  49. #define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
  50. #define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
  51. #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
  52. TOTAL_DESC * DMA_DESC_SIZE)
  53. #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
  54. TOTAL_DESC * DMA_DESC_SIZE)
  55. /* Forward declarations */
  56. static void bcmgenet_set_rx_mode(struct net_device *dev);
  57. static inline void bcmgenet_writel(u32 value, void __iomem *offset)
  58. {
  59. /* MIPS chips strapped for BE will automagically configure the
  60. * peripheral registers for CPU-native byte order.
  61. */
  62. if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  63. __raw_writel(value, offset);
  64. else
  65. writel_relaxed(value, offset);
  66. }
  67. static inline u32 bcmgenet_readl(void __iomem *offset)
  68. {
  69. if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  70. return __raw_readl(offset);
  71. else
  72. return readl_relaxed(offset);
  73. }
  74. static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
  75. void __iomem *d, u32 value)
  76. {
  77. bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
  78. }
  79. static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
  80. void __iomem *d,
  81. dma_addr_t addr)
  82. {
  83. bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
  84. /* Register writes to the GISB bus can take a couple hundred nanoseconds
  85. * and are done for each packet, so skip these expensive writes unless
  86. * the platform is explicitly configured for 64-bit/LPAE.
  87. */
  88. #ifdef CONFIG_PHYS_ADDR_T_64BIT
  89. if (priv->hw_params->flags & GENET_HAS_40BITS)
  90. bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
  91. #endif
  92. }
  93. /* Combined address + length/status setter */
  94. static inline void dmadesc_set(struct bcmgenet_priv *priv,
  95. void __iomem *d, dma_addr_t addr, u32 val)
  96. {
  97. dmadesc_set_addr(priv, d, addr);
  98. dmadesc_set_length_status(priv, d, val);
  99. }
  100. static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
  101. void __iomem *d)
  102. {
  103. dma_addr_t addr;
  104. addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
  105. /* Register reads from the GISB bus can take a couple hundred nanoseconds
  106. * and are done for each packet, so skip this expensive read unless
  107. * the platform is explicitly configured for 64-bit/LPAE.
  108. */
  109. #ifdef CONFIG_PHYS_ADDR_T_64BIT
  110. if (priv->hw_params->flags & GENET_HAS_40BITS)
  111. addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
  112. #endif
  113. return addr;
  114. }
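/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): how a 40-bit DMA address is split across the LO/HI
 * descriptor words, mirroring dmadesc_set_addr()/dmadesc_get_addr() when
 * GENET_HAS_40BITS is set. Only bits 39:32 are expected to be meaningful
 * in the HI word on 40-bit parts.
 */
static inline void example_split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
        *lo = lower_32_bits(addr);      /* written to DMA_DESC_ADDRESS_LO */
        *hi = upper_32_bits(addr);      /* written to DMA_DESC_ADDRESS_HI */
}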
  115. #define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
  116. #define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
  117. NETIF_MSG_LINK)
  118. static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
  119. {
  120. if (GENET_IS_V1(priv))
  121. return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
  122. else
  123. return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
  124. }
  125. static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
  126. {
  127. if (GENET_IS_V1(priv))
  128. bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
  129. else
  130. bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
  131. }
  132. /* These helpers deal with the register map changes between
  133. * GENET v1.1 and GENET v2. Only the registers currently used
  134. * by the driver are handled.
  135. */
  136. static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
  137. {
  138. if (GENET_IS_V1(priv))
  139. return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
  140. else
  141. return bcmgenet_readl(priv->base +
  142. priv->hw_params->tbuf_offset + TBUF_CTRL);
  143. }
  144. static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
  145. {
  146. if (GENET_IS_V1(priv))
  147. bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
  148. else
  149. bcmgenet_writel(val, priv->base +
  150. priv->hw_params->tbuf_offset + TBUF_CTRL);
  151. }
  152. static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
  153. {
  154. if (GENET_IS_V1(priv))
  155. return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
  156. else
  157. return bcmgenet_readl(priv->base +
  158. priv->hw_params->tbuf_offset + TBUF_BP_MC);
  159. }
  160. static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
  161. {
  162. if (GENET_IS_V1(priv))
  163. bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
  164. else
  165. bcmgenet_writel(val, priv->base +
  166. priv->hw_params->tbuf_offset + TBUF_BP_MC);
  167. }
  168. /* RX/TX DMA register accessors */
  169. enum dma_reg {
  170. DMA_RING_CFG = 0,
  171. DMA_CTRL,
  172. DMA_STATUS,
  173. DMA_SCB_BURST_SIZE,
  174. DMA_ARB_CTRL,
  175. DMA_PRIORITY_0,
  176. DMA_PRIORITY_1,
  177. DMA_PRIORITY_2,
  178. DMA_INDEX2RING_0,
  179. DMA_INDEX2RING_1,
  180. DMA_INDEX2RING_2,
  181. DMA_INDEX2RING_3,
  182. DMA_INDEX2RING_4,
  183. DMA_INDEX2RING_5,
  184. DMA_INDEX2RING_6,
  185. DMA_INDEX2RING_7,
  186. DMA_RING0_TIMEOUT,
  187. DMA_RING1_TIMEOUT,
  188. DMA_RING2_TIMEOUT,
  189. DMA_RING3_TIMEOUT,
  190. DMA_RING4_TIMEOUT,
  191. DMA_RING5_TIMEOUT,
  192. DMA_RING6_TIMEOUT,
  193. DMA_RING7_TIMEOUT,
  194. DMA_RING8_TIMEOUT,
  195. DMA_RING9_TIMEOUT,
  196. DMA_RING10_TIMEOUT,
  197. DMA_RING11_TIMEOUT,
  198. DMA_RING12_TIMEOUT,
  199. DMA_RING13_TIMEOUT,
  200. DMA_RING14_TIMEOUT,
  201. DMA_RING15_TIMEOUT,
  202. DMA_RING16_TIMEOUT,
  203. };
  204. static const u8 bcmgenet_dma_regs_v3plus[] = {
  205. [DMA_RING_CFG] = 0x00,
  206. [DMA_CTRL] = 0x04,
  207. [DMA_STATUS] = 0x08,
  208. [DMA_SCB_BURST_SIZE] = 0x0C,
  209. [DMA_ARB_CTRL] = 0x2C,
  210. [DMA_PRIORITY_0] = 0x30,
  211. [DMA_PRIORITY_1] = 0x34,
  212. [DMA_PRIORITY_2] = 0x38,
  213. [DMA_RING0_TIMEOUT] = 0x2C,
  214. [DMA_RING1_TIMEOUT] = 0x30,
  215. [DMA_RING2_TIMEOUT] = 0x34,
  216. [DMA_RING3_TIMEOUT] = 0x38,
  217. [DMA_RING4_TIMEOUT] = 0x3c,
  218. [DMA_RING5_TIMEOUT] = 0x40,
  219. [DMA_RING6_TIMEOUT] = 0x44,
  220. [DMA_RING7_TIMEOUT] = 0x48,
  221. [DMA_RING8_TIMEOUT] = 0x4c,
  222. [DMA_RING9_TIMEOUT] = 0x50,
  223. [DMA_RING10_TIMEOUT] = 0x54,
  224. [DMA_RING11_TIMEOUT] = 0x58,
  225. [DMA_RING12_TIMEOUT] = 0x5c,
  226. [DMA_RING13_TIMEOUT] = 0x60,
  227. [DMA_RING14_TIMEOUT] = 0x64,
  228. [DMA_RING15_TIMEOUT] = 0x68,
  229. [DMA_RING16_TIMEOUT] = 0x6C,
  230. [DMA_INDEX2RING_0] = 0x70,
  231. [DMA_INDEX2RING_1] = 0x74,
  232. [DMA_INDEX2RING_2] = 0x78,
  233. [DMA_INDEX2RING_3] = 0x7C,
  234. [DMA_INDEX2RING_4] = 0x80,
  235. [DMA_INDEX2RING_5] = 0x84,
  236. [DMA_INDEX2RING_6] = 0x88,
  237. [DMA_INDEX2RING_7] = 0x8C,
  238. };
  239. static const u8 bcmgenet_dma_regs_v2[] = {
  240. [DMA_RING_CFG] = 0x00,
  241. [DMA_CTRL] = 0x04,
  242. [DMA_STATUS] = 0x08,
  243. [DMA_SCB_BURST_SIZE] = 0x0C,
  244. [DMA_ARB_CTRL] = 0x30,
  245. [DMA_PRIORITY_0] = 0x34,
  246. [DMA_PRIORITY_1] = 0x38,
  247. [DMA_PRIORITY_2] = 0x3C,
  248. [DMA_RING0_TIMEOUT] = 0x2C,
  249. [DMA_RING1_TIMEOUT] = 0x30,
  250. [DMA_RING2_TIMEOUT] = 0x34,
  251. [DMA_RING3_TIMEOUT] = 0x38,
  252. [DMA_RING4_TIMEOUT] = 0x3c,
  253. [DMA_RING5_TIMEOUT] = 0x40,
  254. [DMA_RING6_TIMEOUT] = 0x44,
  255. [DMA_RING7_TIMEOUT] = 0x48,
  256. [DMA_RING8_TIMEOUT] = 0x4c,
  257. [DMA_RING9_TIMEOUT] = 0x50,
  258. [DMA_RING10_TIMEOUT] = 0x54,
  259. [DMA_RING11_TIMEOUT] = 0x58,
  260. [DMA_RING12_TIMEOUT] = 0x5c,
  261. [DMA_RING13_TIMEOUT] = 0x60,
  262. [DMA_RING14_TIMEOUT] = 0x64,
  263. [DMA_RING15_TIMEOUT] = 0x68,
  264. [DMA_RING16_TIMEOUT] = 0x6C,
  265. };
  266. static const u8 bcmgenet_dma_regs_v1[] = {
  267. [DMA_CTRL] = 0x00,
  268. [DMA_STATUS] = 0x04,
  269. [DMA_SCB_BURST_SIZE] = 0x0C,
  270. [DMA_ARB_CTRL] = 0x30,
  271. [DMA_PRIORITY_0] = 0x34,
  272. [DMA_PRIORITY_1] = 0x38,
  273. [DMA_PRIORITY_2] = 0x3C,
  274. [DMA_RING0_TIMEOUT] = 0x2C,
  275. [DMA_RING1_TIMEOUT] = 0x30,
  276. [DMA_RING2_TIMEOUT] = 0x34,
  277. [DMA_RING3_TIMEOUT] = 0x38,
  278. [DMA_RING4_TIMEOUT] = 0x3c,
  279. [DMA_RING5_TIMEOUT] = 0x40,
  280. [DMA_RING6_TIMEOUT] = 0x44,
  281. [DMA_RING7_TIMEOUT] = 0x48,
  282. [DMA_RING8_TIMEOUT] = 0x4c,
  283. [DMA_RING9_TIMEOUT] = 0x50,
  284. [DMA_RING10_TIMEOUT] = 0x54,
  285. [DMA_RING11_TIMEOUT] = 0x58,
  286. [DMA_RING12_TIMEOUT] = 0x5c,
  287. [DMA_RING13_TIMEOUT] = 0x60,
  288. [DMA_RING14_TIMEOUT] = 0x64,
  289. [DMA_RING15_TIMEOUT] = 0x68,
  290. [DMA_RING16_TIMEOUT] = 0x6C,
  291. };
  292. /* Set at runtime once bcmgenet version is known */
  293. static const u8 *bcmgenet_dma_regs;
  294. static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
  295. {
  296. return netdev_priv(dev_get_drvdata(dev));
  297. }
  298. static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
  299. enum dma_reg r)
  300. {
  301. return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
  302. DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
  303. }
  304. static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
  305. u32 val, enum dma_reg r)
  306. {
  307. bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
  308. DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
  309. }
  310. static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
  311. enum dma_reg r)
  312. {
  313. return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
  314. DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
  315. }
  316. static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
  317. u32 val, enum dma_reg r)
  318. {
  319. bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
  320. DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
  321. }
  322. /* RDMA/TDMA ring registers and accessors.
  323. * The common fields are merged; registers whose meaning depends on
  324. * the direction are prefixed with T (TDMA) or R (RDMA).
  325. */
  326. enum dma_ring_reg {
  327. TDMA_READ_PTR = 0,
  328. RDMA_WRITE_PTR = TDMA_READ_PTR,
  329. TDMA_READ_PTR_HI,
  330. RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
  331. TDMA_CONS_INDEX,
  332. RDMA_PROD_INDEX = TDMA_CONS_INDEX,
  333. TDMA_PROD_INDEX,
  334. RDMA_CONS_INDEX = TDMA_PROD_INDEX,
  335. DMA_RING_BUF_SIZE,
  336. DMA_START_ADDR,
  337. DMA_START_ADDR_HI,
  338. DMA_END_ADDR,
  339. DMA_END_ADDR_HI,
  340. DMA_MBUF_DONE_THRESH,
  341. TDMA_FLOW_PERIOD,
  342. RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
  343. TDMA_WRITE_PTR,
  344. RDMA_READ_PTR = TDMA_WRITE_PTR,
  345. TDMA_WRITE_PTR_HI,
  346. RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
  347. };
  348. /* GENET v4 supports 40-bit pointer addressing.
  349. * The LO and HI word parts are kept contiguous,
  350. * which shifts the offsets of the other
  351. * registers relative to earlier versions.
  352. */
  353. static const u8 genet_dma_ring_regs_v4[] = {
  354. [TDMA_READ_PTR] = 0x00,
  355. [TDMA_READ_PTR_HI] = 0x04,
  356. [TDMA_CONS_INDEX] = 0x08,
  357. [TDMA_PROD_INDEX] = 0x0C,
  358. [DMA_RING_BUF_SIZE] = 0x10,
  359. [DMA_START_ADDR] = 0x14,
  360. [DMA_START_ADDR_HI] = 0x18,
  361. [DMA_END_ADDR] = 0x1C,
  362. [DMA_END_ADDR_HI] = 0x20,
  363. [DMA_MBUF_DONE_THRESH] = 0x24,
  364. [TDMA_FLOW_PERIOD] = 0x28,
  365. [TDMA_WRITE_PTR] = 0x2C,
  366. [TDMA_WRITE_PTR_HI] = 0x30,
  367. };
  368. static const u8 genet_dma_ring_regs_v123[] = {
  369. [TDMA_READ_PTR] = 0x00,
  370. [TDMA_CONS_INDEX] = 0x04,
  371. [TDMA_PROD_INDEX] = 0x08,
  372. [DMA_RING_BUF_SIZE] = 0x0C,
  373. [DMA_START_ADDR] = 0x10,
  374. [DMA_END_ADDR] = 0x14,
  375. [DMA_MBUF_DONE_THRESH] = 0x18,
  376. [TDMA_FLOW_PERIOD] = 0x1C,
  377. [TDMA_WRITE_PTR] = 0x20,
  378. };
  379. /* Set at runtime once GENET version is known */
  380. static const u8 *genet_dma_ring_regs;
  381. static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
  382. unsigned int ring,
  383. enum dma_ring_reg r)
  384. {
  385. return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
  386. (DMA_RING_SIZE * ring) +
  387. genet_dma_ring_regs[r]);
  388. }
  389. static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
  390. unsigned int ring, u32 val,
  391. enum dma_ring_reg r)
  392. {
  393. bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
  394. (DMA_RING_SIZE * ring) +
  395. genet_dma_ring_regs[r]);
  396. }
  397. static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
  398. unsigned int ring,
  399. enum dma_ring_reg r)
  400. {
  401. return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
  402. (DMA_RING_SIZE * ring) +
  403. genet_dma_ring_regs[r]);
  404. }
  405. static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
  406. unsigned int ring, u32 val,
  407. enum dma_ring_reg r)
  408. {
  409. bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
  410. (DMA_RING_SIZE * ring) +
  411. genet_dma_ring_regs[r]);
  412. }
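/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): the effective MMIO address computed by the per-ring
 * accessors above, i.e. the TDMA block base plus one DMA_RING_SIZE stride
 * per ring plus the per-register offset for this GENET version.
 */
static inline void __iomem *example_tdma_ring_reg(struct bcmgenet_priv *priv,
                                                  unsigned int ring,
                                                  enum dma_ring_reg r)
{
        return priv->base + GENET_TDMA_REG_OFF +
               (DMA_RING_SIZE * ring) + genet_dma_ring_regs[r];
}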
  413. static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
  414. {
  415. u32 offset;
  416. u32 reg;
  417. offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
  418. reg = bcmgenet_hfb_reg_readl(priv, offset);
  419. reg |= (1 << (f_index % 32));
  420. bcmgenet_hfb_reg_writel(priv, reg, offset);
  421. reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
  422. reg |= RBUF_HFB_EN;
  423. bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
  424. }
  425. static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
  426. {
  427. u32 offset, reg, reg1;
  428. offset = HFB_FLT_ENABLE_V3PLUS;
  429. reg = bcmgenet_hfb_reg_readl(priv, offset);
  430. reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
  431. if (f_index < 32) {
  432. reg1 &= ~(1 << (f_index % 32));
  433. bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
  434. } else {
  435. reg &= ~(1 << (f_index % 32));
  436. bcmgenet_hfb_reg_writel(priv, reg, offset);
  437. }
  438. if (!reg && !reg1) {
  439. reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
  440. reg &= ~RBUF_HFB_EN;
  441. bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
  442. }
  443. }
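/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): which HFB enable word and bit a filter index maps to in
 * the two functions above. Filters 0-31 live in the second 32-bit enable
 * word (offset + 4), filters 32-63 in the first.
 */
static inline void example_hfb_enable_bit(u32 f_index, u32 *reg_offset,
                                          u32 *mask)
{
        *reg_offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
        *mask = 1 << (f_index % 32);
}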
  444. static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
  445. u32 f_index, u32 rx_queue)
  446. {
  447. u32 offset;
  448. u32 reg;
  449. offset = f_index / 8;
  450. reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
  451. reg &= ~(0xF << (4 * (f_index % 8)));
  452. reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
  453. bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
  454. }
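/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): DMA_INDEX2RING_* packs one 4-bit RX ring number per
 * filter, eight filters per 32-bit register, which is the shift/mask
 * arithmetic used just above.
 */
static inline u32 example_index2ring_pack(u32 reg, u32 f_index, u32 rx_queue)
{
        u32 shift = 4 * (f_index % 8);

        reg &= ~(0xF << shift);
        return reg | ((rx_queue & 0xF) << shift);
}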
  455. static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
  456. u32 f_index, u32 f_length)
  457. {
  458. u32 offset;
  459. u32 reg;
  460. offset = HFB_FLT_LEN_V3PLUS +
  461. ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
  462. sizeof(u32);
  463. reg = bcmgenet_hfb_reg_readl(priv, offset);
  464. reg &= ~(0xFF << (8 * (f_index % 4)));
  465. reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
  466. bcmgenet_hfb_reg_writel(priv, reg, offset);
  467. }
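/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): filter lengths are packed one byte per filter, four
 * filters per 32-bit word, and the length words are indexed from the last
 * filter backwards, as in the function above.
 */
static inline u32 example_hfb_len_offset(struct bcmgenet_priv *priv,
                                         u32 f_index)
{
        return HFB_FLT_LEN_V3PLUS +
               ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
               sizeof(u32);
}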
  468. static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
  469. {
  470. while (size) {
  471. switch (*(unsigned char *)mask++) {
  472. case 0x00:
  473. case 0x0f:
  474. case 0xf0:
  475. case 0xff:
  476. size--;
  477. continue;
  478. default:
  479. return -EINVAL;
  480. }
  481. }
  482. return 0;
  483. }
  484. #define VALIDATE_MASK(x) \
  485. bcmgenet_hfb_validate_mask(&(x), sizeof(x))
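/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): the HFB matches with nibble granularity only, so
 * VALIDATE_MASK() accepts 0x00, 0x0f, 0xf0 and 0xff per mask byte and
 * rejects anything else.
 */
static inline int example_validate_masks(void)
{
        u8 ok = 0xf0;   /* accepted: whole-nibble mask */
        u8 bad = 0x3f;  /* rejected: partial-nibble mask */

        return VALIDATE_MASK(ok) ?: VALIDATE_MASK(bad); /* yields -EINVAL */
}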
  486. static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
  487. u32 offset, void *val, void *mask,
  488. size_t size)
  489. {
  490. u32 index, tmp;
  491. index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
  492. tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
  493. while (size--) {
  494. if (offset++ & 1) {
  495. tmp &= ~0x300FF;
  496. tmp |= (*(unsigned char *)val++);
  497. switch ((*(unsigned char *)mask++)) {
  498. case 0xFF:
  499. tmp |= 0x30000;
  500. break;
  501. case 0xF0:
  502. tmp |= 0x20000;
  503. break;
  504. case 0x0F:
  505. tmp |= 0x10000;
  506. break;
  507. }
  508. bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
  509. if (size)
  510. tmp = bcmgenet_hfb_readl(priv,
  511. index * sizeof(u32));
  512. } else {
  513. tmp &= ~0xCFF00;
  514. tmp |= (*(unsigned char *)val++) << 8;
  515. switch ((*(unsigned char *)mask++)) {
  516. case 0xFF:
  517. tmp |= 0xC0000;
  518. break;
  519. case 0xF0:
  520. tmp |= 0x80000;
  521. break;
  522. case 0x0F:
  523. tmp |= 0x40000;
  524. break;
  525. }
  526. if (!size)
  527. bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
  528. }
  529. }
  530. return 0;
  531. }
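/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): layout of one HFB filter word as programmed by
 * bcmgenet_hfb_insert_data() above. Bits 15:0 carry two packet bytes and
 * bits 19:16 carry two 2-bit nibble-enable fields (0xC0000 for the
 * high/even byte, 0x30000 for the low/odd byte).
 */
static inline u32 example_hfb_word(u8 even_byte, u8 odd_byte,
                                   bool match_even, bool match_odd)
{
        u32 word = (even_byte << 8) | odd_byte;

        if (match_even)
                word |= 0xC0000;        /* match both nibbles of the high byte */
        if (match_odd)
                word |= 0x30000;        /* match both nibbles of the low byte */
        return word;
}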
  532. static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
  533. struct bcmgenet_rxnfc_rule *rule)
  534. {
  535. struct ethtool_rx_flow_spec *fs = &rule->fs;
  536. u32 offset = 0, f_length = 0, f;
  537. u8 val_8, mask_8;
  538. __be16 val_16;
  539. u16 mask_16;
  540. size_t size;
  541. f = fs->location;
  542. if (fs->flow_type & FLOW_MAC_EXT) {
  543. bcmgenet_hfb_insert_data(priv, f, 0,
  544. &fs->h_ext.h_dest, &fs->m_ext.h_dest,
  545. sizeof(fs->h_ext.h_dest));
  546. }
  547. if (fs->flow_type & FLOW_EXT) {
  548. if (fs->m_ext.vlan_etype ||
  549. fs->m_ext.vlan_tci) {
  550. bcmgenet_hfb_insert_data(priv, f, 12,
  551. &fs->h_ext.vlan_etype,
  552. &fs->m_ext.vlan_etype,
  553. sizeof(fs->h_ext.vlan_etype));
  554. bcmgenet_hfb_insert_data(priv, f, 14,
  555. &fs->h_ext.vlan_tci,
  556. &fs->m_ext.vlan_tci,
  557. sizeof(fs->h_ext.vlan_tci));
  558. offset += VLAN_HLEN;
  559. f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
  560. }
  561. }
  562. switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
  563. case ETHER_FLOW:
  564. f_length += DIV_ROUND_UP(ETH_HLEN, 2);
  565. bcmgenet_hfb_insert_data(priv, f, 0,
  566. &fs->h_u.ether_spec.h_dest,
  567. &fs->m_u.ether_spec.h_dest,
  568. sizeof(fs->h_u.ether_spec.h_dest));
  569. bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
  570. &fs->h_u.ether_spec.h_source,
  571. &fs->m_u.ether_spec.h_source,
  572. sizeof(fs->h_u.ether_spec.h_source));
  573. bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
  574. &fs->h_u.ether_spec.h_proto,
  575. &fs->m_u.ether_spec.h_proto,
  576. sizeof(fs->h_u.ether_spec.h_proto));
  577. break;
  578. case IP_USER_FLOW:
  579. f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
  580. /* Specify IP Ether Type */
  581. val_16 = htons(ETH_P_IP);
  582. mask_16 = 0xFFFF;
  583. bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
  584. &val_16, &mask_16, sizeof(val_16));
  585. bcmgenet_hfb_insert_data(priv, f, 15 + offset,
  586. &fs->h_u.usr_ip4_spec.tos,
  587. &fs->m_u.usr_ip4_spec.tos,
  588. sizeof(fs->h_u.usr_ip4_spec.tos));
  589. bcmgenet_hfb_insert_data(priv, f, 23 + offset,
  590. &fs->h_u.usr_ip4_spec.proto,
  591. &fs->m_u.usr_ip4_spec.proto,
  592. sizeof(fs->h_u.usr_ip4_spec.proto));
  593. bcmgenet_hfb_insert_data(priv, f, 26 + offset,
  594. &fs->h_u.usr_ip4_spec.ip4src,
  595. &fs->m_u.usr_ip4_spec.ip4src,
  596. sizeof(fs->h_u.usr_ip4_spec.ip4src));
  597. bcmgenet_hfb_insert_data(priv, f, 30 + offset,
  598. &fs->h_u.usr_ip4_spec.ip4dst,
  599. &fs->m_u.usr_ip4_spec.ip4dst,
  600. sizeof(fs->h_u.usr_ip4_spec.ip4dst));
  601. if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
  602. break;
  603. /* Only a 20-byte IPv4 header (no options) is supported */
  604. val_8 = 0x45;
  605. mask_8 = 0xFF;
  606. bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
  607. &val_8, &mask_8,
  608. sizeof(val_8));
  609. size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
  610. bcmgenet_hfb_insert_data(priv, f,
  611. ETH_HLEN + 20 + offset,
  612. &fs->h_u.usr_ip4_spec.l4_4_bytes,
  613. &fs->m_u.usr_ip4_spec.l4_4_bytes,
  614. size);
  615. f_length += DIV_ROUND_UP(size, 2);
  616. break;
  617. }
  618. bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
  619. if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
  620. /* Ring 0 flows can be handled by the default Descriptor Ring
  621. * We'll map them to ring 0, but don't enable the filter
  622. */
  623. bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
  624. rule->state = BCMGENET_RXNFC_STATE_DISABLED;
  625. } else {
  626. /* Other Rx rings are direct mapped here */
  627. bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
  628. fs->ring_cookie);
  629. bcmgenet_hfb_enable_filter(priv, f);
  630. rule->state = BCMGENET_RXNFC_STATE_ENABLED;
  631. }
  632. }
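/*
 * Illustrative sketch, not part of the driver (the example_* helper,
 * address and ring number are made up): the kind of ethtool flow spec, as
 * delivered through ethtool -N/--config-ntuple, that the function above
 * translates into an HFB filter.
 */
static inline void example_fill_ip4_rule(struct ethtool_rx_flow_spec *fs)
{
        memset(fs, 0, sizeof(*fs));
        fs->flow_type = IP_USER_FLOW;
        fs->h_u.usr_ip4_spec.ip4dst = htonl(0xc0a80101);        /* 192.168.1.1 */
        fs->m_u.usr_ip4_spec.ip4dst = htonl(0xffffffff);        /* exact match */
        fs->ring_cookie = 1;    /* steer matches to RX ring 1 */
        fs->location = 0;       /* HFB filter index */
}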
  633. /* bcmgenet_hfb_clear
  634. *
  635. * Clear Hardware Filter Block and disable all filtering.
  636. */
  637. static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
  638. {
  639. u32 base, i;
  640. base = f_index * priv->hw_params->hfb_filter_size;
  641. for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
  642. bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
  643. }
  644. static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
  645. {
  646. u32 i;
  647. if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
  648. return;
  649. bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
  650. bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
  651. bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
  652. for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
  653. bcmgenet_rdma_writel(priv, 0x0, i);
  654. for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
  655. bcmgenet_hfb_reg_writel(priv, 0x0,
  656. HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
  657. for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
  658. bcmgenet_hfb_clear_filter(priv, i);
  659. }
  660. static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
  661. {
  662. int i;
  663. INIT_LIST_HEAD(&priv->rxnfc_list);
  664. if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
  665. return;
  666. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  667. INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
  668. priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
  669. }
  670. bcmgenet_hfb_clear(priv);
  671. }
  672. static int bcmgenet_begin(struct net_device *dev)
  673. {
  674. struct bcmgenet_priv *priv = netdev_priv(dev);
  675. /* Turn on the clock */
  676. return clk_prepare_enable(priv->clk);
  677. }
  678. static void bcmgenet_complete(struct net_device *dev)
  679. {
  680. struct bcmgenet_priv *priv = netdev_priv(dev);
  681. /* Turn off the clock */
  682. clk_disable_unprepare(priv->clk);
  683. }
  684. static int bcmgenet_get_link_ksettings(struct net_device *dev,
  685. struct ethtool_link_ksettings *cmd)
  686. {
  687. if (!netif_running(dev))
  688. return -EINVAL;
  689. if (!dev->phydev)
  690. return -ENODEV;
  691. phy_ethtool_ksettings_get(dev->phydev, cmd);
  692. return 0;
  693. }
  694. static int bcmgenet_set_link_ksettings(struct net_device *dev,
  695. const struct ethtool_link_ksettings *cmd)
  696. {
  697. if (!netif_running(dev))
  698. return -EINVAL;
  699. if (!dev->phydev)
  700. return -ENODEV;
  701. return phy_ethtool_ksettings_set(dev->phydev, cmd);
  702. }
  703. static int bcmgenet_set_features(struct net_device *dev,
  704. netdev_features_t features)
  705. {
  706. struct bcmgenet_priv *priv = netdev_priv(dev);
  707. u32 reg;
  708. int ret;
  709. ret = clk_prepare_enable(priv->clk);
  710. if (ret)
  711. return ret;
  712. /* Make sure we reflect the value of CRC_CMD_FWD */
  713. reg = bcmgenet_umac_readl(priv, UMAC_CMD);
  714. priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
  715. clk_disable_unprepare(priv->clk);
  716. return ret;
  717. }
  718. static u32 bcmgenet_get_msglevel(struct net_device *dev)
  719. {
  720. struct bcmgenet_priv *priv = netdev_priv(dev);
  721. return priv->msg_enable;
  722. }
  723. static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
  724. {
  725. struct bcmgenet_priv *priv = netdev_priv(dev);
  726. priv->msg_enable = level;
  727. }
  728. static int bcmgenet_get_coalesce(struct net_device *dev,
  729. struct ethtool_coalesce *ec,
  730. struct kernel_ethtool_coalesce *kernel_coal,
  731. struct netlink_ext_ack *extack)
  732. {
  733. struct bcmgenet_priv *priv = netdev_priv(dev);
  734. struct bcmgenet_rx_ring *ring;
  735. unsigned int i;
  736. ec->tx_max_coalesced_frames =
  737. bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
  738. DMA_MBUF_DONE_THRESH);
  739. ec->rx_max_coalesced_frames =
  740. bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
  741. DMA_MBUF_DONE_THRESH);
  742. ec->rx_coalesce_usecs =
  743. bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
  744. for (i = 0; i < priv->hw_params->rx_queues; i++) {
  745. ring = &priv->rx_rings[i];
  746. ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
  747. }
  748. ring = &priv->rx_rings[DESC_INDEX];
  749. ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
  750. return 0;
  751. }
  752. static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
  753. u32 usecs, u32 pkts)
  754. {
  755. struct bcmgenet_priv *priv = ring->priv;
  756. unsigned int i = ring->index;
  757. u32 reg;
  758. bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);
  759. reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
  760. reg &= ~DMA_TIMEOUT_MASK;
  761. reg |= DIV_ROUND_UP(usecs * 1000, 8192);
  762. bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
  763. }
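/*
 * Illustrative sketch, not part of the driver (the example_* helper is
 * hypothetical): the RDMA ring timeout register counts the 125 MHz system
 * clock divided by 1024, i.e. units of roughly 8.192 us, which is the
 * conversion used above and reversed in bcmgenet_get_coalesce().
 */
static inline u32 example_usecs_to_rdma_timeout(u32 usecs)
{
        return DIV_ROUND_UP(usecs * 1000, 8192);        /* e.g. 50 us -> 7 units */
}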
  764. static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
  765. struct ethtool_coalesce *ec)
  766. {
  767. struct dim_cq_moder moder;
  768. u32 usecs, pkts;
  769. ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
  770. ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
  771. usecs = ring->rx_coalesce_usecs;
  772. pkts = ring->rx_max_coalesced_frames;
  773. if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
  774. moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
  775. usecs = moder.usec;
  776. pkts = moder.pkts;
  777. }
  778. ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
  779. bcmgenet_set_rx_coalesce(ring, usecs, pkts);
  780. }
  781. static int bcmgenet_set_coalesce(struct net_device *dev,
  782. struct ethtool_coalesce *ec,
  783. struct kernel_ethtool_coalesce *kernel_coal,
  784. struct netlink_ext_ack *extack)
  785. {
  786. struct bcmgenet_priv *priv = netdev_priv(dev);
  787. unsigned int i;
  788. /* The base system clock is 125 MHz and the DMA timeout counts that
  789. * reference clock divided by 1024, i.e. units of roughly 8.192 us;
  790. * the programmed value has to fit in DMA_TIMEOUT_MASK (16 bits).
  791. */
  792. if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
  793. ec->tx_max_coalesced_frames == 0 ||
  794. ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
  795. ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
  796. return -EINVAL;
  797. if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
  798. return -EINVAL;
  799. /* GENET TDMA hardware does not support a configurable timeout, but will
  800. * always generate an interrupt either after MBDONE packets have been
  801. * transmitted, or when the ring is empty.
  802. */
  803. /* Program all TX queues with the same values, as there is no
  804. * ethtool knob to do coalescing on a per-queue basis
  805. */
  806. for (i = 0; i < priv->hw_params->tx_queues; i++)
  807. bcmgenet_tdma_ring_writel(priv, i,
  808. ec->tx_max_coalesced_frames,
  809. DMA_MBUF_DONE_THRESH);
  810. bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
  811. ec->tx_max_coalesced_frames,
  812. DMA_MBUF_DONE_THRESH);
  813. for (i = 0; i < priv->hw_params->rx_queues; i++)
  814. bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
  815. bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
  816. return 0;
  817. }
  818. static void bcmgenet_get_pauseparam(struct net_device *dev,
  819. struct ethtool_pauseparam *epause)
  820. {
  821. struct bcmgenet_priv *priv;
  822. u32 umac_cmd;
  823. priv = netdev_priv(dev);
  824. epause->autoneg = priv->autoneg_pause;
  825. if (netif_carrier_ok(dev)) {
  826. /* report active state when link is up */
  827. umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
  828. epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
  829. epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
  830. } else {
  831. /* otherwise report stored settings */
  832. epause->tx_pause = priv->tx_pause;
  833. epause->rx_pause = priv->rx_pause;
  834. }
  835. }
  836. static int bcmgenet_set_pauseparam(struct net_device *dev,
  837. struct ethtool_pauseparam *epause)
  838. {
  839. struct bcmgenet_priv *priv = netdev_priv(dev);
  840. if (!dev->phydev)
  841. return -ENODEV;
  842. if (!phy_validate_pause(dev->phydev, epause))
  843. return -EINVAL;
  844. priv->autoneg_pause = !!epause->autoneg;
  845. priv->tx_pause = !!epause->tx_pause;
  846. priv->rx_pause = !!epause->rx_pause;
  847. bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
  848. return 0;
  849. }
  850. /* standard ethtool support functions. */
  851. enum bcmgenet_stat_type {
  852. BCMGENET_STAT_NETDEV = -1,
  853. BCMGENET_STAT_MIB_RX,
  854. BCMGENET_STAT_MIB_TX,
  855. BCMGENET_STAT_RUNT,
  856. BCMGENET_STAT_MISC,
  857. BCMGENET_STAT_SOFT,
  858. };
  859. struct bcmgenet_stats {
  860. char stat_string[ETH_GSTRING_LEN];
  861. int stat_sizeof;
  862. int stat_offset;
  863. enum bcmgenet_stat_type type;
  864. /* reg offset from UMAC base for misc counters */
  865. u16 reg_offset;
  866. };
  867. #define STAT_NETDEV(m) { \
  868. .stat_string = __stringify(m), \
  869. .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
  870. .stat_offset = offsetof(struct net_device_stats, m), \
  871. .type = BCMGENET_STAT_NETDEV, \
  872. }
  873. #define STAT_GENET_MIB(str, m, _type) { \
  874. .stat_string = str, \
  875. .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
  876. .stat_offset = offsetof(struct bcmgenet_priv, m), \
  877. .type = _type, \
  878. }
  879. #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
  880. #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
  881. #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
  882. #define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
  883. #define STAT_GENET_MISC(str, m, offset) { \
  884. .stat_string = str, \
  885. .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
  886. .stat_offset = offsetof(struct bcmgenet_priv, m), \
  887. .type = BCMGENET_STAT_MISC, \
  888. .reg_offset = offset, \
  889. }
  890. #define STAT_GENET_Q(num) \
  891. STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
  892. tx_rings[num].packets), \
  893. STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
  894. tx_rings[num].bytes), \
  895. STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
  896. rx_rings[num].bytes), \
  897. STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
  898. rx_rings[num].packets), \
  899. STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
  900. rx_rings[num].errors), \
  901. STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
  902. rx_rings[num].dropped)
  903. /* There is a 0xC gap between the end of the RX stats and the beginning of
  904. * the TX stats, and another between the end of the TX stats and the
  905. * beginning of the RX RUNT stats. */
  906. #define BCMGENET_STAT_OFFSET 0xc
  907. /* Hardware counters must be kept in sync because the order/offset
  908. * is important here (order in structure declaration = order in hardware)
  909. */
  910. static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
  911. /* general stats */
  912. STAT_NETDEV(rx_packets),
  913. STAT_NETDEV(tx_packets),
  914. STAT_NETDEV(rx_bytes),
  915. STAT_NETDEV(tx_bytes),
  916. STAT_NETDEV(rx_errors),
  917. STAT_NETDEV(tx_errors),
  918. STAT_NETDEV(rx_dropped),
  919. STAT_NETDEV(tx_dropped),
  920. STAT_NETDEV(multicast),
  921. /* UniMAC RSV counters */
  922. STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
  923. STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
  924. STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
  925. STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
  926. STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
  927. STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
  928. STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
  929. STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
  930. STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
  931. STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
  932. STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
  933. STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
  934. STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
  935. STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
  936. STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
  937. STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
  938. STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
  939. STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
  940. STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
  941. STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
  942. STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
  943. STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
  944. STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
  945. STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
  946. STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
  947. STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
  948. STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
  949. STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
  950. STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
  951. /* UniMAC TSV counters */
  952. STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
  953. STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
  954. STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
  955. STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
  956. STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
  957. STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
  958. STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
  959. STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
  960. STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
  961. STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
  962. STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
  963. STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
  964. STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
  965. STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
  966. STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
  967. STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
  968. STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
  969. STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
  970. STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
  971. STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
  972. STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
  973. STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
  974. STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
  975. STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
  976. STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
  977. STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
  978. STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
  979. STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
  980. STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
  981. /* UniMAC RUNT counters */
  982. STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
  983. STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
  984. STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
  985. STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
  986. /* Misc UniMAC counters */
  987. STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
  988. UMAC_RBUF_OVFL_CNT_V1),
  989. STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
  990. UMAC_RBUF_ERR_CNT_V1),
  991. STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
  992. STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
  993. STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
  994. STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
  995. STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
  996. STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
  997. mib.tx_realloc_tsb_failed),
  998. /* Per-queue (TX and RX) counters */
  999. STAT_GENET_Q(0),
  1000. STAT_GENET_Q(1),
  1001. STAT_GENET_Q(2),
  1002. STAT_GENET_Q(3),
  1003. STAT_GENET_Q(16),
  1004. };
  1005. #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
  1006. static void bcmgenet_get_drvinfo(struct net_device *dev,
  1007. struct ethtool_drvinfo *info)
  1008. {
  1009. strscpy(info->driver, "bcmgenet", sizeof(info->driver));
  1010. }
  1011. static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
  1012. {
  1013. switch (string_set) {
  1014. case ETH_SS_STATS:
  1015. return BCMGENET_STATS_LEN;
  1016. default:
  1017. return -EOPNOTSUPP;
  1018. }
  1019. }
  1020. static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
  1021. u8 *data)
  1022. {
  1023. int i;
  1024. switch (stringset) {
  1025. case ETH_SS_STATS:
  1026. for (i = 0; i < BCMGENET_STATS_LEN; i++) {
  1027. memcpy(data + i * ETH_GSTRING_LEN,
  1028. bcmgenet_gstrings_stats[i].stat_string,
  1029. ETH_GSTRING_LEN);
  1030. }
  1031. break;
  1032. }
  1033. }
  1034. static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
  1035. {
  1036. u16 new_offset;
  1037. u32 val;
  1038. switch (offset) {
  1039. case UMAC_RBUF_OVFL_CNT_V1:
  1040. if (GENET_IS_V2(priv))
  1041. new_offset = RBUF_OVFL_CNT_V2;
  1042. else
  1043. new_offset = RBUF_OVFL_CNT_V3PLUS;
  1044. val = bcmgenet_rbuf_readl(priv, new_offset);
  1045. /* clear if overflowed */
  1046. if (val == ~0)
  1047. bcmgenet_rbuf_writel(priv, 0, new_offset);
  1048. break;
  1049. case UMAC_RBUF_ERR_CNT_V1:
  1050. if (GENET_IS_V2(priv))
  1051. new_offset = RBUF_ERR_CNT_V2;
  1052. else
  1053. new_offset = RBUF_ERR_CNT_V3PLUS;
  1054. val = bcmgenet_rbuf_readl(priv, new_offset);
  1055. /* clear if overflowed */
  1056. if (val == ~0)
  1057. bcmgenet_rbuf_writel(priv, 0, new_offset);
  1058. break;
  1059. default:
  1060. val = bcmgenet_umac_readl(priv, offset);
  1061. /* clear if overflowed */
  1062. if (val == ~0)
  1063. bcmgenet_umac_writel(priv, 0, offset);
  1064. break;
  1065. }
  1066. return val;
  1067. }
  1068. static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
  1069. {
  1070. int i, j = 0;
  1071. for (i = 0; i < BCMGENET_STATS_LEN; i++) {
  1072. const struct bcmgenet_stats *s;
  1073. u8 offset = 0;
  1074. u32 val = 0;
  1075. char *p;
  1076. s = &bcmgenet_gstrings_stats[i];
  1077. switch (s->type) {
  1078. case BCMGENET_STAT_NETDEV:
  1079. case BCMGENET_STAT_SOFT:
  1080. continue;
  1081. case BCMGENET_STAT_RUNT:
  1082. offset += BCMGENET_STAT_OFFSET;
  1083. fallthrough;
  1084. case BCMGENET_STAT_MIB_TX:
  1085. offset += BCMGENET_STAT_OFFSET;
  1086. fallthrough;
  1087. case BCMGENET_STAT_MIB_RX:
  1088. val = bcmgenet_umac_readl(priv,
  1089. UMAC_MIB_START + j + offset);
  1090. offset = 0; /* Reset Offset */
  1091. break;
  1092. case BCMGENET_STAT_MISC:
  1093. if (GENET_IS_V1(priv)) {
  1094. val = bcmgenet_umac_readl(priv, s->reg_offset);
  1095. /* clear if overflowed */
  1096. if (val == ~0)
  1097. bcmgenet_umac_writel(priv, 0,
  1098. s->reg_offset);
  1099. } else {
  1100. val = bcmgenet_update_stat_misc(priv,
  1101. s->reg_offset);
  1102. }
  1103. break;
  1104. }
  1105. j += s->stat_sizeof;
  1106. p = (char *)priv + s->stat_offset;
  1107. *(u32 *)p = val;
  1108. }
  1109. }
  1110. static void bcmgenet_get_ethtool_stats(struct net_device *dev,
  1111. struct ethtool_stats *stats,
  1112. u64 *data)
  1113. {
  1114. struct bcmgenet_priv *priv = netdev_priv(dev);
  1115. int i;
  1116. if (netif_running(dev))
  1117. bcmgenet_update_mib_counters(priv);
  1118. dev->netdev_ops->ndo_get_stats(dev);
  1119. for (i = 0; i < BCMGENET_STATS_LEN; i++) {
  1120. const struct bcmgenet_stats *s;
  1121. char *p;
  1122. s = &bcmgenet_gstrings_stats[i];
  1123. if (s->type == BCMGENET_STAT_NETDEV)
  1124. p = (char *)&dev->stats;
  1125. else
  1126. p = (char *)priv;
  1127. p += s->stat_offset;
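/* Counters kept in struct net_device_stats are unsigned long while the
 * driver's own MIB counters are u32; on 64-bit kernels the sizes differ,
 * so read back the value with the width recorded in stat_sizeof.
 */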
  1128. if (sizeof(unsigned long) != sizeof(u32) &&
  1129. s->stat_sizeof == sizeof(unsigned long))
  1130. data[i] = *(unsigned long *)p;
  1131. else
  1132. data[i] = *(u32 *)p;
  1133. }
  1134. }
  1135. void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
  1136. bool tx_lpi_enabled)
  1137. {
  1138. struct bcmgenet_priv *priv = netdev_priv(dev);
  1139. u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
  1140. u32 reg;
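/* clk_eee gates the EEE logic: keep it running while EEE is enabled and
 * while the EEE registers below are programmed; it is released again at
 * the end of this function once EEE has been turned off.
 */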
  1141. if (enable && !priv->clk_eee_enabled) {
  1142. clk_prepare_enable(priv->clk_eee);
  1143. priv->clk_eee_enabled = true;
  1144. }
  1145. reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
  1146. if (enable)
  1147. reg |= EEE_EN;
  1148. else
  1149. reg &= ~EEE_EN;
  1150. bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
1151. /* Enable EEE and switch to a 27 MHz clock automatically */
  1152. reg = bcmgenet_readl(priv->base + off);
  1153. if (tx_lpi_enabled)
  1154. reg |= TBUF_EEE_EN | TBUF_PM_EN;
  1155. else
  1156. reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
  1157. bcmgenet_writel(reg, priv->base + off);
1158. /* Do the same thing for RBUF */
  1159. reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
  1160. if (enable)
  1161. reg |= RBUF_EEE_EN | RBUF_PM_EN;
  1162. else
  1163. reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
  1164. bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
  1165. if (!enable && priv->clk_eee_enabled) {
  1166. clk_disable_unprepare(priv->clk_eee);
  1167. priv->clk_eee_enabled = false;
  1168. }
  1169. priv->eee.eee_enabled = enable;
  1170. priv->eee.eee_active = enable;
  1171. priv->eee.tx_lpi_enabled = tx_lpi_enabled;
  1172. }
  1173. static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
  1174. {
  1175. struct bcmgenet_priv *priv = netdev_priv(dev);
  1176. struct ethtool_eee *p = &priv->eee;
  1177. if (GENET_IS_V1(priv))
  1178. return -EOPNOTSUPP;
  1179. if (!dev->phydev)
  1180. return -ENODEV;
  1181. e->eee_enabled = p->eee_enabled;
  1182. e->eee_active = p->eee_active;
  1183. e->tx_lpi_enabled = p->tx_lpi_enabled;
  1184. e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
  1185. return phy_ethtool_get_eee(dev->phydev, e);
  1186. }
  1187. static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
  1188. {
  1189. struct bcmgenet_priv *priv = netdev_priv(dev);
  1190. struct ethtool_eee *p = &priv->eee;
  1191. if (GENET_IS_V1(priv))
  1192. return -EOPNOTSUPP;
  1193. if (!dev->phydev)
  1194. return -ENODEV;
  1195. p->eee_enabled = e->eee_enabled;
  1196. if (!p->eee_enabled) {
  1197. bcmgenet_eee_enable_set(dev, false, false);
  1198. } else {
  1199. p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
  1200. bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
  1201. bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
  1202. }
  1203. return phy_ethtool_set_eee(dev->phydev, e);
  1204. }
  1205. static int bcmgenet_validate_flow(struct net_device *dev,
  1206. struct ethtool_rxnfc *cmd)
  1207. {
  1208. struct ethtool_usrip4_spec *l4_mask;
  1209. struct ethhdr *eth_mask;
  1210. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
  1211. netdev_err(dev, "rxnfc: Invalid location (%d)\n",
  1212. cmd->fs.location);
  1213. return -EINVAL;
  1214. }
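/* Every field mask supplied by the user must pass VALIDATE_MASK(); masks
 * that the filter hardware cannot honour are rejected with -EINVAL here
 * rather than being silently ignored.
 */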
  1215. switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
  1216. case IP_USER_FLOW:
  1217. l4_mask = &cmd->fs.m_u.usr_ip4_spec;
1218. /* don't allow a mask which isn't valid */
  1219. if (VALIDATE_MASK(l4_mask->ip4src) ||
  1220. VALIDATE_MASK(l4_mask->ip4dst) ||
  1221. VALIDATE_MASK(l4_mask->l4_4_bytes) ||
  1222. VALIDATE_MASK(l4_mask->proto) ||
  1223. VALIDATE_MASK(l4_mask->ip_ver) ||
  1224. VALIDATE_MASK(l4_mask->tos)) {
  1225. netdev_err(dev, "rxnfc: Unsupported mask\n");
  1226. return -EINVAL;
  1227. }
  1228. break;
  1229. case ETHER_FLOW:
  1230. eth_mask = &cmd->fs.m_u.ether_spec;
1231. /* don't allow a mask which isn't valid */
  1232. if (VALIDATE_MASK(eth_mask->h_dest) ||
  1233. VALIDATE_MASK(eth_mask->h_source) ||
  1234. VALIDATE_MASK(eth_mask->h_proto)) {
  1235. netdev_err(dev, "rxnfc: Unsupported mask\n");
  1236. return -EINVAL;
  1237. }
  1238. break;
  1239. default:
  1240. netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
  1241. cmd->fs.flow_type);
  1242. return -EINVAL;
  1243. }
  1244. if ((cmd->fs.flow_type & FLOW_EXT)) {
1245. /* don't allow a mask which isn't valid */
  1246. if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
  1247. VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
  1248. netdev_err(dev, "rxnfc: Unsupported mask\n");
  1249. return -EINVAL;
  1250. }
  1251. if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
  1252. netdev_err(dev, "rxnfc: user-def not supported\n");
  1253. return -EINVAL;
  1254. }
  1255. }
  1256. if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
1257. /* don't allow a mask which isn't valid */
  1258. if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
  1259. netdev_err(dev, "rxnfc: Unsupported mask\n");
  1260. return -EINVAL;
  1261. }
  1262. }
  1263. return 0;
  1264. }
  1265. static int bcmgenet_insert_flow(struct net_device *dev,
  1266. struct ethtool_rxnfc *cmd)
  1267. {
  1268. struct bcmgenet_priv *priv = netdev_priv(dev);
  1269. struct bcmgenet_rxnfc_rule *loc_rule;
  1270. int err;
  1271. if (priv->hw_params->hfb_filter_size < 128) {
  1272. netdev_err(dev, "rxnfc: Not supported by this device\n");
  1273. return -EINVAL;
  1274. }
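/* ring_cookie values up to the number of Rx queues select the queue that
 * matching frames are steered to; RX_CLS_FLOW_WAKE instead marks the rule
 * as a wake-up filter. Anything else is not supported.
 */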
  1275. if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
  1276. cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
  1277. netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
  1278. cmd->fs.ring_cookie);
  1279. return -EINVAL;
  1280. }
  1281. err = bcmgenet_validate_flow(dev, cmd);
  1282. if (err)
  1283. return err;
  1284. loc_rule = &priv->rxnfc_rules[cmd->fs.location];
  1285. if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
  1286. bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
  1287. if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
  1288. list_del(&loc_rule->list);
  1289. bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
  1290. }
  1291. loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
  1292. memcpy(&loc_rule->fs, &cmd->fs,
  1293. sizeof(struct ethtool_rx_flow_spec));
  1294. bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
  1295. list_add_tail(&loc_rule->list, &priv->rxnfc_list);
  1296. return 0;
  1297. }
  1298. static int bcmgenet_delete_flow(struct net_device *dev,
  1299. struct ethtool_rxnfc *cmd)
  1300. {
  1301. struct bcmgenet_priv *priv = netdev_priv(dev);
  1302. struct bcmgenet_rxnfc_rule *rule;
  1303. int err = 0;
  1304. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1305. return -EINVAL;
  1306. rule = &priv->rxnfc_rules[cmd->fs.location];
  1307. if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
  1308. err = -ENOENT;
  1309. goto out;
  1310. }
  1311. if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
  1312. bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
  1313. if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
  1314. list_del(&rule->list);
  1315. bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
  1316. }
  1317. rule->state = BCMGENET_RXNFC_STATE_UNUSED;
  1318. memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
  1319. out:
  1320. return err;
  1321. }
  1322. static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1323. {
  1324. struct bcmgenet_priv *priv = netdev_priv(dev);
  1325. int err = 0;
  1326. switch (cmd->cmd) {
  1327. case ETHTOOL_SRXCLSRLINS:
  1328. err = bcmgenet_insert_flow(dev, cmd);
  1329. break;
  1330. case ETHTOOL_SRXCLSRLDEL:
  1331. err = bcmgenet_delete_flow(dev, cmd);
  1332. break;
  1333. default:
  1334. netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
  1335. cmd->cmd);
  1336. return -EINVAL;
  1337. }
  1338. return err;
  1339. }
  1340. static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1341. int loc)
  1342. {
  1343. struct bcmgenet_priv *priv = netdev_priv(dev);
  1344. struct bcmgenet_rxnfc_rule *rule;
  1345. int err = 0;
  1346. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1347. return -EINVAL;
  1348. rule = &priv->rxnfc_rules[loc];
  1349. if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
  1350. err = -ENOENT;
  1351. else
  1352. memcpy(&cmd->fs, &rule->fs,
  1353. sizeof(struct ethtool_rx_flow_spec));
  1354. return err;
  1355. }
  1356. static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
  1357. {
  1358. struct list_head *pos;
  1359. int res = 0;
  1360. list_for_each(pos, &priv->rxnfc_list)
  1361. res++;
  1362. return res;
  1363. }
  1364. static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1365. u32 *rule_locs)
  1366. {
  1367. struct bcmgenet_priv *priv = netdev_priv(dev);
  1368. struct bcmgenet_rxnfc_rule *rule;
  1369. int err = 0;
  1370. int i = 0;
  1371. switch (cmd->cmd) {
  1372. case ETHTOOL_GRXRINGS:
  1373. cmd->data = priv->hw_params->rx_queues ?: 1;
  1374. break;
  1375. case ETHTOOL_GRXCLSRLCNT:
  1376. cmd->rule_cnt = bcmgenet_get_num_flows(priv);
  1377. cmd->data = MAX_NUM_OF_FS_RULES;
  1378. break;
  1379. case ETHTOOL_GRXCLSRULE:
  1380. err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
  1381. break;
  1382. case ETHTOOL_GRXCLSRLALL:
  1383. list_for_each_entry(rule, &priv->rxnfc_list, list)
  1384. if (i < cmd->rule_cnt)
  1385. rule_locs[i++] = rule->fs.location;
  1386. cmd->rule_cnt = i;
  1387. cmd->data = MAX_NUM_OF_FS_RULES;
  1388. break;
  1389. default:
  1390. err = -EOPNOTSUPP;
  1391. break;
  1392. }
  1393. return err;
  1394. }
  1395. /* standard ethtool support functions. */
  1396. static const struct ethtool_ops bcmgenet_ethtool_ops = {
  1397. .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
  1398. ETHTOOL_COALESCE_MAX_FRAMES |
  1399. ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
  1400. .begin = bcmgenet_begin,
  1401. .complete = bcmgenet_complete,
  1402. .get_strings = bcmgenet_get_strings,
  1403. .get_sset_count = bcmgenet_get_sset_count,
  1404. .get_ethtool_stats = bcmgenet_get_ethtool_stats,
  1405. .get_drvinfo = bcmgenet_get_drvinfo,
  1406. .get_link = ethtool_op_get_link,
  1407. .get_msglevel = bcmgenet_get_msglevel,
  1408. .set_msglevel = bcmgenet_set_msglevel,
  1409. .get_wol = bcmgenet_get_wol,
  1410. .set_wol = bcmgenet_set_wol,
  1411. .get_eee = bcmgenet_get_eee,
  1412. .set_eee = bcmgenet_set_eee,
  1413. .nway_reset = phy_ethtool_nway_reset,
  1414. .get_coalesce = bcmgenet_get_coalesce,
  1415. .set_coalesce = bcmgenet_set_coalesce,
  1416. .get_link_ksettings = bcmgenet_get_link_ksettings,
  1417. .set_link_ksettings = bcmgenet_set_link_ksettings,
  1418. .get_ts_info = ethtool_op_get_ts_info,
  1419. .get_rxnfc = bcmgenet_get_rxnfc,
  1420. .set_rxnfc = bcmgenet_set_rxnfc,
  1421. .get_pauseparam = bcmgenet_get_pauseparam,
  1422. .set_pauseparam = bcmgenet_set_pauseparam,
  1423. };
1424. /* Power down the UniMAC, based on mode. */
  1425. static int bcmgenet_power_down(struct bcmgenet_priv *priv,
  1426. enum bcmgenet_power_mode mode)
  1427. {
  1428. int ret = 0;
  1429. u32 reg;
  1430. switch (mode) {
  1431. case GENET_POWER_CABLE_SENSE:
  1432. phy_detach(priv->dev->phydev);
  1433. break;
  1434. case GENET_POWER_WOL_MAGIC:
  1435. ret = bcmgenet_wol_power_down_cfg(priv, mode);
  1436. break;
  1437. case GENET_POWER_PASSIVE:
  1438. /* Power down LED */
  1439. if (priv->hw_params->flags & GENET_HAS_EXT) {
  1440. reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
  1441. if (GENET_IS_V5(priv) && !priv->ephy_16nm)
  1442. reg |= EXT_PWR_DOWN_PHY_EN |
  1443. EXT_PWR_DOWN_PHY_RD |
  1444. EXT_PWR_DOWN_PHY_SD |
  1445. EXT_PWR_DOWN_PHY_RX |
  1446. EXT_PWR_DOWN_PHY_TX |
  1447. EXT_IDDQ_GLBL_PWR;
  1448. else
  1449. reg |= EXT_PWR_DOWN_PHY;
  1450. reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
  1451. bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
  1452. bcmgenet_phy_power_set(priv->dev, false);
  1453. }
  1454. break;
  1455. default:
  1456. break;
  1457. }
  1458. return ret;
  1459. }
  1460. static void bcmgenet_power_up(struct bcmgenet_priv *priv,
  1461. enum bcmgenet_power_mode mode)
  1462. {
  1463. u32 reg;
  1464. if (!(priv->hw_params->flags & GENET_HAS_EXT))
  1465. return;
  1466. reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
  1467. switch (mode) {
  1468. case GENET_POWER_PASSIVE:
  1469. reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
  1470. EXT_ENERGY_DET_MASK);
  1471. if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
  1472. reg &= ~(EXT_PWR_DOWN_PHY_EN |
  1473. EXT_PWR_DOWN_PHY_RD |
  1474. EXT_PWR_DOWN_PHY_SD |
  1475. EXT_PWR_DOWN_PHY_RX |
  1476. EXT_PWR_DOWN_PHY_TX |
  1477. EXT_IDDQ_GLBL_PWR);
  1478. reg |= EXT_PHY_RESET;
  1479. bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
  1480. mdelay(1);
  1481. reg &= ~EXT_PHY_RESET;
  1482. } else {
  1483. reg &= ~EXT_PWR_DOWN_PHY;
  1484. reg |= EXT_PWR_DN_EN_LD;
  1485. }
  1486. bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
  1487. bcmgenet_phy_power_set(priv->dev, true);
  1488. break;
  1489. case GENET_POWER_CABLE_SENSE:
  1490. /* enable APD */
  1491. if (!GENET_IS_V5(priv)) {
  1492. reg |= EXT_PWR_DN_EN_LD;
  1493. bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
  1494. }
  1495. break;
  1496. case GENET_POWER_WOL_MAGIC:
  1497. bcmgenet_wol_power_up_cfg(priv, mode);
  1498. return;
  1499. default:
  1500. break;
  1501. }
  1502. }
  1503. static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
  1504. struct bcmgenet_tx_ring *ring)
  1505. {
  1506. struct enet_cb *tx_cb_ptr;
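/* write_ptr, cb_ptr and end_ptr are absolute indices into the global
 * tx_cbs array while ring->cbs points at this ring's first control block,
 * so the subtraction below yields the ring-local slot; the write pointer
 * wraps from end_ptr back to cb_ptr.
 */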
  1507. tx_cb_ptr = ring->cbs;
  1508. tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
  1509. /* Advancing local write pointer */
  1510. if (ring->write_ptr == ring->end_ptr)
  1511. ring->write_ptr = ring->cb_ptr;
  1512. else
  1513. ring->write_ptr++;
  1514. return tx_cb_ptr;
  1515. }
  1516. static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
  1517. struct bcmgenet_tx_ring *ring)
  1518. {
  1519. struct enet_cb *tx_cb_ptr;
  1520. tx_cb_ptr = ring->cbs;
  1521. tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
  1522. /* Rewinding local write pointer */
  1523. if (ring->write_ptr == ring->cb_ptr)
  1524. ring->write_ptr = ring->end_ptr;
  1525. else
  1526. ring->write_ptr--;
  1527. return tx_cb_ptr;
  1528. }
  1529. static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
  1530. {
  1531. bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
  1532. INTRL2_CPU_MASK_SET);
  1533. }
  1534. static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
  1535. {
  1536. bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
  1537. INTRL2_CPU_MASK_CLEAR);
  1538. }
  1539. static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
  1540. {
  1541. bcmgenet_intrl2_1_writel(ring->priv,
  1542. 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
  1543. INTRL2_CPU_MASK_SET);
  1544. }
  1545. static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
  1546. {
  1547. bcmgenet_intrl2_1_writel(ring->priv,
  1548. 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
  1549. INTRL2_CPU_MASK_CLEAR);
  1550. }
  1551. static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
  1552. {
  1553. bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
  1554. INTRL2_CPU_MASK_SET);
  1555. }
  1556. static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
  1557. {
  1558. bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
  1559. INTRL2_CPU_MASK_CLEAR);
  1560. }
  1561. static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
  1562. {
  1563. bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
  1564. INTRL2_CPU_MASK_CLEAR);
  1565. }
  1566. static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
  1567. {
  1568. bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
  1569. INTRL2_CPU_MASK_SET);
  1570. }
  1571. /* Simple helper to free a transmit control block's resources
  1572. * Returns an skb when the last transmit control block associated with the
  1573. * skb is freed. The skb should be freed by the caller if necessary.
  1574. */
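/* The control block recorded as first_cb holds the linear head that was
 * mapped with dma_map_single(); all other control blocks were mapped with
 * skb_frag_dma_map(), hence the matching unmap calls below. The skb is
 * only handed back once the control block marked last_cb is freed, so the
 * caller releases it exactly once.
 */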
  1575. static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
  1576. struct enet_cb *cb)
  1577. {
  1578. struct sk_buff *skb;
  1579. skb = cb->skb;
  1580. if (skb) {
  1581. cb->skb = NULL;
  1582. if (cb == GENET_CB(skb)->first_cb)
  1583. dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
  1584. dma_unmap_len(cb, dma_len),
  1585. DMA_TO_DEVICE);
  1586. else
  1587. dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
  1588. dma_unmap_len(cb, dma_len),
  1589. DMA_TO_DEVICE);
  1590. dma_unmap_addr_set(cb, dma_addr, 0);
  1591. if (cb == GENET_CB(skb)->last_cb)
  1592. return skb;
  1593. } else if (dma_unmap_addr(cb, dma_addr)) {
  1594. dma_unmap_page(dev,
  1595. dma_unmap_addr(cb, dma_addr),
  1596. dma_unmap_len(cb, dma_len),
  1597. DMA_TO_DEVICE);
  1598. dma_unmap_addr_set(cb, dma_addr, 0);
  1599. }
  1600. return NULL;
  1601. }
  1602. /* Simple helper to free a receive control block's resources */
  1603. static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
  1604. struct enet_cb *cb)
  1605. {
  1606. struct sk_buff *skb;
  1607. skb = cb->skb;
  1608. cb->skb = NULL;
  1609. if (dma_unmap_addr(cb, dma_addr)) {
  1610. dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
  1611. dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
  1612. dma_unmap_addr_set(cb, dma_addr, 0);
  1613. }
  1614. return skb;
  1615. }
  1616. /* Unlocked version of the reclaim routine */
  1617. static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
  1618. struct bcmgenet_tx_ring *ring)
  1619. {
  1620. struct bcmgenet_priv *priv = netdev_priv(dev);
  1621. unsigned int txbds_processed = 0;
  1622. unsigned int bytes_compl = 0;
  1623. unsigned int pkts_compl = 0;
  1624. unsigned int txbds_ready;
  1625. unsigned int c_index;
  1626. struct sk_buff *skb;
  1627. /* Clear status before servicing to reduce spurious interrupts */
  1628. if (ring->index == DESC_INDEX)
  1629. bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
  1630. INTRL2_CPU_CLEAR);
  1631. else
  1632. bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
  1633. INTRL2_CPU_CLEAR);
1634. /* Compute how many buffers have been transmitted since the last xmit call */
  1635. c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
  1636. & DMA_C_INDEX_MASK;
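/* The hardware consumer index is free-running, so the difference from the
 * cached ring->c_index is taken modulo DMA_C_INDEX_MASK + 1 and stays
 * correct across wrap-around.
 */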
  1637. txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
  1638. netif_dbg(priv, tx_done, dev,
  1639. "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
  1640. __func__, ring->index, ring->c_index, c_index, txbds_ready);
  1641. /* Reclaim transmitted buffers */
  1642. while (txbds_processed < txbds_ready) {
  1643. skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
  1644. &priv->tx_cbs[ring->clean_ptr]);
  1645. if (skb) {
  1646. pkts_compl++;
  1647. bytes_compl += GENET_CB(skb)->bytes_sent;
  1648. dev_consume_skb_any(skb);
  1649. }
  1650. txbds_processed++;
  1651. if (likely(ring->clean_ptr < ring->end_ptr))
  1652. ring->clean_ptr++;
  1653. else
  1654. ring->clean_ptr = ring->cb_ptr;
  1655. }
  1656. ring->free_bds += txbds_processed;
  1657. ring->c_index = c_index;
  1658. ring->packets += pkts_compl;
  1659. ring->bytes += bytes_compl;
  1660. netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
  1661. pkts_compl, bytes_compl);
  1662. return txbds_processed;
  1663. }
  1664. static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
  1665. struct bcmgenet_tx_ring *ring)
  1666. {
  1667. unsigned int released;
  1668. spin_lock_bh(&ring->lock);
  1669. released = __bcmgenet_tx_reclaim(dev, ring);
  1670. spin_unlock_bh(&ring->lock);
  1671. return released;
  1672. }
  1673. static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
  1674. {
  1675. struct bcmgenet_tx_ring *ring =
  1676. container_of(napi, struct bcmgenet_tx_ring, napi);
  1677. unsigned int work_done = 0;
  1678. struct netdev_queue *txq;
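/* Report the full budget whenever reclaim made progress so NAPI keeps
 * polling; only a pass that found nothing to clean completes NAPI and
 * re-arms the ring interrupt.
 */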
  1679. spin_lock(&ring->lock);
  1680. work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
  1681. if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
  1682. txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
  1683. netif_tx_wake_queue(txq);
  1684. }
  1685. spin_unlock(&ring->lock);
  1686. if (work_done == 0) {
  1687. napi_complete(napi);
  1688. ring->int_enable(ring);
  1689. return 0;
  1690. }
  1691. return budget;
  1692. }
  1693. static void bcmgenet_tx_reclaim_all(struct net_device *dev)
  1694. {
  1695. struct bcmgenet_priv *priv = netdev_priv(dev);
  1696. int i;
  1697. if (netif_is_multiqueue(dev)) {
  1698. for (i = 0; i < priv->hw_params->tx_queues; i++)
  1699. bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
  1700. }
  1701. bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
  1702. }
  1703. /* Reallocate the SKB to put enough headroom in front of it and insert
  1704. * the transmit checksum offsets in the descriptors
  1705. */
  1706. static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
  1707. struct sk_buff *skb)
  1708. {
  1709. struct bcmgenet_priv *priv = netdev_priv(dev);
  1710. struct status_64 *status = NULL;
  1711. struct sk_buff *new_skb;
  1712. u16 offset;
  1713. u8 ip_proto;
  1714. __be16 ip_ver;
  1715. u32 tx_csum_info;
  1716. if (unlikely(skb_headroom(skb) < sizeof(*status))) {
  1717. /* If 64 byte status block enabled, must make sure skb has
  1718. * enough headroom for us to insert 64B status block.
  1719. */
  1720. new_skb = skb_realloc_headroom(skb, sizeof(*status));
  1721. if (!new_skb) {
  1722. dev_kfree_skb_any(skb);
  1723. priv->mib.tx_realloc_tsb_failed++;
  1724. dev->stats.tx_dropped++;
  1725. return NULL;
  1726. }
  1727. dev_consume_skb_any(skb);
  1728. skb = new_skb;
  1729. priv->mib.tx_realloc_tsb++;
  1730. }
  1731. skb_push(skb, sizeof(*status));
  1732. status = (struct status_64 *)skb->data;
  1733. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  1734. ip_ver = skb->protocol;
  1735. switch (ip_ver) {
  1736. case htons(ETH_P_IP):
  1737. ip_proto = ip_hdr(skb)->protocol;
  1738. break;
  1739. case htons(ETH_P_IPV6):
  1740. ip_proto = ipv6_hdr(skb)->nexthdr;
  1741. break;
  1742. default:
  1743. /* don't use UDP flag */
  1744. ip_proto = 0;
  1745. break;
  1746. }
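/* skb_checksum_start_offset() is measured from skb->data, which now
 * includes the 64B status block pushed above, so subtract sizeof(*status)
 * to get an offset relative to the actual frame; the low bits carry the
 * offset at which the computed checksum must be written back
 * (start offset + csum_offset).
 */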
  1747. offset = skb_checksum_start_offset(skb) - sizeof(*status);
  1748. tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
  1749. (offset + skb->csum_offset) |
  1750. STATUS_TX_CSUM_LV;
  1751. /* Set the special UDP flag for UDP */
  1752. if (ip_proto == IPPROTO_UDP)
  1753. tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
  1754. status->tx_csum_info = tx_csum_info;
  1755. }
  1756. return skb;
  1757. }
  1758. static void bcmgenet_hide_tsb(struct sk_buff *skb)
  1759. {
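/* Undo the skb_push() done in bcmgenet_add_tsb() so that skb->data points
 * at the Ethernet header again before the skb is time-stamped in
 * bcmgenet_xmit().
 */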
  1760. __skb_pull(skb, sizeof(struct status_64));
  1761. }
  1762. static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
  1763. {
  1764. struct bcmgenet_priv *priv = netdev_priv(dev);
  1765. struct device *kdev = &priv->pdev->dev;
  1766. struct bcmgenet_tx_ring *ring = NULL;
  1767. struct enet_cb *tx_cb_ptr;
  1768. struct netdev_queue *txq;
  1769. int nr_frags, index;
  1770. dma_addr_t mapping;
  1771. unsigned int size;
  1772. skb_frag_t *frag;
  1773. u32 len_stat;
  1774. int ret;
  1775. int i;
  1776. index = skb_get_queue_mapping(skb);
  1777. /* Mapping strategy:
1778. * queue_mapping = 0, unclassified, packet transmitted through ring 16
1779. * queue_mapping = 1, goes to ring 0. (highest priority queue)
  1780. * queue_mapping = 2, goes to ring 1.
  1781. * queue_mapping = 3, goes to ring 2.
  1782. * queue_mapping = 4, goes to ring 3.
  1783. */
  1784. if (index == 0)
  1785. index = DESC_INDEX;
  1786. else
  1787. index -= 1;
  1788. ring = &priv->tx_rings[index];
  1789. txq = netdev_get_tx_queue(dev, ring->queue);
  1790. nr_frags = skb_shinfo(skb)->nr_frags;
  1791. spin_lock(&ring->lock);
  1792. if (ring->free_bds <= (nr_frags + 1)) {
  1793. if (!netif_tx_queue_stopped(txq)) {
  1794. netif_tx_stop_queue(txq);
  1795. netdev_err(dev,
  1796. "%s: tx ring %d full when queue %d awake\n",
  1797. __func__, index, ring->queue);
  1798. }
  1799. ret = NETDEV_TX_BUSY;
  1800. goto out;
  1801. }
  1802. /* Retain how many bytes will be sent on the wire, without TSB inserted
  1803. * by transmit checksum offload
  1804. */
  1805. GENET_CB(skb)->bytes_sent = skb->len;
  1806. /* add the Transmit Status Block */
  1807. skb = bcmgenet_add_tsb(dev, skb);
  1808. if (!skb) {
  1809. ret = NETDEV_TX_OK;
  1810. goto out;
  1811. }
  1812. for (i = 0; i <= nr_frags; i++) {
  1813. tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
  1814. BUG_ON(!tx_cb_ptr);
  1815. if (!i) {
  1816. /* Transmit single SKB or head of fragment list */
  1817. GENET_CB(skb)->first_cb = tx_cb_ptr;
  1818. size = skb_headlen(skb);
  1819. mapping = dma_map_single(kdev, skb->data, size,
  1820. DMA_TO_DEVICE);
  1821. } else {
  1822. /* xmit fragment */
  1823. frag = &skb_shinfo(skb)->frags[i - 1];
  1824. size = skb_frag_size(frag);
  1825. mapping = skb_frag_dma_map(kdev, frag, 0, size,
  1826. DMA_TO_DEVICE);
  1827. }
  1828. ret = dma_mapping_error(kdev, mapping);
  1829. if (ret) {
  1830. priv->mib.tx_dma_failed++;
  1831. netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
  1832. ret = NETDEV_TX_OK;
  1833. goto out_unmap_frags;
  1834. }
  1835. dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
  1836. dma_unmap_len_set(tx_cb_ptr, dma_len, size);
  1837. tx_cb_ptr->skb = skb;
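/* Build the descriptor's length/status word: the buffer length goes in
 * the field selected by DMA_BUFLENGTH_SHIFT, the QTAG mask and the
 * per-fragment flags (SOP/EOP, CRC append, checksum request) in the low
 * bits set below.
 */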
  1838. len_stat = (size << DMA_BUFLENGTH_SHIFT) |
  1839. (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
  1840. /* Note: if we ever change from DMA_TX_APPEND_CRC below we
  1841. * will need to restore software padding of "runt" packets
  1842. */
  1843. if (!i) {
  1844. len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
  1845. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1846. len_stat |= DMA_TX_DO_CSUM;
  1847. }
  1848. if (i == nr_frags)
  1849. len_stat |= DMA_EOP;
  1850. dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
  1851. }
  1852. GENET_CB(skb)->last_cb = tx_cb_ptr;
  1853. bcmgenet_hide_tsb(skb);
  1854. skb_tx_timestamp(skb);
  1855. /* Decrement total BD count and advance our write pointer */
  1856. ring->free_bds -= nr_frags + 1;
  1857. ring->prod_index += nr_frags + 1;
  1858. ring->prod_index &= DMA_P_INDEX_MASK;
  1859. netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
  1860. if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
  1861. netif_tx_stop_queue(txq);
  1862. if (!netdev_xmit_more() || netif_xmit_stopped(txq))
  1863. /* Packets are ready, update producer index */
  1864. bcmgenet_tdma_ring_writel(priv, ring->index,
  1865. ring->prod_index, TDMA_PROD_INDEX);
  1866. out:
  1867. spin_unlock(&ring->lock);
  1868. return ret;
  1869. out_unmap_frags:
  1870. /* Back up for failed control block mapping */
  1871. bcmgenet_put_txcb(priv, ring);
  1872. /* Unmap successfully mapped control blocks */
  1873. while (i-- > 0) {
  1874. tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
  1875. bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
  1876. }
  1877. dev_kfree_skb(skb);
  1878. goto out;
  1879. }
  1880. static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
  1881. struct enet_cb *cb)
  1882. {
  1883. struct device *kdev = &priv->pdev->dev;
  1884. struct sk_buff *skb;
  1885. struct sk_buff *rx_skb;
  1886. dma_addr_t mapping;
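/* Refill strategy: allocate and map the replacement skb first, and only
 * take the full buffer off the ring if that succeeds. An allocation or
 * mapping failure therefore costs us the incoming packet but never leaves
 * an Rx descriptor without a buffer.
 */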
  1887. /* Allocate a new Rx skb */
  1888. skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
  1889. GFP_ATOMIC | __GFP_NOWARN);
  1890. if (!skb) {
  1891. priv->mib.alloc_rx_buff_failed++;
  1892. netif_err(priv, rx_err, priv->dev,
  1893. "%s: Rx skb allocation failed\n", __func__);
  1894. return NULL;
  1895. }
  1896. /* DMA-map the new Rx skb */
  1897. mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
  1898. DMA_FROM_DEVICE);
  1899. if (dma_mapping_error(kdev, mapping)) {
  1900. priv->mib.rx_dma_failed++;
  1901. dev_kfree_skb_any(skb);
  1902. netif_err(priv, rx_err, priv->dev,
  1903. "%s: Rx skb DMA mapping failed\n", __func__);
  1904. return NULL;
  1905. }
  1906. /* Grab the current Rx skb from the ring and DMA-unmap it */
  1907. rx_skb = bcmgenet_free_rx_cb(kdev, cb);
  1908. /* Put the new Rx skb on the ring */
  1909. cb->skb = skb;
  1910. dma_unmap_addr_set(cb, dma_addr, mapping);
  1911. dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
  1912. dmadesc_set_addr(priv, cb->bd_addr, mapping);
  1913. /* Return the current Rx skb to caller */
  1914. return rx_skb;
  1915. }
1916. /* bcmgenet_desc_rx - descriptor-based rx processing.
1917. * This can be called from the bottom half or from the NAPI polling method.
  1918. */
  1919. static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
  1920. unsigned int budget)
  1921. {
  1922. struct bcmgenet_priv *priv = ring->priv;
  1923. struct net_device *dev = priv->dev;
  1924. struct enet_cb *cb;
  1925. struct sk_buff *skb;
  1926. u32 dma_length_status;
  1927. unsigned long dma_flag;
  1928. int len;
  1929. unsigned int rxpktprocessed = 0, rxpkttoprocess;
  1930. unsigned int bytes_processed = 0;
  1931. unsigned int p_index, mask;
  1932. unsigned int discards;
  1933. /* Clear status before servicing to reduce spurious interrupts */
  1934. if (ring->index == DESC_INDEX) {
  1935. bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
  1936. INTRL2_CPU_CLEAR);
  1937. } else {
  1938. mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
  1939. bcmgenet_intrl2_1_writel(priv,
  1940. mask,
  1941. INTRL2_CPU_CLEAR);
  1942. }
  1943. p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
  1944. discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
  1945. DMA_P_INDEX_DISCARD_CNT_MASK;
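/* The upper bits of RDMA_PROD_INDEX carry a running discard counter; fold
 * the delta since the last pass into ring->errors before the producer
 * index itself is masked off below.
 */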
  1946. if (discards > ring->old_discards) {
  1947. discards = discards - ring->old_discards;
  1948. ring->errors += discards;
  1949. ring->old_discards += discards;
  1950. /* Clear HW register when we reach 75% of maximum 0xFFFF */
  1951. if (ring->old_discards >= 0xC000) {
  1952. ring->old_discards = 0;
  1953. bcmgenet_rdma_ring_writel(priv, ring->index, 0,
  1954. RDMA_PROD_INDEX);
  1955. }
  1956. }
  1957. p_index &= DMA_P_INDEX_MASK;
  1958. rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
  1959. netif_dbg(priv, rx_status, dev,
  1960. "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
  1961. while ((rxpktprocessed < rxpkttoprocess) &&
  1962. (rxpktprocessed < budget)) {
  1963. struct status_64 *status;
  1964. __be16 rx_csum;
  1965. cb = &priv->rx_cbs[ring->read_ptr];
  1966. skb = bcmgenet_rx_refill(priv, cb);
  1967. if (unlikely(!skb)) {
  1968. ring->dropped++;
  1969. goto next;
  1970. }
  1971. status = (struct status_64 *)skb->data;
  1972. dma_length_status = status->length_status;
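/* With the 64B receive status block enabled the hardware reports a packet
 * checksum in the status word; a non-zero value is passed up as
 * CHECKSUM_COMPLETE so the stack can verify L4 checksums from skb->csum.
 */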
  1973. if (dev->features & NETIF_F_RXCSUM) {
  1974. rx_csum = (__force __be16)(status->rx_csum & 0xffff);
  1975. if (rx_csum) {
  1976. skb->csum = (__force __wsum)ntohs(rx_csum);
  1977. skb->ip_summed = CHECKSUM_COMPLETE;
  1978. }
  1979. }
  1980. /* DMA flags and length are still valid no matter how
  1981. * we got the Receive Status Vector (64B RSB or register)
  1982. */
  1983. dma_flag = dma_length_status & 0xffff;
  1984. len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
  1985. netif_dbg(priv, rx_status, dev,
  1986. "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
  1987. __func__, p_index, ring->c_index,
  1988. ring->read_ptr, dma_length_status);
  1989. if (unlikely(len > RX_BUF_LENGTH)) {
  1990. netif_err(priv, rx_status, dev, "oversized packet\n");
  1991. dev->stats.rx_length_errors++;
  1992. dev->stats.rx_errors++;
  1993. dev_kfree_skb_any(skb);
  1994. goto next;
  1995. }
  1996. if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
  1997. netif_err(priv, rx_status, dev,
  1998. "dropping fragmented packet!\n");
  1999. ring->errors++;
  2000. dev_kfree_skb_any(skb);
  2001. goto next;
  2002. }
  2003. /* report errors */
  2004. if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
  2005. DMA_RX_OV |
  2006. DMA_RX_NO |
  2007. DMA_RX_LG |
  2008. DMA_RX_RXER))) {
  2009. netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
  2010. (unsigned int)dma_flag);
  2011. if (dma_flag & DMA_RX_CRC_ERROR)
  2012. dev->stats.rx_crc_errors++;
  2013. if (dma_flag & DMA_RX_OV)
  2014. dev->stats.rx_over_errors++;
  2015. if (dma_flag & DMA_RX_NO)
  2016. dev->stats.rx_frame_errors++;
  2017. if (dma_flag & DMA_RX_LG)
  2018. dev->stats.rx_length_errors++;
  2019. dev->stats.rx_errors++;
  2020. dev_kfree_skb_any(skb);
  2021. goto next;
  2022. } /* error packet */
  2023. skb_put(skb, len);
2024. /* remove the 64B RSB and the 2 bytes the hardware added for IP alignment */
  2025. skb_pull(skb, 66);
  2026. len -= 66;
  2027. if (priv->crc_fwd_en) {
  2028. skb_trim(skb, len - ETH_FCS_LEN);
  2029. len -= ETH_FCS_LEN;
  2030. }
  2031. bytes_processed += len;
2032. /* Finish setting up the received SKB and send it to the kernel */
  2033. skb->protocol = eth_type_trans(skb, priv->dev);
  2034. ring->packets++;
  2035. ring->bytes += len;
  2036. if (dma_flag & DMA_RX_MULT)
  2037. dev->stats.multicast++;
  2038. /* Notify kernel */
  2039. napi_gro_receive(&ring->napi, skb);
  2040. netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
  2041. next:
  2042. rxpktprocessed++;
  2043. if (likely(ring->read_ptr < ring->end_ptr))
  2044. ring->read_ptr++;
  2045. else
  2046. ring->read_ptr = ring->cb_ptr;
  2047. ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
  2048. bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
  2049. }
  2050. ring->dim.bytes = bytes_processed;
  2051. ring->dim.packets = rxpktprocessed;
  2052. return rxpktprocessed;
  2053. }
  2054. /* Rx NAPI polling method */
  2055. static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
  2056. {
  2057. struct bcmgenet_rx_ring *ring = container_of(napi,
  2058. struct bcmgenet_rx_ring, napi);
  2059. struct dim_sample dim_sample = {};
  2060. unsigned int work_done;
  2061. work_done = bcmgenet_desc_rx(ring, budget);
  2062. if (work_done < budget) {
  2063. napi_complete_done(napi, work_done);
  2064. ring->int_enable(ring);
  2065. }
  2066. if (ring->dim.use_dim) {
  2067. dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
  2068. ring->dim.bytes, &dim_sample);
  2069. net_dim(&ring->dim.dim, dim_sample);
  2070. }
  2071. return work_done;
  2072. }
  2073. static void bcmgenet_dim_work(struct work_struct *work)
  2074. {
  2075. struct dim *dim = container_of(work, struct dim, work);
  2076. struct bcmgenet_net_dim *ndim =
  2077. container_of(dim, struct bcmgenet_net_dim, dim);
  2078. struct bcmgenet_rx_ring *ring =
  2079. container_of(ndim, struct bcmgenet_rx_ring, dim);
  2080. struct dim_cq_moder cur_profile =
  2081. net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
  2082. bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
  2083. dim->state = DIM_START_MEASURE;
  2084. }
  2085. /* Assign skb to RX DMA descriptor. */
  2086. static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
  2087. struct bcmgenet_rx_ring *ring)
  2088. {
  2089. struct enet_cb *cb;
  2090. struct sk_buff *skb;
  2091. int i;
  2092. netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2093. /* loop here for each buffer needing assignment */
  2094. for (i = 0; i < ring->size; i++) {
  2095. cb = ring->cbs + i;
  2096. skb = bcmgenet_rx_refill(priv, cb);
  2097. if (skb)
  2098. dev_consume_skb_any(skb);
  2099. if (!cb->skb)
  2100. return -ENOMEM;
  2101. }
  2102. return 0;
  2103. }
  2104. static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
  2105. {
  2106. struct sk_buff *skb;
  2107. struct enet_cb *cb;
  2108. int i;
  2109. for (i = 0; i < priv->num_rx_bds; i++) {
  2110. cb = &priv->rx_cbs[i];
  2111. skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
  2112. if (skb)
  2113. dev_consume_skb_any(skb);
  2114. }
  2115. }
  2116. static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
  2117. {
  2118. u32 reg;
  2119. reg = bcmgenet_umac_readl(priv, UMAC_CMD);
  2120. if (reg & CMD_SW_RESET)
  2121. return;
  2122. if (enable)
  2123. reg |= mask;
  2124. else
  2125. reg &= ~mask;
  2126. bcmgenet_umac_writel(priv, reg, UMAC_CMD);
  2127. /* UniMAC stops on a packet boundary, wait for a full-size packet
  2128. * to be processed
  2129. */
  2130. if (enable == 0)
  2131. usleep_range(1000, 2000);
  2132. }
  2133. static void reset_umac(struct bcmgenet_priv *priv)
  2134. {
  2135. /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
  2136. bcmgenet_rbuf_ctrl_set(priv, 0);
  2137. udelay(10);
  2138. /* issue soft reset and disable MAC while updating its registers */
  2139. bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
  2140. udelay(2);
  2141. }
  2142. static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
  2143. {
2144. /* Mask all interrupts. */
  2145. bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
  2146. bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
  2147. bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
  2148. bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
  2149. }
  2150. static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
  2151. {
  2152. u32 int0_enable = 0;
2153. /* Monitor cable plug/unplug events for internal PHY, external PHY
  2154. * and MoCA PHY
  2155. */
  2156. if (priv->internal_phy) {
  2157. int0_enable |= UMAC_IRQ_LINK_EVENT;
  2158. if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
  2159. int0_enable |= UMAC_IRQ_PHY_DET_R;
  2160. } else if (priv->ext_phy) {
  2161. int0_enable |= UMAC_IRQ_LINK_EVENT;
  2162. } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
  2163. if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
  2164. int0_enable |= UMAC_IRQ_LINK_EVENT;
  2165. }
  2166. bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
  2167. }
  2168. static void init_umac(struct bcmgenet_priv *priv)
  2169. {
  2170. struct device *kdev = &priv->pdev->dev;
  2171. u32 reg;
  2172. u32 int0_enable = 0;
  2173. dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
  2174. reset_umac(priv);
  2175. /* clear tx/rx counter */
  2176. bcmgenet_umac_writel(priv,
  2177. MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
  2178. UMAC_MIB_CTRL);
  2179. bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
  2180. bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
  2181. /* init tx registers, enable TSB */
  2182. reg = bcmgenet_tbuf_ctrl_get(priv);
  2183. reg |= TBUF_64B_EN;
  2184. bcmgenet_tbuf_ctrl_set(priv, reg);
  2185. /* init rx registers, enable ip header optimization and RSB */
  2186. reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
  2187. reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
  2188. bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
  2189. /* enable rx checksumming */
  2190. reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
  2191. reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
  2192. /* If UniMAC forwards CRC, we need to skip over it to get
  2193. * a valid CHK bit to be set in the per-packet status word
  2194. */
  2195. if (priv->crc_fwd_en)
  2196. reg |= RBUF_SKIP_FCS;
  2197. else
  2198. reg &= ~RBUF_SKIP_FCS;
  2199. bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
  2200. if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
  2201. bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
  2202. bcmgenet_intr_disable(priv);
  2203. /* Configure backpressure vectors for MoCA */
  2204. if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
  2205. reg = bcmgenet_bp_mc_get(priv);
  2206. reg |= BIT(priv->hw_params->bp_in_en_shift);
  2207. /* bp_mask: back pressure mask */
  2208. if (netif_is_multiqueue(priv->dev))
  2209. reg |= priv->hw_params->bp_in_mask;
  2210. else
  2211. reg &= ~priv->hw_params->bp_in_mask;
  2212. bcmgenet_bp_mc_set(priv, reg);
  2213. }
  2214. /* Enable MDIO interrupts on GENET v3+ */
  2215. if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
  2216. int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
  2217. bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
  2218. dev_dbg(kdev, "done init umac\n");
  2219. }
  2220. static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
  2221. void (*cb)(struct work_struct *work))
  2222. {
  2223. struct bcmgenet_net_dim *dim = &ring->dim;
  2224. INIT_WORK(&dim->dim.work, cb);
  2225. dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
  2226. dim->event_ctr = 0;
  2227. dim->packets = 0;
  2228. dim->bytes = 0;
  2229. }
  2230. static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
  2231. {
  2232. struct bcmgenet_net_dim *dim = &ring->dim;
  2233. struct dim_cq_moder moder;
  2234. u32 usecs, pkts;
  2235. usecs = ring->rx_coalesce_usecs;
  2236. pkts = ring->rx_max_coalesced_frames;
  2237. /* If DIM was enabled, re-apply default parameters */
  2238. if (dim->use_dim) {
  2239. moder = net_dim_get_def_rx_moderation(dim->dim.mode);
  2240. usecs = moder.usec;
  2241. pkts = moder.pkts;
  2242. }
  2243. bcmgenet_set_rx_coalesce(ring, usecs, pkts);
  2244. }
  2245. /* Initialize a Tx ring along with corresponding hardware registers */
  2246. static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
  2247. unsigned int index, unsigned int size,
  2248. unsigned int start_ptr, unsigned int end_ptr)
  2249. {
  2250. struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
  2251. u32 words_per_bd = WORDS_PER_BD(priv);
  2252. u32 flow_period_val = 0;
  2253. spin_lock_init(&ring->lock);
  2254. ring->priv = priv;
  2255. ring->index = index;
  2256. if (index == DESC_INDEX) {
  2257. ring->queue = 0;
  2258. ring->int_enable = bcmgenet_tx_ring16_int_enable;
  2259. ring->int_disable = bcmgenet_tx_ring16_int_disable;
  2260. } else {
  2261. ring->queue = index + 1;
  2262. ring->int_enable = bcmgenet_tx_ring_int_enable;
  2263. ring->int_disable = bcmgenet_tx_ring_int_disable;
  2264. }
  2265. ring->cbs = priv->tx_cbs + start_ptr;
  2266. ring->size = size;
  2267. ring->clean_ptr = start_ptr;
  2268. ring->c_index = 0;
  2269. ring->free_bds = size;
  2270. ring->write_ptr = start_ptr;
  2271. ring->cb_ptr = start_ptr;
  2272. ring->end_ptr = end_ptr - 1;
  2273. ring->prod_index = 0;
  2274. /* Set flow period for ring != 16 */
  2275. if (index != DESC_INDEX)
  2276. flow_period_val = ENET_MAX_MTU_SIZE << 16;
  2277. bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
  2278. bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
  2279. bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
  2280. /* Disable rate control for now */
  2281. bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
  2282. TDMA_FLOW_PERIOD);
  2283. bcmgenet_tdma_ring_writel(priv, index,
  2284. ((size << DMA_RING_SIZE_SHIFT) |
  2285. RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
  2286. /* Set start and end address, read and write pointers */
  2287. bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
  2288. DMA_START_ADDR);
  2289. bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
  2290. TDMA_READ_PTR);
  2291. bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
  2292. TDMA_WRITE_PTR);
  2293. bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
  2294. DMA_END_ADDR);
  2295. /* Initialize Tx NAPI */
  2296. netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
  2297. }
  2298. /* Initialize a RDMA ring */
  2299. static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
  2300. unsigned int index, unsigned int size,
  2301. unsigned int start_ptr, unsigned int end_ptr)
  2302. {
  2303. struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
  2304. u32 words_per_bd = WORDS_PER_BD(priv);
  2305. int ret;
  2306. ring->priv = priv;
  2307. ring->index = index;
  2308. if (index == DESC_INDEX) {
  2309. ring->int_enable = bcmgenet_rx_ring16_int_enable;
  2310. ring->int_disable = bcmgenet_rx_ring16_int_disable;
  2311. } else {
  2312. ring->int_enable = bcmgenet_rx_ring_int_enable;
  2313. ring->int_disable = bcmgenet_rx_ring_int_disable;
  2314. }
  2315. ring->cbs = priv->rx_cbs + start_ptr;
  2316. ring->size = size;
  2317. ring->c_index = 0;
  2318. ring->read_ptr = start_ptr;
  2319. ring->cb_ptr = start_ptr;
  2320. ring->end_ptr = end_ptr - 1;
  2321. ret = bcmgenet_alloc_rx_buffers(priv, ring);
  2322. if (ret)
  2323. return ret;
  2324. bcmgenet_init_dim(ring, bcmgenet_dim_work);
  2325. bcmgenet_init_rx_coalesce(ring);
  2326. /* Initialize Rx NAPI */
  2327. netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
  2328. bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
  2329. bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
  2330. bcmgenet_rdma_ring_writel(priv, index,
  2331. ((size << DMA_RING_SIZE_SHIFT) |
  2332. RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
  2333. bcmgenet_rdma_ring_writel(priv, index,
  2334. (DMA_FC_THRESH_LO <<
  2335. DMA_XOFF_THRESHOLD_SHIFT) |
  2336. DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
  2337. /* Set start and end address, read and write pointers */
  2338. bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
  2339. DMA_START_ADDR);
  2340. bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
  2341. RDMA_READ_PTR);
  2342. bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
  2343. RDMA_WRITE_PTR);
  2344. bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
  2345. DMA_END_ADDR);
  2346. return ret;
  2347. }
  2348. static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
  2349. {
  2350. unsigned int i;
  2351. struct bcmgenet_tx_ring *ring;
  2352. for (i = 0; i < priv->hw_params->tx_queues; ++i) {
  2353. ring = &priv->tx_rings[i];
  2354. napi_enable(&ring->napi);
  2355. ring->int_enable(ring);
  2356. }
  2357. ring = &priv->tx_rings[DESC_INDEX];
  2358. napi_enable(&ring->napi);
  2359. ring->int_enable(ring);
  2360. }
  2361. static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
  2362. {
  2363. unsigned int i;
  2364. struct bcmgenet_tx_ring *ring;
  2365. for (i = 0; i < priv->hw_params->tx_queues; ++i) {
  2366. ring = &priv->tx_rings[i];
  2367. napi_disable(&ring->napi);
  2368. }
  2369. ring = &priv->tx_rings[DESC_INDEX];
  2370. napi_disable(&ring->napi);
  2371. }
  2372. static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
  2373. {
  2374. unsigned int i;
  2375. struct bcmgenet_tx_ring *ring;
  2376. for (i = 0; i < priv->hw_params->tx_queues; ++i) {
  2377. ring = &priv->tx_rings[i];
  2378. netif_napi_del(&ring->napi);
  2379. }
  2380. ring = &priv->tx_rings[DESC_INDEX];
  2381. netif_napi_del(&ring->napi);
  2382. }
  2383. /* Initialize Tx queues
  2384. *
  2385. * Queues 0-3 are priority-based, each one has 32 descriptors,
  2386. * with queue 0 being the highest priority queue.
  2387. *
  2388. * Queue 16 is the default Tx queue with
  2389. * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
  2390. *
  2391. * The transmit control block pool is then partitioned as follows:
  2392. * - Tx queue 0 uses tx_cbs[0..31]
  2393. * - Tx queue 1 uses tx_cbs[32..63]
  2394. * - Tx queue 2 uses tx_cbs[64..95]
  2395. * - Tx queue 3 uses tx_cbs[96..127]
  2396. * - Tx queue 16 uses tx_cbs[128..255]
  2397. */
  2398. static void bcmgenet_init_tx_queues(struct net_device *dev)
  2399. {
  2400. struct bcmgenet_priv *priv = netdev_priv(dev);
  2401. u32 i, dma_enable;
  2402. u32 dma_ctrl, ring_cfg;
  2403. u32 dma_priority[3] = {0, 0, 0};
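/* Tx queue priorities are packed several to a register:
 * DMA_PRIO_REG_INDEX() selects which of DMA_PRIORITY_0..2 a queue's value
 * lands in and DMA_PRIO_REG_SHIFT() the bit field within it. Queue i is
 * programmed with GENET_Q0_PRIORITY + i and the default queue with the
 * value following the last priority queue.
 */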
  2404. dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
  2405. dma_enable = dma_ctrl & DMA_EN;
  2406. dma_ctrl &= ~DMA_EN;
  2407. bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
  2408. dma_ctrl = 0;
  2409. ring_cfg = 0;
  2410. /* Enable strict priority arbiter mode */
  2411. bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
  2412. /* Initialize Tx priority queues */
  2413. for (i = 0; i < priv->hw_params->tx_queues; i++) {
  2414. bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
  2415. i * priv->hw_params->tx_bds_per_q,
  2416. (i + 1) * priv->hw_params->tx_bds_per_q);
  2417. ring_cfg |= (1 << i);
  2418. dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
  2419. dma_priority[DMA_PRIO_REG_INDEX(i)] |=
  2420. ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
  2421. }
  2422. /* Initialize Tx default queue 16 */
  2423. bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
  2424. priv->hw_params->tx_queues *
  2425. priv->hw_params->tx_bds_per_q,
  2426. TOTAL_DESC);
  2427. ring_cfg |= (1 << DESC_INDEX);
  2428. dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
  2429. dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
  2430. ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
  2431. DMA_PRIO_REG_SHIFT(DESC_INDEX));
  2432. /* Set Tx queue priorities */
  2433. bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
  2434. bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
  2435. bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
  2436. /* Enable Tx queues */
  2437. bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
  2438. /* Enable Tx DMA */
  2439. if (dma_enable)
  2440. dma_ctrl |= DMA_EN;
  2441. bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
  2442. }
  2443. static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
  2444. {
  2445. unsigned int i;
  2446. struct bcmgenet_rx_ring *ring;
  2447. for (i = 0; i < priv->hw_params->rx_queues; ++i) {
  2448. ring = &priv->rx_rings[i];
  2449. napi_enable(&ring->napi);
  2450. ring->int_enable(ring);
  2451. }
  2452. ring = &priv->rx_rings[DESC_INDEX];
  2453. napi_enable(&ring->napi);
  2454. ring->int_enable(ring);
  2455. }
  2456. static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
  2457. {
  2458. unsigned int i;
  2459. struct bcmgenet_rx_ring *ring;
  2460. for (i = 0; i < priv->hw_params->rx_queues; ++i) {
  2461. ring = &priv->rx_rings[i];
  2462. napi_disable(&ring->napi);
  2463. cancel_work_sync(&ring->dim.dim.work);
  2464. }
  2465. ring = &priv->rx_rings[DESC_INDEX];
  2466. napi_disable(&ring->napi);
  2467. cancel_work_sync(&ring->dim.dim.work);
  2468. }
  2469. static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
  2470. {
  2471. unsigned int i;
  2472. struct bcmgenet_rx_ring *ring;
  2473. for (i = 0; i < priv->hw_params->rx_queues; ++i) {
  2474. ring = &priv->rx_rings[i];
  2475. netif_napi_del(&ring->napi);
  2476. }
  2477. ring = &priv->rx_rings[DESC_INDEX];
  2478. netif_napi_del(&ring->napi);
  2479. }
  2480. /* Initialize Rx queues
  2481. *
  2482. * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
  2483. * used to direct traffic to these queues.
  2484. *
  2485. * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
  2486. */
  2487. static int bcmgenet_init_rx_queues(struct net_device *dev)
  2488. {
  2489. struct bcmgenet_priv *priv = netdev_priv(dev);
  2490. u32 i;
  2491. u32 dma_enable;
  2492. u32 dma_ctrl;
  2493. u32 ring_cfg;
  2494. int ret;
  2495. dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
  2496. dma_enable = dma_ctrl & DMA_EN;
  2497. dma_ctrl &= ~DMA_EN;
  2498. bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
  2499. dma_ctrl = 0;
  2500. ring_cfg = 0;
  2501. /* Initialize Rx priority queues */
  2502. for (i = 0; i < priv->hw_params->rx_queues; i++) {
  2503. ret = bcmgenet_init_rx_ring(priv, i,
  2504. priv->hw_params->rx_bds_per_q,
  2505. i * priv->hw_params->rx_bds_per_q,
  2506. (i + 1) *
  2507. priv->hw_params->rx_bds_per_q);
  2508. if (ret)
  2509. return ret;
  2510. ring_cfg |= (1 << i);
  2511. dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
  2512. }
  2513. /* Initialize Rx default queue 16 */
  2514. ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
  2515. priv->hw_params->rx_queues *
  2516. priv->hw_params->rx_bds_per_q,
  2517. TOTAL_DESC);
  2518. if (ret)
  2519. return ret;
  2520. ring_cfg |= (1 << DESC_INDEX);
  2521. dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
  2522. /* Enable rings */
  2523. bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2524. /* Configure rings as descriptor rings and re-enable DMA if it was enabled */
  2525. if (dma_enable)
  2526. dma_ctrl |= DMA_EN;
  2527. bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
  2528. return 0;
  2529. }
  2530. static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
  2531. {
  2532. int ret = 0;
  2533. int timeout = 0;
  2534. u32 reg;
  2535. u32 dma_ctrl;
  2536. int i;
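/* Teardown order: stop the TDMA engine and wait for it to report
 * DMA_DISABLED, give in-flight packets time to drain, stop RDMA the same
 * way, then clear the per-ring buffer enable bits in both directions.
 */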
2537. /* Disable TDMA to stop adding more frames to TX DMA */
  2538. reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
  2539. reg &= ~DMA_EN;
  2540. bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
  2541. /* Check TDMA status register to confirm TDMA is disabled */
  2542. while (timeout++ < DMA_TIMEOUT_VAL) {
  2543. reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
  2544. if (reg & DMA_DISABLED)
  2545. break;
  2546. udelay(1);
  2547. }
  2548. if (timeout == DMA_TIMEOUT_VAL) {
  2549. netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
  2550. ret = -ETIMEDOUT;
  2551. }
  2552. /* Wait 10ms for packet drain in both tx and rx dma */
  2553. usleep_range(10000, 20000);
  2554. /* Disable RDMA */
  2555. reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
  2556. reg &= ~DMA_EN;
  2557. bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
  2558. timeout = 0;
  2559. /* Check RDMA status register to confirm RDMA is disabled */
  2560. while (timeout++ < DMA_TIMEOUT_VAL) {
  2561. reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
  2562. if (reg & DMA_DISABLED)
  2563. break;
  2564. udelay(1);
  2565. }
  2566. if (timeout == DMA_TIMEOUT_VAL) {
  2567. netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
  2568. ret = -ETIMEDOUT;
  2569. }
  2570. dma_ctrl = 0;
  2571. for (i = 0; i < priv->hw_params->rx_queues; i++)
  2572. dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
  2573. reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
  2574. reg &= ~dma_ctrl;
  2575. bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
  2576. dma_ctrl = 0;
  2577. for (i = 0; i < priv->hw_params->tx_queues; i++)
  2578. dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
  2579. reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
  2580. reg &= ~dma_ctrl;
  2581. bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
  2582. return ret;
  2583. }
  2584. static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
  2585. {
  2586. struct netdev_queue *txq;
  2587. int i;
  2588. bcmgenet_fini_rx_napi(priv);
  2589. bcmgenet_fini_tx_napi(priv);
  2590. for (i = 0; i < priv->num_tx_bds; i++)
  2591. dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
  2592. priv->tx_cbs + i));
  2593. for (i = 0; i < priv->hw_params->tx_queues; i++) {
  2594. txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
  2595. netdev_tx_reset_queue(txq);
  2596. }
  2597. txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
  2598. netdev_tx_reset_queue(txq);
  2599. bcmgenet_free_rx_buffers(priv);
  2600. kfree(priv->rx_cbs);
  2601. kfree(priv->tx_cbs);
  2602. }
2603. /* bcmgenet_init_dma: initialize the Rx/Tx DMA rings and control registers */
  2604. static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
  2605. {
  2606. int ret;
  2607. unsigned int i;
  2608. struct enet_cb *cb;
  2609. netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
  2610. /* Initialize common Rx ring structures */
  2611. priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
  2612. priv->num_rx_bds = TOTAL_DESC;
  2613. priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
  2614. GFP_KERNEL);
  2615. if (!priv->rx_cbs)
  2616. return -ENOMEM;
  2617. for (i = 0; i < priv->num_rx_bds; i++) {
  2618. cb = priv->rx_cbs + i;
  2619. cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
  2620. }
  2621. /* Initialize common TX ring structures */
  2622. priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
  2623. priv->num_tx_bds = TOTAL_DESC;
  2624. priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
  2625. GFP_KERNEL);
  2626. if (!priv->tx_cbs) {
  2627. kfree(priv->rx_cbs);
  2628. return -ENOMEM;
  2629. }
  2630. for (i = 0; i < priv->num_tx_bds; i++) {
  2631. cb = priv->tx_cbs + i;
  2632. cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
  2633. }
  2634. /* Init rDma */
  2635. bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
  2636. DMA_SCB_BURST_SIZE);
  2637. /* Initialize Rx queues */
  2638. ret = bcmgenet_init_rx_queues(priv->dev);
  2639. if (ret) {
  2640. netdev_err(priv->dev, "failed to initialize Rx queues\n");
  2641. bcmgenet_free_rx_buffers(priv);
  2642. kfree(priv->rx_cbs);
  2643. kfree(priv->tx_cbs);
  2644. return ret;
  2645. }
  2646. /* Init tDma */
  2647. bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
  2648. DMA_SCB_BURST_SIZE);
  2649. /* Initialize Tx queues */
  2650. bcmgenet_init_tx_queues(priv->dev);
  2651. return 0;
  2652. }
  2653. /* Interrupt bottom half */
  2654. static void bcmgenet_irq_task(struct work_struct *work)
  2655. {
  2656. unsigned int status;
  2657. struct bcmgenet_priv *priv = container_of(
  2658. work, struct bcmgenet_priv, bcmgenet_irq_work);
  2659. netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
  2660. spin_lock_irq(&priv->lock);
  2661. status = priv->irq0_stat;
  2662. priv->irq0_stat = 0;
  2663. spin_unlock_irq(&priv->lock);
  2664. if (status & UMAC_IRQ_PHY_DET_R &&
  2665. priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
  2666. phy_init_hw(priv->dev->phydev);
  2667. genphy_config_aneg(priv->dev->phydev);
  2668. }
  2669. /* Link UP/DOWN event */
  2670. if (status & UMAC_IRQ_LINK_EVENT)
  2671. phy_mac_interrupt(priv->dev->phydev);
  2672. }
  2673. /* bcmgenet_isr1: handle Rx and Tx priority queues */
  2674. static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
  2675. {
  2676. struct bcmgenet_priv *priv = dev_id;
  2677. struct bcmgenet_rx_ring *rx_ring;
  2678. struct bcmgenet_tx_ring *tx_ring;
  2679. unsigned int index, status;
  2680. /* Read irq status */
  2681. status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
  2682. ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
  2683. /* clear interrupts */
  2684. bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
  2685. netif_dbg(priv, intr, priv->dev,
  2686. "%s: IRQ=0x%x\n", __func__, status);
  2687. /* Check Rx priority queue interrupts */
  2688. for (index = 0; index < priv->hw_params->rx_queues; index++) {
  2689. if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
  2690. continue;
  2691. rx_ring = &priv->rx_rings[index];
  2692. rx_ring->dim.event_ctr++;
  2693. if (likely(napi_schedule_prep(&rx_ring->napi))) {
  2694. rx_ring->int_disable(rx_ring);
  2695. __napi_schedule_irqoff(&rx_ring->napi);
  2696. }
  2697. }
  2698. /* Check Tx priority queue interrupts */
  2699. for (index = 0; index < priv->hw_params->tx_queues; index++) {
  2700. if (!(status & BIT(index)))
  2701. continue;
  2702. tx_ring = &priv->tx_rings[index];
  2703. if (likely(napi_schedule_prep(&tx_ring->napi))) {
  2704. tx_ring->int_disable(tx_ring);
  2705. __napi_schedule_irqoff(&tx_ring->napi);
  2706. }
  2707. }
  2708. return IRQ_HANDLED;
  2709. }
2710. /* bcmgenet_isr0: handle Rx and Tx default queues + link, PHY and MDIO events */
  2711. static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
  2712. {
  2713. struct bcmgenet_priv *priv = dev_id;
  2714. struct bcmgenet_rx_ring *rx_ring;
  2715. struct bcmgenet_tx_ring *tx_ring;
  2716. unsigned int status;
  2717. unsigned long flags;
  2718. /* Read irq status */
  2719. status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
  2720. ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
  2721. /* clear interrupts */
  2722. bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
  2723. netif_dbg(priv, intr, priv->dev,
  2724. "IRQ=0x%x\n", status);
  2725. if (status & UMAC_IRQ_RXDMA_DONE) {
  2726. rx_ring = &priv->rx_rings[DESC_INDEX];
  2727. rx_ring->dim.event_ctr++;
  2728. if (likely(napi_schedule_prep(&rx_ring->napi))) {
  2729. rx_ring->int_disable(rx_ring);
  2730. __napi_schedule_irqoff(&rx_ring->napi);
  2731. }
  2732. }
  2733. if (status & UMAC_IRQ_TXDMA_DONE) {
  2734. tx_ring = &priv->tx_rings[DESC_INDEX];
  2735. if (likely(napi_schedule_prep(&tx_ring->napi))) {
  2736. tx_ring->int_disable(tx_ring);
  2737. __napi_schedule_irqoff(&tx_ring->napi);
  2738. }
  2739. }
  2740. if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
  2741. status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
  2742. wake_up(&priv->wq);
  2743. }
2744. /* All other interrupts of interest are handled in the bottom half */
  2745. status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
  2746. if (status) {
  2747. /* Save irq status for bottom-half processing. */
  2748. spin_lock_irqsave(&priv->lock, flags);
  2749. priv->irq0_stat |= status;
  2750. spin_unlock_irqrestore(&priv->lock, flags);
  2751. schedule_work(&priv->bcmgenet_irq_work);
  2752. }
  2753. return IRQ_HANDLED;
  2754. }
  2755. static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
  2756. {
  2757. /* Acknowledge the interrupt */
  2758. return IRQ_HANDLED;
  2759. }
  2760. #ifdef CONFIG_NET_POLL_CONTROLLER
  2761. static void bcmgenet_poll_controller(struct net_device *dev)
  2762. {
  2763. struct bcmgenet_priv *priv = netdev_priv(dev);
  2764. /* Invoke the main RX/TX interrupt handler */
  2765. disable_irq(priv->irq0);
  2766. bcmgenet_isr0(priv->irq0, priv);
  2767. enable_irq(priv->irq0);
  2768. /* And the interrupt handler for RX/TX priority queues */
  2769. disable_irq(priv->irq1);
  2770. bcmgenet_isr1(priv->irq1, priv);
  2771. enable_irq(priv->irq1);
  2772. }
  2773. #endif
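/* Pulse the UniMAC software reset bit (bit 1 of RBUF_CTRL) with a 10us delay
 * after each edge.
 */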
  2774. static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
  2775. {
  2776. u32 reg;
  2777. reg = bcmgenet_rbuf_ctrl_get(priv);
  2778. reg |= BIT(1);
  2779. bcmgenet_rbuf_ctrl_set(priv, reg);
  2780. udelay(10);
  2781. reg &= ~BIT(1);
  2782. bcmgenet_rbuf_ctrl_set(priv, reg);
  2783. udelay(10);
  2784. }
  2785. static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
  2786. const unsigned char *addr)
  2787. {
  2788. bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
  2789. bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
  2790. }
  2791. static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
  2792. unsigned char *addr)
  2793. {
  2794. u32 addr_tmp;
  2795. addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
  2796. put_unaligned_be32(addr_tmp, &addr[0]);
  2797. addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
  2798. put_unaligned_be16(addr_tmp, &addr[4]);
  2799. }
  2800. /* Returns a reusable dma control register value */
  2801. static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
  2802. {
  2803. unsigned int i;
  2804. u32 reg;
  2805. u32 dma_ctrl;
  2806. /* disable DMA */
  2807. dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
  2808. for (i = 0; i < priv->hw_params->tx_queues; i++)
  2809. dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
  2810. reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
  2811. reg &= ~dma_ctrl;
  2812. bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
  2813. dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
  2814. for (i = 0; i < priv->hw_params->rx_queues; i++)
  2815. dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
  2816. reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
  2817. reg &= ~dma_ctrl;
  2818. bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
  2819. bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
  2820. udelay(10);
  2821. bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
  2822. return dma_ctrl;
  2823. }
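/* Restore the ring buffer enable and DMA_EN bits returned by
 * bcmgenet_dma_disable() on both the RX and TX engines.
 */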
  2824. static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
  2825. {
  2826. u32 reg;
  2827. reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
  2828. reg |= dma_ctrl;
  2829. bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
  2830. reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
  2831. reg |= dma_ctrl;
  2832. bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
  2833. }
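/* Bring the data path up: program the RX filter, enable NAPI and the UniMAC
 * TX/RX paths, unmask the link interrupts and start the PHY.
 */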
  2834. static void bcmgenet_netif_start(struct net_device *dev)
  2835. {
  2836. struct bcmgenet_priv *priv = netdev_priv(dev);
  2837. /* Start the network engine */
  2838. bcmgenet_set_rx_mode(dev);
  2839. bcmgenet_enable_rx_napi(priv);
  2840. umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
  2841. bcmgenet_enable_tx_napi(priv);
  2842. /* Monitor link interrupts now */
  2843. bcmgenet_link_intr_enable(priv);
  2844. phy_start(dev->phydev);
  2845. }
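/* ndo_open: enable clocks and (if internal) the PHY, reset and program the
 * UniMAC, rebuild the DMA rings, request both interrupt lines, connect the
 * PHY and start the transmit queues.
 */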
  2846. static int bcmgenet_open(struct net_device *dev)
  2847. {
  2848. struct bcmgenet_priv *priv = netdev_priv(dev);
  2849. unsigned long dma_ctrl;
  2850. int ret;
  2851. netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
  2852. /* Turn on the clock */
  2853. clk_prepare_enable(priv->clk);
  2854. /* If this is an internal GPHY, power it back on now, before UniMAC is
  2855. * brought out of reset as absolutely no UniMAC activity is allowed
  2856. */
  2857. if (priv->internal_phy)
  2858. bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
  2859. /* take MAC out of reset */
  2860. bcmgenet_umac_reset(priv);
  2861. init_umac(priv);
  2862. /* Apply features again in case we changed them while interface was
  2863. * down
  2864. */
  2865. bcmgenet_set_features(dev, dev->features);
  2866. bcmgenet_set_hw_addr(priv, dev->dev_addr);
  2867. /* Disable RX/TX DMA and flush TX queues */
  2868. dma_ctrl = bcmgenet_dma_disable(priv);
  2869. /* Reinitialize TDMA and RDMA and SW housekeeping */
  2870. ret = bcmgenet_init_dma(priv);
  2871. if (ret) {
  2872. netdev_err(dev, "failed to initialize DMA\n");
  2873. goto err_clk_disable;
  2874. }
  2875. /* Always enable ring 16 - descriptor ring */
  2876. bcmgenet_enable_dma(priv, dma_ctrl);
  2877. /* HFB init */
  2878. bcmgenet_hfb_init(priv);
  2879. ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
  2880. dev->name, priv);
  2881. if (ret < 0) {
  2882. netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
  2883. goto err_fini_dma;
  2884. }
  2885. ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
  2886. dev->name, priv);
  2887. if (ret < 0) {
  2888. netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
  2889. goto err_irq0;
  2890. }
  2891. ret = bcmgenet_mii_probe(dev);
  2892. if (ret) {
  2893. netdev_err(dev, "failed to connect to PHY\n");
  2894. goto err_irq1;
  2895. }
  2896. bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
  2897. bcmgenet_netif_start(dev);
  2898. netif_tx_start_all_queues(dev);
  2899. return 0;
  2900. err_irq1:
  2901. free_irq(priv->irq1, priv);
  2902. err_irq0:
  2903. free_irq(priv->irq0, priv);
  2904. err_fini_dma:
  2905. bcmgenet_dma_teardown(priv);
  2906. bcmgenet_fini_dma(priv);
  2907. err_clk_disable:
  2908. if (priv->internal_phy)
  2909. bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
  2910. clk_disable_unprepare(priv->clk);
  2911. return ret;
  2912. }
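/* Quiesce the data path: stop TX, disable the UniMAC receiver, tear down DMA,
 * disable the transmitter, optionally stop the PHY, mask interrupts, flush the
 * deferred IRQ work and reclaim pending TX buffers.
 */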
  2913. static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
  2914. {
  2915. struct bcmgenet_priv *priv = netdev_priv(dev);
  2916. bcmgenet_disable_tx_napi(priv);
  2917. netif_tx_disable(dev);
  2918. /* Disable MAC receive */
  2919. umac_enable_set(priv, CMD_RX_EN, false);
  2920. bcmgenet_dma_teardown(priv);
2921. /* Disable MAC transmit; TX DMA must be disabled before this */
  2922. umac_enable_set(priv, CMD_TX_EN, false);
  2923. if (stop_phy)
  2924. phy_stop(dev->phydev);
  2925. bcmgenet_disable_rx_napi(priv);
  2926. bcmgenet_intr_disable(priv);
  2927. /* Wait for pending work items to complete. Since interrupts are
2928. * disabled, no new work will be scheduled.
  2929. */
  2930. cancel_work_sync(&priv->bcmgenet_irq_work);
  2931. /* tx reclaim */
  2932. bcmgenet_tx_reclaim_all(dev);
  2933. bcmgenet_fini_dma(priv);
  2934. }
  2935. static int bcmgenet_close(struct net_device *dev)
  2936. {
  2937. struct bcmgenet_priv *priv = netdev_priv(dev);
  2938. int ret = 0;
  2939. netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
  2940. bcmgenet_netif_stop(dev, false);
  2941. /* Really kill the PHY state machine and disconnect from it */
  2942. phy_disconnect(dev->phydev);
  2943. free_irq(priv->irq0, priv);
  2944. free_irq(priv->irq1, priv);
  2945. if (priv->internal_phy)
  2946. ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
  2947. clk_disable_unprepare(priv->clk);
  2948. return ret;
  2949. }
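/* Dump a TX ring's software and hardware state (producer/consumer indices,
 * free descriptors, interrupt mask) when TX error messages are enabled.
 */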
  2950. static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
  2951. {
  2952. struct bcmgenet_priv *priv = ring->priv;
  2953. u32 p_index, c_index, intsts, intmsk;
  2954. struct netdev_queue *txq;
  2955. unsigned int free_bds;
  2956. bool txq_stopped;
  2957. if (!netif_msg_tx_err(priv))
  2958. return;
  2959. txq = netdev_get_tx_queue(priv->dev, ring->queue);
  2960. spin_lock(&ring->lock);
  2961. if (ring->index == DESC_INDEX) {
  2962. intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
  2963. intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
  2964. } else {
  2965. intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
  2966. intmsk = 1 << ring->index;
  2967. }
  2968. c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
  2969. p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
  2970. txq_stopped = netif_tx_queue_stopped(txq);
  2971. free_bds = ring->free_bds;
  2972. spin_unlock(&ring->lock);
  2973. netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
  2974. "TX queue status: %s, interrupts: %s\n"
  2975. "(sw)free_bds: %d (sw)size: %d\n"
  2976. "(sw)p_index: %d (hw)p_index: %d\n"
  2977. "(sw)c_index: %d (hw)c_index: %d\n"
  2978. "(sw)clean_p: %d (sw)write_p: %d\n"
  2979. "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
  2980. ring->index, ring->queue,
  2981. txq_stopped ? "stopped" : "active",
  2982. intsts & intmsk ? "enabled" : "disabled",
  2983. free_bds, ring->size,
  2984. ring->prod_index, p_index & DMA_P_INDEX_MASK,
  2985. ring->c_index, c_index & DMA_C_INDEX_MASK,
  2986. ring->clean_ptr, ring->write_ptr,
  2987. ring->cb_ptr, ring->end_ptr);
  2988. }
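/* ndo_tx_timeout: dump every TX ring, reclaim completed buffers, re-enable
 * the TX completion interrupts and wake all queues.
 */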
  2989. static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
  2990. {
  2991. struct bcmgenet_priv *priv = netdev_priv(dev);
  2992. u32 int0_enable = 0;
  2993. u32 int1_enable = 0;
  2994. unsigned int q;
  2995. netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
  2996. for (q = 0; q < priv->hw_params->tx_queues; q++)
  2997. bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
  2998. bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
  2999. bcmgenet_tx_reclaim_all(dev);
  3000. for (q = 0; q < priv->hw_params->tx_queues; q++)
  3001. int1_enable |= (1 << q);
  3002. int0_enable = UMAC_IRQ_TXDMA_DONE;
  3003. /* Re-enable TX interrupts if disabled */
  3004. bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
  3005. bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
  3006. netif_trans_update(dev);
  3007. dev->stats.tx_errors++;
  3008. netif_tx_wake_all_queues(dev);
  3009. }
  3010. #define MAX_MDF_FILTER 17
  3011. static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
  3012. const unsigned char *addr,
  3013. int *i)
  3014. {
  3015. bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
  3016. UMAC_MDF_ADDR + (*i * 4));
  3017. bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
  3018. addr[4] << 8 | addr[5],
  3019. UMAC_MDF_ADDR + ((*i + 1) * 4));
  3020. *i += 2;
  3021. }
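/* ndo_set_rx_mode: program the MDF perfect filters (broadcast, own address,
 * unicast and multicast lists) or enable promiscuous mode when requested or
 * when the filters do not fit.
 */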
  3022. static void bcmgenet_set_rx_mode(struct net_device *dev)
  3023. {
  3024. struct bcmgenet_priv *priv = netdev_priv(dev);
  3025. struct netdev_hw_addr *ha;
  3026. int i, nfilter;
  3027. u32 reg;
  3028. netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
  3029. /* Number of filters needed */
  3030. nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
  3031. /*
3032. * Turn on promiscuous mode in three scenarios:
3033. * 1. IFF_PROMISC flag is set
3034. * 2. IFF_ALLMULTI flag is set
3035. * 3. The number of filters needed exceeds the number of filters
3036. * supported by the hardware.
  3037. */
  3038. reg = bcmgenet_umac_readl(priv, UMAC_CMD);
  3039. if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
  3040. (nfilter > MAX_MDF_FILTER)) {
  3041. reg |= CMD_PROMISC;
  3042. bcmgenet_umac_writel(priv, reg, UMAC_CMD);
  3043. bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
  3044. return;
  3045. } else {
  3046. reg &= ~CMD_PROMISC;
  3047. bcmgenet_umac_writel(priv, reg, UMAC_CMD);
  3048. }
  3049. /* update MDF filter */
  3050. i = 0;
  3051. /* Broadcast */
  3052. bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
3053. /* Our own MAC address */
  3054. bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
  3055. /* Unicast */
  3056. netdev_for_each_uc_addr(ha, dev)
  3057. bcmgenet_set_mdf_addr(priv, ha->addr, &i);
  3058. /* Multicast */
  3059. netdev_for_each_mc_addr(ha, dev)
  3060. bcmgenet_set_mdf_addr(priv, ha->addr, &i);
  3061. /* Enable filters */
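/* e.g. with nfilter == 4 this evaluates to GENMASK(16, 13) == 0x1e000,
 * i.e. the top four enable bits
 */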
  3062. reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
  3063. bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
  3064. }
  3065. /* Set the hardware MAC address. */
  3066. static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
  3067. {
  3068. struct sockaddr *addr = p;
  3069. /* Setting the MAC address at the hardware level is not possible
  3070. * without disabling the UniMAC RX/TX enable bits.
  3071. */
  3072. if (netif_running(dev))
  3073. return -EBUSY;
  3074. eth_hw_addr_set(dev, addr->sa_data);
  3075. return 0;
  3076. }
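/* Aggregate the per-ring byte, packet, error and drop counters into
 * dev->stats.
 */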
  3077. static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
  3078. {
  3079. struct bcmgenet_priv *priv = netdev_priv(dev);
  3080. unsigned long tx_bytes = 0, tx_packets = 0;
  3081. unsigned long rx_bytes = 0, rx_packets = 0;
  3082. unsigned long rx_errors = 0, rx_dropped = 0;
  3083. struct bcmgenet_tx_ring *tx_ring;
  3084. struct bcmgenet_rx_ring *rx_ring;
  3085. unsigned int q;
  3086. for (q = 0; q < priv->hw_params->tx_queues; q++) {
  3087. tx_ring = &priv->tx_rings[q];
  3088. tx_bytes += tx_ring->bytes;
  3089. tx_packets += tx_ring->packets;
  3090. }
  3091. tx_ring = &priv->tx_rings[DESC_INDEX];
  3092. tx_bytes += tx_ring->bytes;
  3093. tx_packets += tx_ring->packets;
  3094. for (q = 0; q < priv->hw_params->rx_queues; q++) {
  3095. rx_ring = &priv->rx_rings[q];
  3096. rx_bytes += rx_ring->bytes;
  3097. rx_packets += rx_ring->packets;
  3098. rx_errors += rx_ring->errors;
  3099. rx_dropped += rx_ring->dropped;
  3100. }
  3101. rx_ring = &priv->rx_rings[DESC_INDEX];
  3102. rx_bytes += rx_ring->bytes;
  3103. rx_packets += rx_ring->packets;
  3104. rx_errors += rx_ring->errors;
  3105. rx_dropped += rx_ring->dropped;
  3106. dev->stats.tx_bytes = tx_bytes;
  3107. dev->stats.tx_packets = tx_packets;
  3108. dev->stats.rx_bytes = rx_bytes;
  3109. dev->stats.rx_packets = rx_packets;
  3110. dev->stats.rx_errors = rx_errors;
  3111. dev->stats.rx_missed_errors = rx_errors;
  3112. dev->stats.rx_dropped = rx_dropped;
  3113. return &dev->stats;
  3114. }
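/* ndo_change_carrier: only allowed on MoCA interfaces using a pseudo
 * fixed-link PHY.
 */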
  3115. static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
  3116. {
  3117. struct bcmgenet_priv *priv = netdev_priv(dev);
  3118. if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
  3119. priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
  3120. return -EOPNOTSUPP;
  3121. if (new_carrier)
  3122. netif_carrier_on(dev);
  3123. else
  3124. netif_carrier_off(dev);
  3125. return 0;
  3126. }
  3127. static const struct net_device_ops bcmgenet_netdev_ops = {
  3128. .ndo_open = bcmgenet_open,
  3129. .ndo_stop = bcmgenet_close,
  3130. .ndo_start_xmit = bcmgenet_xmit,
  3131. .ndo_tx_timeout = bcmgenet_timeout,
  3132. .ndo_set_rx_mode = bcmgenet_set_rx_mode,
  3133. .ndo_set_mac_address = bcmgenet_set_mac_addr,
  3134. .ndo_eth_ioctl = phy_do_ioctl_running,
  3135. .ndo_set_features = bcmgenet_set_features,
  3136. #ifdef CONFIG_NET_POLL_CONTROLLER
  3137. .ndo_poll_controller = bcmgenet_poll_controller,
  3138. #endif
  3139. .ndo_get_stats = bcmgenet_get_stats,
  3140. .ndo_change_carrier = bcmgenet_change_carrier,
  3141. };
  3142. /* Array of GENET hardware parameters/characteristics */
  3143. static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
  3144. [GENET_V1] = {
  3145. .tx_queues = 0,
  3146. .tx_bds_per_q = 0,
  3147. .rx_queues = 0,
  3148. .rx_bds_per_q = 0,
  3149. .bp_in_en_shift = 16,
  3150. .bp_in_mask = 0xffff,
  3151. .hfb_filter_cnt = 16,
  3152. .qtag_mask = 0x1F,
  3153. .hfb_offset = 0x1000,
  3154. .rdma_offset = 0x2000,
  3155. .tdma_offset = 0x3000,
  3156. .words_per_bd = 2,
  3157. },
  3158. [GENET_V2] = {
  3159. .tx_queues = 4,
  3160. .tx_bds_per_q = 32,
  3161. .rx_queues = 0,
  3162. .rx_bds_per_q = 0,
  3163. .bp_in_en_shift = 16,
  3164. .bp_in_mask = 0xffff,
  3165. .hfb_filter_cnt = 16,
  3166. .qtag_mask = 0x1F,
  3167. .tbuf_offset = 0x0600,
  3168. .hfb_offset = 0x1000,
  3169. .hfb_reg_offset = 0x2000,
  3170. .rdma_offset = 0x3000,
  3171. .tdma_offset = 0x4000,
  3172. .words_per_bd = 2,
  3173. .flags = GENET_HAS_EXT,
  3174. },
  3175. [GENET_V3] = {
  3176. .tx_queues = 4,
  3177. .tx_bds_per_q = 32,
  3178. .rx_queues = 0,
  3179. .rx_bds_per_q = 0,
  3180. .bp_in_en_shift = 17,
  3181. .bp_in_mask = 0x1ffff,
  3182. .hfb_filter_cnt = 48,
  3183. .hfb_filter_size = 128,
  3184. .qtag_mask = 0x3F,
  3185. .tbuf_offset = 0x0600,
  3186. .hfb_offset = 0x8000,
  3187. .hfb_reg_offset = 0xfc00,
  3188. .rdma_offset = 0x10000,
  3189. .tdma_offset = 0x11000,
  3190. .words_per_bd = 2,
  3191. .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
  3192. GENET_HAS_MOCA_LINK_DET,
  3193. },
  3194. [GENET_V4] = {
  3195. .tx_queues = 4,
  3196. .tx_bds_per_q = 32,
  3197. .rx_queues = 0,
  3198. .rx_bds_per_q = 0,
  3199. .bp_in_en_shift = 17,
  3200. .bp_in_mask = 0x1ffff,
  3201. .hfb_filter_cnt = 48,
  3202. .hfb_filter_size = 128,
  3203. .qtag_mask = 0x3F,
  3204. .tbuf_offset = 0x0600,
  3205. .hfb_offset = 0x8000,
  3206. .hfb_reg_offset = 0xfc00,
  3207. .rdma_offset = 0x2000,
  3208. .tdma_offset = 0x4000,
  3209. .words_per_bd = 3,
  3210. .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
  3211. GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
  3212. },
  3213. [GENET_V5] = {
  3214. .tx_queues = 4,
  3215. .tx_bds_per_q = 32,
  3216. .rx_queues = 0,
  3217. .rx_bds_per_q = 0,
  3218. .bp_in_en_shift = 17,
  3219. .bp_in_mask = 0x1ffff,
  3220. .hfb_filter_cnt = 48,
  3221. .hfb_filter_size = 128,
  3222. .qtag_mask = 0x3F,
  3223. .tbuf_offset = 0x0600,
  3224. .hfb_offset = 0x8000,
  3225. .hfb_reg_offset = 0xfc00,
  3226. .rdma_offset = 0x2000,
  3227. .tdma_offset = 0x4000,
  3228. .words_per_bd = 3,
  3229. .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
  3230. GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
  3231. },
  3232. };
  3233. /* Infer hardware parameters from the detected GENET version */
  3234. static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
  3235. {
  3236. struct bcmgenet_hw_params *params;
  3237. u32 reg;
  3238. u8 major;
  3239. u16 gphy_rev;
  3240. if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
  3241. bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
  3242. genet_dma_ring_regs = genet_dma_ring_regs_v4;
  3243. } else if (GENET_IS_V3(priv)) {
  3244. bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
  3245. genet_dma_ring_regs = genet_dma_ring_regs_v123;
  3246. } else if (GENET_IS_V2(priv)) {
  3247. bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
  3248. genet_dma_ring_regs = genet_dma_ring_regs_v123;
  3249. } else if (GENET_IS_V1(priv)) {
  3250. bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
  3251. genet_dma_ring_regs = genet_dma_ring_regs_v123;
  3252. }
  3253. /* enum genet_version starts at 1 */
  3254. priv->hw_params = &bcmgenet_hw_params[priv->version];
  3255. params = priv->hw_params;
  3256. /* Read GENET HW version */
  3257. reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
  3258. major = (reg >> 24 & 0x0f);
  3259. if (major == 6)
  3260. major = 5;
  3261. else if (major == 5)
  3262. major = 4;
  3263. else if (major == 0)
  3264. major = 1;
  3265. if (major != priv->version) {
  3266. dev_err(&priv->pdev->dev,
  3267. "GENET version mismatch, got: %d, configured for: %d\n",
  3268. major, priv->version);
  3269. }
  3270. /* Print the GENET core version */
  3271. dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
  3272. major, (reg >> 16) & 0x0f, reg & 0xffff);
  3273. /* Store the integrated PHY revision for the MDIO probing function
  3274. * to pass this information to the PHY driver. The PHY driver expects
  3275. * to find the PHY major revision in bits 15:8 while the GENET register
  3276. * stores that information in bits 7:0, account for that.
  3277. *
  3278. * On newer chips, starting with PHY revision G0, a new scheme is
  3279. * deployed similar to the Starfighter 2 switch with GPHY major
  3280. * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
  3281. * is reserved as well as special value 0x01ff, we have a small
  3282. * heuristic to check for the new GPHY revision and re-arrange things
  3283. * so the GPHY driver is happy.
  3284. */
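/* For example, an old-scheme value of 0x00a0 is stored as gphy_rev 0xa000,
 * while a new-scheme value such as 0x1000 (rev G0) is stored unchanged.
 */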
  3285. gphy_rev = reg & 0xffff;
  3286. if (GENET_IS_V5(priv)) {
  3287. /* The EPHY revision should come from the MDIO registers of
  3288. * the PHY not from GENET.
  3289. */
  3290. if (gphy_rev != 0) {
  3291. pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
  3292. gphy_rev);
  3293. }
3294. /* These values are reserved and so require special treatment */
  3295. } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
  3296. pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
  3297. return;
  3298. /* This is the good old scheme, just GPHY major, no minor nor patch */
  3299. } else if ((gphy_rev & 0xf0) != 0) {
  3300. priv->gphy_rev = gphy_rev << 8;
  3301. /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
  3302. } else if ((gphy_rev & 0xff00) != 0) {
  3303. priv->gphy_rev = gphy_rev;
  3304. }
  3305. #ifdef CONFIG_PHYS_ADDR_T_64BIT
  3306. if (!(params->flags & GENET_HAS_40BITS))
  3307. pr_warn("GENET does not support 40-bits PA\n");
  3308. #endif
  3309. pr_debug("Configuration for version: %d\n"
  3310. "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
  3311. "BP << en: %2d, BP msk: 0x%05x\n"
  3312. "HFB count: %2d, QTAQ msk: 0x%05x\n"
  3313. "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
  3314. "RDMA: 0x%05x, TDMA: 0x%05x\n"
  3315. "Words/BD: %d\n",
  3316. priv->version,
  3317. params->tx_queues, params->tx_bds_per_q,
  3318. params->rx_queues, params->rx_bds_per_q,
  3319. params->bp_in_en_shift, params->bp_in_mask,
  3320. params->hfb_filter_cnt, params->qtag_mask,
  3321. params->tbuf_offset, params->hfb_offset,
  3322. params->hfb_reg_offset,
  3323. params->rdma_offset, params->tdma_offset,
  3324. params->words_per_bd);
  3325. }
  3326. struct bcmgenet_plat_data {
  3327. enum bcmgenet_version version;
  3328. u32 dma_max_burst_length;
  3329. bool ephy_16nm;
  3330. };
  3331. static const struct bcmgenet_plat_data v1_plat_data = {
  3332. .version = GENET_V1,
  3333. .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
  3334. };
  3335. static const struct bcmgenet_plat_data v2_plat_data = {
  3336. .version = GENET_V2,
  3337. .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
  3338. };
  3339. static const struct bcmgenet_plat_data v3_plat_data = {
  3340. .version = GENET_V3,
  3341. .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
  3342. };
  3343. static const struct bcmgenet_plat_data v4_plat_data = {
  3344. .version = GENET_V4,
  3345. .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
  3346. };
  3347. static const struct bcmgenet_plat_data v5_plat_data = {
  3348. .version = GENET_V5,
  3349. .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
  3350. };
  3351. static const struct bcmgenet_plat_data bcm2711_plat_data = {
  3352. .version = GENET_V5,
  3353. .dma_max_burst_length = 0x08,
  3354. };
  3355. static const struct bcmgenet_plat_data bcm7712_plat_data = {
  3356. .version = GENET_V5,
  3357. .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
  3358. .ephy_16nm = true,
  3359. };
  3360. static const struct of_device_id bcmgenet_match[] = {
  3361. { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
  3362. { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
  3363. { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
  3364. { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
  3365. { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
  3366. { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
  3367. { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
  3368. { },
  3369. };
  3370. MODULE_DEVICE_TABLE(of, bcmgenet_match);
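/* Platform probe: allocate the netdev, map registers, fetch IRQs and clocks,
 * detect the GENET version, set the DMA mask, determine the MAC address,
 * initialize the MII bus and register the device.
 */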
  3371. static int bcmgenet_probe(struct platform_device *pdev)
  3372. {
  3373. struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
  3374. const struct bcmgenet_plat_data *pdata;
  3375. struct bcmgenet_priv *priv;
  3376. struct net_device *dev;
  3377. unsigned int i;
  3378. int err = -EIO;
  3379. /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
  3380. dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
  3381. GENET_MAX_MQ_CNT + 1);
  3382. if (!dev) {
  3383. dev_err(&pdev->dev, "can't allocate net device\n");
  3384. return -ENOMEM;
  3385. }
  3386. priv = netdev_priv(dev);
  3387. priv->irq0 = platform_get_irq(pdev, 0);
  3388. if (priv->irq0 < 0) {
  3389. err = priv->irq0;
  3390. goto err;
  3391. }
  3392. priv->irq1 = platform_get_irq(pdev, 1);
  3393. if (priv->irq1 < 0) {
  3394. err = priv->irq1;
  3395. goto err;
  3396. }
  3397. priv->wol_irq = platform_get_irq_optional(pdev, 2);
  3398. if (priv->wol_irq == -EPROBE_DEFER) {
  3399. err = priv->wol_irq;
  3400. goto err;
  3401. }
  3402. priv->base = devm_platform_ioremap_resource(pdev, 0);
  3403. if (IS_ERR(priv->base)) {
  3404. err = PTR_ERR(priv->base);
  3405. goto err;
  3406. }
  3407. spin_lock_init(&priv->lock);
  3408. /* Set default pause parameters */
  3409. priv->autoneg_pause = 1;
  3410. priv->tx_pause = 1;
  3411. priv->rx_pause = 1;
  3412. SET_NETDEV_DEV(dev, &pdev->dev);
  3413. dev_set_drvdata(&pdev->dev, dev);
  3414. dev->watchdog_timeo = 2 * HZ;
  3415. dev->ethtool_ops = &bcmgenet_ethtool_ops;
  3416. dev->netdev_ops = &bcmgenet_netdev_ops;
  3417. priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
  3418. /* Set default features */
  3419. dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
  3420. NETIF_F_RXCSUM;
  3421. dev->hw_features |= dev->features;
  3422. dev->vlan_features |= dev->features;
  3423. /* Request the WOL interrupt and advertise suspend if available */
  3424. priv->wol_irq_disabled = true;
  3425. if (priv->wol_irq > 0) {
  3426. err = devm_request_irq(&pdev->dev, priv->wol_irq,
  3427. bcmgenet_wol_isr, 0, dev->name, priv);
  3428. if (!err)
  3429. device_set_wakeup_capable(&pdev->dev, 1);
  3430. }
  3431. /* Set the needed headroom to account for any possible
  3432. * features enabling/disabling at runtime
  3433. */
  3434. dev->needed_headroom += 64;
  3435. priv->dev = dev;
  3436. priv->pdev = pdev;
  3437. pdata = device_get_match_data(&pdev->dev);
  3438. if (pdata) {
  3439. priv->version = pdata->version;
  3440. priv->dma_max_burst_length = pdata->dma_max_burst_length;
  3441. priv->ephy_16nm = pdata->ephy_16nm;
  3442. } else {
  3443. priv->version = pd->genet_version;
  3444. priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
  3445. }
  3446. priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
  3447. if (IS_ERR(priv->clk)) {
  3448. dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
  3449. err = PTR_ERR(priv->clk);
  3450. goto err;
  3451. }
  3452. err = clk_prepare_enable(priv->clk);
  3453. if (err)
  3454. goto err;
  3455. bcmgenet_set_hw_params(priv);
  3456. err = -EIO;
  3457. if (priv->hw_params->flags & GENET_HAS_40BITS)
  3458. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
  3459. if (err)
  3460. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  3461. if (err)
  3462. goto err_clk_disable;
3463. /* MII wait queue */
  3464. init_waitqueue_head(&priv->wq);
  3465. /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
  3466. priv->rx_buf_len = RX_BUF_LENGTH;
  3467. INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
  3468. priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
  3469. if (IS_ERR(priv->clk_wol)) {
  3470. dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
  3471. err = PTR_ERR(priv->clk_wol);
  3472. goto err_clk_disable;
  3473. }
  3474. priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
  3475. if (IS_ERR(priv->clk_eee)) {
  3476. dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
  3477. err = PTR_ERR(priv->clk_eee);
  3478. goto err_clk_disable;
  3479. }
  3480. /* If this is an internal GPHY, power it on now, before UniMAC is
  3481. * brought out of reset as absolutely no UniMAC activity is allowed
  3482. */
  3483. if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
  3484. bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
  3485. if (pd && !IS_ERR_OR_NULL(pd->mac_address))
  3486. eth_hw_addr_set(dev, pd->mac_address);
  3487. else
  3488. if (device_get_ethdev_address(&pdev->dev, dev))
  3489. if (has_acpi_companion(&pdev->dev)) {
  3490. u8 addr[ETH_ALEN];
  3491. bcmgenet_get_hw_addr(priv, addr);
  3492. eth_hw_addr_set(dev, addr);
  3493. }
  3494. if (!is_valid_ether_addr(dev->dev_addr)) {
  3495. dev_warn(&pdev->dev, "using random Ethernet MAC\n");
  3496. eth_hw_addr_random(dev);
  3497. }
  3498. reset_umac(priv);
  3499. err = bcmgenet_mii_init(dev);
  3500. if (err)
  3501. goto err_clk_disable;
3502. /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware queues,
3503. * just the ring 16 descriptor-based TX queue)
  3504. */
  3505. netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
  3506. netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
  3507. /* Set default coalescing parameters */
  3508. for (i = 0; i < priv->hw_params->rx_queues; i++)
  3509. priv->rx_rings[i].rx_max_coalesced_frames = 1;
  3510. priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
  3511. /* libphy will determine the link state */
  3512. netif_carrier_off(dev);
  3513. /* Turn off the main clock, WOL clock is handled separately */
  3514. clk_disable_unprepare(priv->clk);
  3515. err = register_netdev(dev);
  3516. if (err) {
  3517. bcmgenet_mii_exit(dev);
  3518. goto err;
  3519. }
  3520. return err;
  3521. err_clk_disable:
  3522. clk_disable_unprepare(priv->clk);
  3523. err:
  3524. free_netdev(dev);
  3525. return err;
  3526. }
  3527. static int bcmgenet_remove(struct platform_device *pdev)
  3528. {
  3529. struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
  3530. dev_set_drvdata(&pdev->dev, NULL);
  3531. unregister_netdev(priv->dev);
  3532. bcmgenet_mii_exit(priv->dev);
  3533. free_netdev(priv->dev);
  3534. return 0;
  3535. }
  3536. static void bcmgenet_shutdown(struct platform_device *pdev)
  3537. {
  3538. bcmgenet_remove(pdev);
  3539. }
  3540. #ifdef CONFIG_PM_SLEEP
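/* Early resume: re-enable the main clock and report any Wake-on-LAN interrupt
 * as a wakeup event before clearing it.
 */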
  3541. static int bcmgenet_resume_noirq(struct device *d)
  3542. {
  3543. struct net_device *dev = dev_get_drvdata(d);
  3544. struct bcmgenet_priv *priv = netdev_priv(dev);
  3545. int ret;
  3546. u32 reg;
  3547. if (!netif_running(dev))
  3548. return 0;
  3549. /* Turn on the clock */
  3550. ret = clk_prepare_enable(priv->clk);
  3551. if (ret)
  3552. return ret;
  3553. if (device_may_wakeup(d) && priv->wolopts) {
  3554. /* Account for Wake-on-LAN events and clear those events
  3555. * (Some devices need more time between enabling the clocks
3556. * and the interrupt register reflecting the wake event, so
  3557. * read the register twice)
  3558. */
  3559. reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
  3560. reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
  3561. if (reg & UMAC_IRQ_WAKE_EVENT)
  3562. pm_wakeup_event(&priv->pdev->dev, 0);
  3563. }
  3564. bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
  3565. return 0;
  3566. }
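/* Resume: leave WoL mode, restore power, UniMAC, PHY, features, filters and
 * DMA state, then restart the interface.
 */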
  3567. static int bcmgenet_resume(struct device *d)
  3568. {
  3569. struct net_device *dev = dev_get_drvdata(d);
  3570. struct bcmgenet_priv *priv = netdev_priv(dev);
  3571. struct bcmgenet_rxnfc_rule *rule;
  3572. unsigned long dma_ctrl;
  3573. int ret;
  3574. if (!netif_running(dev))
  3575. return 0;
  3576. /* From WOL-enabled suspend, switch to regular clock */
  3577. if (device_may_wakeup(d) && priv->wolopts)
  3578. bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
  3579. /* If this is an internal GPHY, power it back on now, before UniMAC is
  3580. * brought out of reset as absolutely no UniMAC activity is allowed
  3581. */
  3582. if (priv->internal_phy)
  3583. bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
  3584. bcmgenet_umac_reset(priv);
  3585. init_umac(priv);
  3586. phy_init_hw(dev->phydev);
  3587. /* Speed settings must be restored */
  3588. genphy_config_aneg(dev->phydev);
  3589. bcmgenet_mii_config(priv->dev, false);
  3590. /* Restore enabled features */
  3591. bcmgenet_set_features(dev, dev->features);
  3592. bcmgenet_set_hw_addr(priv, dev->dev_addr);
  3593. /* Restore hardware filters */
  3594. bcmgenet_hfb_clear(priv);
  3595. list_for_each_entry(rule, &priv->rxnfc_list, list)
  3596. if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
  3597. bcmgenet_hfb_create_rxnfc_filter(priv, rule);
  3598. /* Disable RX/TX DMA and flush TX queues */
  3599. dma_ctrl = bcmgenet_dma_disable(priv);
  3600. /* Reinitialize TDMA and RDMA and SW housekeeping */
  3601. ret = bcmgenet_init_dma(priv);
  3602. if (ret) {
  3603. netdev_err(dev, "failed to initialize DMA\n");
  3604. goto out_clk_disable;
  3605. }
  3606. /* Always enable ring 16 - descriptor ring */
  3607. bcmgenet_enable_dma(priv, dma_ctrl);
  3608. if (!device_may_wakeup(d))
  3609. phy_resume(dev->phydev);
  3610. bcmgenet_netif_start(dev);
  3611. netif_device_attach(dev);
  3612. return 0;
  3613. out_clk_disable:
  3614. if (priv->internal_phy)
  3615. bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
  3616. clk_disable_unprepare(priv->clk);
  3617. return ret;
  3618. }
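/* Suspend: detach the interface, stop the data path and PHY (suspending the
 * PHY when Wake-on-LAN is not in use) and disable HFB filtering.
 */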
  3619. static int bcmgenet_suspend(struct device *d)
  3620. {
  3621. struct net_device *dev = dev_get_drvdata(d);
  3622. struct bcmgenet_priv *priv = netdev_priv(dev);
  3623. if (!netif_running(dev))
  3624. return 0;
  3625. netif_device_detach(dev);
  3626. bcmgenet_netif_stop(dev, true);
  3627. if (!device_may_wakeup(d))
  3628. phy_suspend(dev->phydev);
  3629. /* Disable filtering */
  3630. bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
  3631. return 0;
  3632. }
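/* Late suspend: enter Wake-on-LAN mode (or power down the internal PHY) and
 * gate the main clock.
 */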
  3633. static int bcmgenet_suspend_noirq(struct device *d)
  3634. {
  3635. struct net_device *dev = dev_get_drvdata(d);
  3636. struct bcmgenet_priv *priv = netdev_priv(dev);
  3637. int ret = 0;
  3638. if (!netif_running(dev))
  3639. return 0;
  3640. /* Prepare the device for Wake-on-LAN and switch to the slow clock */
  3641. if (device_may_wakeup(d) && priv->wolopts)
  3642. ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
  3643. else if (priv->internal_phy)
  3644. ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
  3645. /* Let the framework handle resumption and leave the clocks on */
  3646. if (ret)
  3647. return ret;
  3648. /* Turn off the clocks */
  3649. clk_disable_unprepare(priv->clk);
  3650. return 0;
  3651. }
  3652. #else
  3653. #define bcmgenet_suspend NULL
  3654. #define bcmgenet_suspend_noirq NULL
  3655. #define bcmgenet_resume NULL
  3656. #define bcmgenet_resume_noirq NULL
  3657. #endif /* CONFIG_PM_SLEEP */
  3658. static const struct dev_pm_ops bcmgenet_pm_ops = {
  3659. .suspend = bcmgenet_suspend,
  3660. .suspend_noirq = bcmgenet_suspend_noirq,
  3661. .resume = bcmgenet_resume,
  3662. .resume_noirq = bcmgenet_resume_noirq,
  3663. };
  3664. static const struct acpi_device_id genet_acpi_match[] = {
  3665. { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
  3666. { },
  3667. };
  3668. MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
  3669. static struct platform_driver bcmgenet_driver = {
  3670. .probe = bcmgenet_probe,
  3671. .remove = bcmgenet_remove,
  3672. .shutdown = bcmgenet_shutdown,
  3673. .driver = {
  3674. .name = "bcmgenet",
  3675. .of_match_table = bcmgenet_match,
  3676. .pm = &bcmgenet_pm_ops,
  3677. .acpi_match_table = genet_acpi_match,
  3678. },
  3679. };
  3680. module_platform_driver(bcmgenet_driver);
  3681. MODULE_AUTHOR("Broadcom Corporation");
  3682. MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
  3683. MODULE_ALIAS("platform:bcmgenet");
  3684. MODULE_LICENSE("GPL");
  3685. MODULE_SOFTDEP("pre: mdio-bcm-unimac");