dwc-xlgmac-hw.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */
  17. #include <linux/phy.h>
  18. #include <linux/mdio.h>
  19. #include <linux/clk.h>
  20. #include <linux/bitrev.h>
  21. #include <linux/crc32.h>
  22. #include <linux/crc32poly.h>
  23. #include <linux/dcbnl.h>
  24. #include "dwc-xlgmac.h"
  25. #include "dwc-xlgmac-reg.h"
  26. static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
  27. {
  28. return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
  29. TX_NORMAL_DESC3_OWN_POS,
  30. TX_NORMAL_DESC3_OWN_LEN);
  31. }
  32. static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
  33. {
  34. u32 regval;
  35. regval = readl(pdata->mac_regs + MAC_RCR);
  36. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
  37. MAC_RCR_IPC_LEN, 0);
  38. writel(regval, pdata->mac_regs + MAC_RCR);
  39. return 0;
  40. }
  41. static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
  42. {
  43. u32 regval;
  44. regval = readl(pdata->mac_regs + MAC_RCR);
  45. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
  46. MAC_RCR_IPC_LEN, 1);
  47. writel(regval, pdata->mac_regs + MAC_RCR);
  48. return 0;
  49. }
  50. static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr)
  51. {
  52. unsigned int mac_addr_hi, mac_addr_lo;
  53. mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
  54. mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
  55. (addr[1] << 8) | (addr[0] << 0);
  56. writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
  57. writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
  58. return 0;
  59. }
  60. static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
  61. struct netdev_hw_addr *ha,
  62. unsigned int *mac_reg)
  63. {
  64. unsigned int mac_addr_hi, mac_addr_lo;
  65. u8 *mac_addr;
  66. mac_addr_lo = 0;
  67. mac_addr_hi = 0;
  68. if (ha) {
  69. mac_addr = (u8 *)&mac_addr_lo;
  70. mac_addr[0] = ha->addr[0];
  71. mac_addr[1] = ha->addr[1];
  72. mac_addr[2] = ha->addr[2];
  73. mac_addr[3] = ha->addr[3];
  74. mac_addr = (u8 *)&mac_addr_hi;
  75. mac_addr[0] = ha->addr[4];
  76. mac_addr[1] = ha->addr[5];
  77. netif_dbg(pdata, drv, pdata->netdev,
  78. "adding mac address %pM at %#x\n",
  79. ha->addr, *mac_reg);
  80. mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
  81. MAC_MACA1HR_AE_POS,
  82. MAC_MACA1HR_AE_LEN,
  83. 1);
  84. }
  85. writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
  86. *mac_reg += MAC_MACA_INC;
  87. writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
  88. *mac_reg += MAC_MACA_INC;
  89. }
  90. static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
  91. {
  92. u32 regval;
  93. regval = readl(pdata->mac_regs + MAC_VLANTR);
  94. /* Put the VLAN tag in the Rx descriptor */
  95. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
  96. MAC_VLANTR_EVLRXS_LEN, 1);
  97. /* Don't check the VLAN type */
  98. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
  99. MAC_VLANTR_DOVLTC_LEN, 1);
  100. /* Check only C-TAG (0x8100) packets */
  101. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
  102. MAC_VLANTR_ERSVLM_LEN, 0);
  103. /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
  104. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
  105. MAC_VLANTR_ESVL_LEN, 0);
  106. /* Enable VLAN tag stripping */
  107. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
  108. MAC_VLANTR_EVLS_LEN, 0x3);
  109. writel(regval, pdata->mac_regs + MAC_VLANTR);
  110. return 0;
  111. }
  112. static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
  113. {
  114. u32 regval;
  115. regval = readl(pdata->mac_regs + MAC_VLANTR);
  116. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
  117. MAC_VLANTR_EVLS_LEN, 0);
  118. writel(regval, pdata->mac_regs + MAC_VLANTR);
  119. return 0;
  120. }
  121. static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
  122. {
  123. u32 regval;
  124. regval = readl(pdata->mac_regs + MAC_PFR);
  125. /* Enable VLAN filtering */
  126. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
  127. MAC_PFR_VTFE_LEN, 1);
  128. writel(regval, pdata->mac_regs + MAC_PFR);
  129. regval = readl(pdata->mac_regs + MAC_VLANTR);
  130. /* Enable VLAN Hash Table filtering */
  131. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
  132. MAC_VLANTR_VTHM_LEN, 1);
  133. /* Disable VLAN tag inverse matching */
  134. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
  135. MAC_VLANTR_VTIM_LEN, 0);
  136. /* Only filter on the lower 12-bits of the VLAN tag */
  137. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
  138. MAC_VLANTR_ETV_LEN, 1);
  139. /* In order for the VLAN Hash Table filtering to be effective,
  140. * the VLAN tag identifier in the VLAN Tag Register must not
  141. * be zero. Set the VLAN tag identifier to "1" to enable the
  142. * VLAN Hash Table filtering. This implies that a VLAN tag of
  143. * 1 will always pass filtering.
  144. */
  145. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
  146. MAC_VLANTR_VL_LEN, 1);
  147. writel(regval, pdata->mac_regs + MAC_VLANTR);
  148. return 0;
  149. }
  150. static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
  151. {
  152. u32 regval;
  153. regval = readl(pdata->mac_regs + MAC_PFR);
  154. /* Disable VLAN filtering */
  155. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
  156. MAC_PFR_VTFE_LEN, 0);
  157. writel(regval, pdata->mac_regs + MAC_PFR);
  158. return 0;
  159. }
  160. static u32 xlgmac_vid_crc32_le(__le16 vid_le)
  161. {
  162. unsigned char *data = (unsigned char *)&vid_le;
  163. unsigned char data_byte = 0;
  164. u32 crc = ~0;
  165. u32 temp = 0;
  166. int i, bits;
  167. bits = get_bitmask_order(VLAN_VID_MASK);
  168. for (i = 0; i < bits; i++) {
  169. if ((i % 8) == 0)
  170. data_byte = data[i / 8];
  171. temp = ((crc & 1) ^ data_byte) & 1;
  172. crc >>= 1;
  173. data_byte >>= 1;
  174. if (temp)
  175. crc ^= CRC32_POLY_LE;
  176. }
  177. return crc;
  178. }
  179. static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
  180. {
  181. u16 vlan_hash_table = 0;
  182. __le16 vid_le;
  183. u32 regval;
  184. u32 crc;
  185. u16 vid;
  186. /* Generate the VLAN Hash Table value */
  187. for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
  188. /* Get the CRC32 value of the VLAN ID */
  189. vid_le = cpu_to_le16(vid);
  190. crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
  191. vlan_hash_table |= (1 << crc);
  192. }
  193. regval = readl(pdata->mac_regs + MAC_VLANHTR);
  194. /* Set the VLAN Hash Table filtering register */
  195. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
  196. MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
  197. writel(regval, pdata->mac_regs + MAC_VLANHTR);
  198. return 0;
  199. }
  200. static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
  201. unsigned int enable)
  202. {
  203. unsigned int val = enable ? 1 : 0;
  204. u32 regval;
  205. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
  206. MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
  207. if (regval == val)
  208. return 0;
  209. netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
  210. enable ? "entering" : "leaving");
  211. regval = readl(pdata->mac_regs + MAC_PFR);
  212. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
  213. MAC_PFR_PR_LEN, val);
  214. writel(regval, pdata->mac_regs + MAC_PFR);
  215. /* Hardware will still perform VLAN filtering in promiscuous mode */
  216. if (enable) {
  217. xlgmac_disable_rx_vlan_filtering(pdata);
  218. } else {
  219. if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
  220. xlgmac_enable_rx_vlan_filtering(pdata);
  221. }
  222. return 0;
  223. }
  224. static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
  225. unsigned int enable)
  226. {
  227. unsigned int val = enable ? 1 : 0;
  228. u32 regval;
  229. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
  230. MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
  231. if (regval == val)
  232. return 0;
  233. netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
  234. enable ? "entering" : "leaving");
  235. regval = readl(pdata->mac_regs + MAC_PFR);
  236. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
  237. MAC_PFR_PM_LEN, val);
  238. writel(regval, pdata->mac_regs + MAC_PFR);
  239. return 0;
  240. }
  241. static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
  242. {
  243. struct net_device *netdev = pdata->netdev;
  244. struct netdev_hw_addr *ha;
  245. unsigned int addn_macs;
  246. unsigned int mac_reg;
  247. mac_reg = MAC_MACA1HR;
  248. addn_macs = pdata->hw_feat.addn_mac;
  249. if (netdev_uc_count(netdev) > addn_macs) {
  250. xlgmac_set_promiscuous_mode(pdata, 1);
  251. } else {
  252. netdev_for_each_uc_addr(ha, netdev) {
  253. xlgmac_set_mac_reg(pdata, ha, &mac_reg);
  254. addn_macs--;
  255. }
  256. if (netdev_mc_count(netdev) > addn_macs) {
  257. xlgmac_set_all_multicast_mode(pdata, 1);
  258. } else {
  259. netdev_for_each_mc_addr(ha, netdev) {
  260. xlgmac_set_mac_reg(pdata, ha, &mac_reg);
  261. addn_macs--;
  262. }
  263. }
  264. }
  265. /* Clear remaining additional MAC address entries */
  266. while (addn_macs--)
  267. xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
  268. }
  269. static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
  270. {
  271. unsigned int hash_table_shift, hash_table_count;
  272. u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
  273. struct net_device *netdev = pdata->netdev;
  274. struct netdev_hw_addr *ha;
  275. unsigned int hash_reg;
  276. unsigned int i;
  277. u32 crc;
  278. hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
  279. hash_table_count = pdata->hw_feat.hash_table_size / 32;
  280. memset(hash_table, 0, sizeof(hash_table));
  281. /* Build the MAC Hash Table register values */
  282. netdev_for_each_uc_addr(ha, netdev) {
  283. crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
  284. crc >>= hash_table_shift;
  285. hash_table[crc >> 5] |= (1 << (crc & 0x1f));
  286. }
  287. netdev_for_each_mc_addr(ha, netdev) {
  288. crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
  289. crc >>= hash_table_shift;
  290. hash_table[crc >> 5] |= (1 << (crc & 0x1f));
  291. }
  292. /* Set the MAC Hash Table registers */
  293. hash_reg = MAC_HTR0;
  294. for (i = 0; i < hash_table_count; i++) {
  295. writel(hash_table[i], pdata->mac_regs + hash_reg);
  296. hash_reg += MAC_HTR_INC;
  297. }
  298. }
  299. static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
  300. {
  301. if (pdata->hw_feat.hash_table_size)
  302. xlgmac_set_mac_hash_table(pdata);
  303. else
  304. xlgmac_set_mac_addn_addrs(pdata);
  305. return 0;
  306. }
  307. static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
  308. {
  309. u32 regval;
  310. xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
  311. /* Filtering is done using perfect filtering and hash filtering */
  312. if (pdata->hw_feat.hash_table_size) {
  313. regval = readl(pdata->mac_regs + MAC_PFR);
  314. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
  315. MAC_PFR_HPF_LEN, 1);
  316. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
  317. MAC_PFR_HUC_LEN, 1);
  318. regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
  319. MAC_PFR_HMC_LEN, 1);
  320. writel(regval, pdata->mac_regs + MAC_PFR);
  321. }
  322. }
  323. static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
  324. {
  325. unsigned int val;
  326. u32 regval;
  327. val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
  328. regval = readl(pdata->mac_regs + MAC_RCR);
  329. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
  330. MAC_RCR_JE_LEN, val);
  331. writel(regval, pdata->mac_regs + MAC_RCR);
  332. }
  333. static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
  334. {
  335. if (pdata->netdev->features & NETIF_F_RXCSUM)
  336. xlgmac_enable_rx_csum(pdata);
  337. else
  338. xlgmac_disable_rx_csum(pdata);
  339. }
  340. static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
  341. {
  342. u32 regval;
  343. regval = readl(pdata->mac_regs + MAC_VLANIR);
  344. /* Indicate that VLAN Tx CTAGs come from context descriptors */
  345. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
  346. MAC_VLANIR_CSVL_LEN, 0);
  347. regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
  348. MAC_VLANIR_VLTI_LEN, 1);
  349. writel(regval, pdata->mac_regs + MAC_VLANIR);
  350. /* Set the current VLAN Hash Table register value */
  351. xlgmac_update_vlan_hash_table(pdata);
  352. if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
  353. xlgmac_enable_rx_vlan_filtering(pdata);
  354. else
  355. xlgmac_disable_rx_vlan_filtering(pdata);
  356. if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
  357. xlgmac_enable_rx_vlan_stripping(pdata);
  358. else
  359. xlgmac_disable_rx_vlan_stripping(pdata);
  360. }
  361. static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
  362. {
  363. struct net_device *netdev = pdata->netdev;
  364. unsigned int pr_mode, am_mode;
  365. pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
  366. am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
  367. xlgmac_set_promiscuous_mode(pdata, pr_mode);
  368. xlgmac_set_all_multicast_mode(pdata, am_mode);
  369. xlgmac_add_mac_addresses(pdata);
  370. return 0;
  371. }
  372. static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
  373. struct xlgmac_channel *channel)
  374. {
  375. unsigned int tx_dsr, tx_pos, tx_qidx;
  376. unsigned long tx_timeout;
  377. unsigned int tx_status;
  378. /* Calculate the status register to read and the position within */
  379. if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
  380. tx_dsr = DMA_DSR0;
  381. tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
  382. DMA_DSR0_TPS_START;
  383. } else {
  384. tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
  385. tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
  386. tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
  387. DMA_DSRX_TPS_START;
  388. }
  389. /* The Tx engine cannot be stopped if it is actively processing
  390. * descriptors. Wait for the Tx engine to enter the stopped or
  391. * suspended state. Don't wait forever though...
  392. */
  393. tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
  394. while (time_before(jiffies, tx_timeout)) {
  395. tx_status = readl(pdata->mac_regs + tx_dsr);
  396. tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
  397. DMA_DSR_TPS_LEN);
  398. if ((tx_status == DMA_TPS_STOPPED) ||
  399. (tx_status == DMA_TPS_SUSPENDED))
  400. break;
  401. usleep_range(500, 1000);
  402. }
  403. if (!time_before(jiffies, tx_timeout))
  404. netdev_info(pdata->netdev,
  405. "timed out waiting for Tx DMA channel %u to stop\n",
  406. channel->queue_index);
  407. }
  408. static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
  409. {
  410. struct xlgmac_channel *channel;
  411. unsigned int i;
  412. u32 regval;
  413. /* Enable each Tx DMA channel */
  414. channel = pdata->channel_head;
  415. for (i = 0; i < pdata->channel_count; i++, channel++) {
  416. if (!channel->tx_ring)
  417. break;
  418. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  419. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
  420. DMA_CH_TCR_ST_LEN, 1);
  421. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  422. }
  423. /* Enable each Tx queue */
  424. for (i = 0; i < pdata->tx_q_count; i++) {
  425. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  426. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
  427. MTL_Q_TQOMR_TXQEN_LEN,
  428. MTL_Q_ENABLED);
  429. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  430. }
  431. /* Enable MAC Tx */
  432. regval = readl(pdata->mac_regs + MAC_TCR);
  433. regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
  434. MAC_TCR_TE_LEN, 1);
  435. writel(regval, pdata->mac_regs + MAC_TCR);
  436. }
  437. static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
  438. {
  439. struct xlgmac_channel *channel;
  440. unsigned int i;
  441. u32 regval;
  442. /* Prepare for Tx DMA channel stop */
  443. channel = pdata->channel_head;
  444. for (i = 0; i < pdata->channel_count; i++, channel++) {
  445. if (!channel->tx_ring)
  446. break;
  447. xlgmac_prepare_tx_stop(pdata, channel);
  448. }
  449. /* Disable MAC Tx */
  450. regval = readl(pdata->mac_regs + MAC_TCR);
  451. regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
  452. MAC_TCR_TE_LEN, 0);
  453. writel(regval, pdata->mac_regs + MAC_TCR);
  454. /* Disable each Tx queue */
  455. for (i = 0; i < pdata->tx_q_count; i++) {
  456. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  457. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
  458. MTL_Q_TQOMR_TXQEN_LEN, 0);
  459. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  460. }
  461. /* Disable each Tx DMA channel */
  462. channel = pdata->channel_head;
  463. for (i = 0; i < pdata->channel_count; i++, channel++) {
  464. if (!channel->tx_ring)
  465. break;
  466. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  467. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
  468. DMA_CH_TCR_ST_LEN, 0);
  469. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  470. }
  471. }
  472. static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
  473. unsigned int queue)
  474. {
  475. unsigned int rx_status, prxq, rxqsts;
  476. unsigned long rx_timeout;
  477. /* The Rx engine cannot be stopped if it is actively processing
  478. * packets. Wait for the Rx queue to empty the Rx fifo. Don't
  479. * wait forever though...
  480. */
  481. rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
  482. while (time_before(jiffies, rx_timeout)) {
  483. rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
  484. prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
  485. MTL_Q_RQDR_PRXQ_LEN);
  486. rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
  487. MTL_Q_RQDR_RXQSTS_LEN);
  488. if ((prxq == 0) && (rxqsts == 0))
  489. break;
  490. usleep_range(500, 1000);
  491. }
  492. if (!time_before(jiffies, rx_timeout))
  493. netdev_info(pdata->netdev,
  494. "timed out waiting for Rx queue %u to empty\n",
  495. queue);
  496. }
  497. static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
  498. {
  499. struct xlgmac_channel *channel;
  500. unsigned int regval, i;
  501. /* Enable each Rx DMA channel */
  502. channel = pdata->channel_head;
  503. for (i = 0; i < pdata->channel_count; i++, channel++) {
  504. if (!channel->rx_ring)
  505. break;
  506. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  507. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
  508. DMA_CH_RCR_SR_LEN, 1);
  509. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  510. }
  511. /* Enable each Rx queue */
  512. regval = 0;
  513. for (i = 0; i < pdata->rx_q_count; i++)
  514. regval |= (0x02 << (i << 1));
  515. writel(regval, pdata->mac_regs + MAC_RQC0R);
  516. /* Enable MAC Rx */
  517. regval = readl(pdata->mac_regs + MAC_RCR);
  518. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
  519. MAC_RCR_DCRCC_LEN, 1);
  520. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
  521. MAC_RCR_CST_LEN, 1);
  522. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
  523. MAC_RCR_ACS_LEN, 1);
  524. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
  525. MAC_RCR_RE_LEN, 1);
  526. writel(regval, pdata->mac_regs + MAC_RCR);
  527. }
  528. static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
  529. {
  530. struct xlgmac_channel *channel;
  531. unsigned int i;
  532. u32 regval;
  533. /* Disable MAC Rx */
  534. regval = readl(pdata->mac_regs + MAC_RCR);
  535. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
  536. MAC_RCR_DCRCC_LEN, 0);
  537. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
  538. MAC_RCR_CST_LEN, 0);
  539. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
  540. MAC_RCR_ACS_LEN, 0);
  541. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
  542. MAC_RCR_RE_LEN, 0);
  543. writel(regval, pdata->mac_regs + MAC_RCR);
  544. /* Prepare for Rx DMA channel stop */
  545. for (i = 0; i < pdata->rx_q_count; i++)
  546. xlgmac_prepare_rx_stop(pdata, i);
  547. /* Disable each Rx queue */
  548. writel(0, pdata->mac_regs + MAC_RQC0R);
  549. /* Disable each Rx DMA channel */
  550. channel = pdata->channel_head;
  551. for (i = 0; i < pdata->channel_count; i++, channel++) {
  552. if (!channel->rx_ring)
  553. break;
  554. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  555. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
  556. DMA_CH_RCR_SR_LEN, 0);
  557. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  558. }
  559. }
  560. static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
  561. struct xlgmac_ring *ring)
  562. {
  563. struct xlgmac_pdata *pdata = channel->pdata;
  564. struct xlgmac_desc_data *desc_data;
  565. /* Make sure everything is written before the register write */
  566. wmb();
  567. /* Issue a poll command to Tx DMA by writing address
  568. * of next immediate free descriptor
  569. */
  570. desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
  571. writel(lower_32_bits(desc_data->dma_desc_addr),
  572. XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
  573. /* Start the Tx timer */
  574. if (pdata->tx_usecs && !channel->tx_timer_active) {
  575. channel->tx_timer_active = 1;
  576. mod_timer(&channel->tx_timer,
  577. jiffies + usecs_to_jiffies(pdata->tx_usecs));
  578. }
  579. ring->tx.xmit_more = 0;
  580. }
/* Build the hardware Tx descriptor chain for the packet described by
 * ring->pkt_info: an optional TSO/VLAN context descriptor, the first
 * (header) descriptor, any additional payload descriptors, then release
 * ownership to the DMA engine and (unless more packets are queued) kick
 * the Tx tail pointer.  The descriptor/barrier ordering here is critical:
 * OWN on the first descriptor is set last, after dma_wmb().
 */
static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	unsigned int tso_context, vlan_context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int csum, tso, vlan;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	unsigned int tx_set_ic;
	int i;

	/* Pull the per-packet attributes gathered during skb mapping */
	pkt_info = &ring->pkt_info;
	csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				   TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
				   TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
	tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
	vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);

	/* A context descriptor is only needed when the MSS or VLAN tag
	 * differs from what is currently programmed in the ring
	 */
	if (tso && (pkt_info->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += pkt_info->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (pkt_info->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) <
		 pkt_info->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	dma_desc = desc_data->dma_desc;

	/* Create a context descriptor if this is a TSO pkt_info */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  pkt_info->mss);

			/* Set the MSS size */
			dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc2,
						TX_CONTEXT_DESC2_MSS_POS,
						TX_CONTEXT_DESC2_MSS_LEN,
						pkt_info->mss);

			/* Mark it as a CONTEXT descriptor */
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_CONTEXT_DESC3_CTXT_POS,
						TX_CONTEXT_DESC3_CTXT_LEN,
						1);

			/* Indicate this descriptor contains the MSS */
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_CONTEXT_DESC3_TCMSSV_POS,
						TX_CONTEXT_DESC3_TCMSSV_LEN,
						1);

			/* Remember the MSS now programmed in the ring */
			ring->tx.cur_mss = pkt_info->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  pkt_info->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_CONTEXT_DESC3_CTXT_POS,
						TX_CONTEXT_DESC3_CTXT_LEN,
						1);

			/* Set the VLAN tag */
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_CONTEXT_DESC3_VT_POS,
						TX_CONTEXT_DESC3_VT_LEN,
						pkt_info->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_CONTEXT_DESC3_VLTV_POS,
						TX_CONTEXT_DESC3_VLTV_LEN,
						1);

			/* Remember the VLAN tag now programmed in the ring */
			ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
		}

		/* Advance past the context descriptor */
		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
		dma_desc = desc_data->dma_desc;
	}

	/* Update buffer address (for TSO this is the header) */
	dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
	dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));

	/* Update the buffer length */
	dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc2,
				TX_NORMAL_DESC2_HL_B1L_POS,
				TX_NORMAL_DESC2_HL_B1L_LEN,
				desc_data->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan) {
		dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc2,
					TX_NORMAL_DESC2_VTIR_POS,
					TX_NORMAL_DESC2_VTIR_LEN,
					TX_NORMAL_DESC2_VLAN_INSERT);
		pdata->stats.tx_vlan_packets++;
	}

	/* Timestamp enablement check */
	if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
				TX_PACKET_ATTRIBUTES_PTP_POS,
				TX_PACKET_ATTRIBUTES_PTP_LEN))
		dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc2,
					TX_NORMAL_DESC2_TTSE_POS,
					TX_NORMAL_DESC2_TTSE_LEN,
					1);

	/* Mark it as First Descriptor */
	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				TX_NORMAL_DESC3_FD_POS,
				TX_NORMAL_DESC3_FD_LEN,
				1);

	/* Mark it as a NORMAL descriptor */
	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				TX_NORMAL_DESC3_CTXT_POS,
				TX_NORMAL_DESC3_CTXT_LEN,
				0);

	/* Set OWN bit if not the first descriptor; the first descriptor's
	 * OWN bit is deliberately deferred until after dma_wmb() below
	 */
	if (cur_index != start_index)
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_OWN_POS,
					TX_NORMAL_DESC3_OWN_LEN,
					1);

	if (tso) {
		/* Enable TSO */
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_TSE_POS,
					TX_NORMAL_DESC3_TSE_LEN, 1);
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_TCPPL_POS,
					TX_NORMAL_DESC3_TCPPL_LEN,
					pkt_info->tcp_payload_len);
		/* TCP header length is encoded in 32-bit words */
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_TCPHDRLEN_POS,
					TX_NORMAL_DESC3_TCPHDRLEN_LEN,
					pkt_info->tcp_header_len / 4);

		pdata->stats.tx_tso_packets++;
	} else {
		/* Enable CRC and Pad Insertion */
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_CPC_POS,
					TX_NORMAL_DESC3_CPC_LEN, 0);

		/* Enable HW CSUM (0x3 = full IP header + payload checksum) */
		if (csum)
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_NORMAL_DESC3_CIC_POS,
						TX_NORMAL_DESC3_CIC_LEN,
						0x3);

		/* Set the total length to be transmitted */
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_FL_POS,
					TX_NORMAL_DESC3_FL_LEN,
					pkt_info->length);
	}

	/* Fill the remaining payload descriptors for this packet */
	for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
		dma_desc = desc_data->dma_desc;

		/* Update buffer address */
		dma_desc->desc0 =
			cpu_to_le32(lower_32_bits(desc_data->skb_dma));
		dma_desc->desc1 =
			cpu_to_le32(upper_32_bits(desc_data->skb_dma));

		/* Update the buffer length */
		dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc2,
					TX_NORMAL_DESC2_HL_B1L_POS,
					TX_NORMAL_DESC2_HL_B1L_LEN,
					desc_data->skb_dma_len);

		/* Set OWN bit */
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_OWN_POS,
					TX_NORMAL_DESC3_OWN_LEN, 1);

		/* Mark it as NORMAL descriptor */
		dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc3,
					TX_NORMAL_DESC3_CTXT_POS,
					TX_NORMAL_DESC3_CTXT_LEN, 0);

		/* Enable HW CSUM */
		if (csum)
			dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
						dma_desc->desc3,
						TX_NORMAL_DESC3_CIC_POS,
						TX_NORMAL_DESC3_CIC_LEN,
						0x3);
	}

	/* Set LAST bit for the last descriptor */
	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				TX_NORMAL_DESC3_LD_POS,
				TX_NORMAL_DESC3_LD_LEN, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
					dma_desc->desc2,
					TX_NORMAL_DESC2_IC_POS,
					TX_NORMAL_DESC2_IC_LEN, 1);

	/* Save the Tx info to report back during cleanup */
	desc_data->tx.packets = pkt_info->tx_packets;
	desc_data->tx.bytes = pkt_info->tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	dma_desc = desc_data->dma_desc;
	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				TX_NORMAL_DESC3_OWN_POS,
				TX_NORMAL_DESC3_OWN_LEN, 1);

	if (netif_msg_tx_queued(pdata))
		xlgmac_dump_tx_desc(pdata, ring, start_index,
				    pkt_info->desc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	/* Only ring the doorbell now if no more packets are pending or the
	 * queue was stopped; otherwise defer via xmit_more
	 */
	if (!netdev_xmit_more() ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xlgmac_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	XLGMAC_PR("%s: descriptors %u to %u written\n",
		  channel->name, start_index & (ring->dma_desc_count - 1),
		  (ring->cur - 1) & (ring->dma_desc_count - 1));
}
  846. static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
  847. struct xlgmac_dma_desc *dma_desc)
  848. {
  849. u32 tsa, tsd;
  850. u64 nsec;
  851. tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
  852. RX_CONTEXT_DESC3_TSA_POS,
  853. RX_CONTEXT_DESC3_TSA_LEN);
  854. tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
  855. RX_CONTEXT_DESC3_TSD_POS,
  856. RX_CONTEXT_DESC3_TSD_LEN);
  857. if (tsa && !tsd) {
  858. nsec = le32_to_cpu(dma_desc->desc1);
  859. nsec <<= 32;
  860. nsec |= le32_to_cpu(dma_desc->desc0);
  861. if (nsec != 0xffffffffffffffffULL) {
  862. pkt_info->rx_tstamp = nsec;
  863. pkt_info->attributes = XLGMAC_SET_REG_BITS(
  864. pkt_info->attributes,
  865. RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
  866. RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
  867. 1);
  868. }
  869. }
  870. }
  871. static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
  872. {
  873. struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
  874. /* Reset the Tx descriptor
  875. * Set buffer 1 (lo) address to zero
  876. * Set buffer 1 (hi) address to zero
  877. * Reset all other control bits (IC, TTSE, B2L & B1L)
  878. * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
  879. */
  880. dma_desc->desc0 = 0;
  881. dma_desc->desc1 = 0;
  882. dma_desc->desc2 = 0;
  883. dma_desc->desc3 = 0;
  884. /* Make sure ownership is written to the descriptor */
  885. dma_wmb();
  886. }
  887. static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
  888. {
  889. struct xlgmac_ring *ring = channel->tx_ring;
  890. struct xlgmac_desc_data *desc_data;
  891. int start_index = ring->cur;
  892. int i;
  893. /* Initialze all descriptors */
  894. for (i = 0; i < ring->dma_desc_count; i++) {
  895. desc_data = XLGMAC_GET_DESC_DATA(ring, i);
  896. /* Initialize Tx descriptor */
  897. xlgmac_tx_desc_reset(desc_data);
  898. }
  899. /* Update the total number of Tx descriptors */
  900. writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
  901. /* Update the starting address of descriptor ring */
  902. desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
  903. writel(upper_32_bits(desc_data->dma_desc_addr),
  904. XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
  905. writel(lower_32_bits(desc_data->dma_desc_addr),
  906. XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
  907. }
/* Re-arm one Rx descriptor with its header/buffer DMA addresses and hand
 * it back to the hardware.  The INTE bit is chosen from the Rx coalescing
 * settings; OWN is set last, after a dma_wmb(), so the DMA engine never
 * sees a half-written descriptor.
 */
static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
				 struct xlgmac_desc_data *desc_data,
				 unsigned int index)
{
	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int rx_usecs = pdata->rx_usecs;
	dma_addr_t hdr_dma, buf_dma;
	unsigned int inte;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting:
		 * interrupt only on every rx_frames-th descriptor
		 * (pure usecs-based coalescing leaves INTE clear and
		 * relies on the watchdog timer)
		 */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
	buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
	dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_INTE_POS,
				RX_NORMAL_DESC3_INTE_LEN,
				inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_OWN_POS,
				RX_NORMAL_DESC3_OWN_LEN,
				1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
  958. static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
  959. {
  960. struct xlgmac_pdata *pdata = channel->pdata;
  961. struct xlgmac_ring *ring = channel->rx_ring;
  962. unsigned int start_index = ring->cur;
  963. struct xlgmac_desc_data *desc_data;
  964. unsigned int i;
  965. /* Initialize all descriptors */
  966. for (i = 0; i < ring->dma_desc_count; i++) {
  967. desc_data = XLGMAC_GET_DESC_DATA(ring, i);
  968. /* Initialize Rx descriptor */
  969. xlgmac_rx_desc_reset(pdata, desc_data, i);
  970. }
  971. /* Update the total number of Rx descriptors */
  972. writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
  973. /* Update the starting address of descriptor ring */
  974. desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
  975. writel(upper_32_bits(desc_data->dma_desc_addr),
  976. XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
  977. writel(lower_32_bits(desc_data->dma_desc_addr),
  978. XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
  979. /* Update the Rx Descriptor Tail Pointer */
  980. desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
  981. ring->dma_desc_count - 1);
  982. writel(lower_32_bits(desc_data->dma_desc_addr),
  983. XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
  984. }
/* Return non-zero if the descriptor is a CONTEXT descriptor.
 * Rx and Tx share the CTXT bit position, so checking TDES3.CTXT works
 * for both directions.
 */
static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      TX_NORMAL_DESC3_CTXT_POS,
				      TX_NORMAL_DESC3_CTXT_LEN);
}
/* Return non-zero if the descriptor is the last one of a frame.
 * Rx and Tx share the LD bit position, so checking TDES3.LD works for
 * both directions.
 */
static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      TX_NORMAL_DESC3_LD_POS,
				      TX_NORMAL_DESC3_LD_LEN);
}
  999. static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
  1000. {
  1001. unsigned int max_q_count, q_count;
  1002. unsigned int reg, regval;
  1003. unsigned int i;
  1004. /* Clear MTL flow control */
  1005. for (i = 0; i < pdata->rx_q_count; i++) {
  1006. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1007. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
  1008. MTL_Q_RQOMR_EHFC_LEN, 0);
  1009. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1010. }
  1011. /* Clear MAC flow control */
  1012. max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
  1013. q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
  1014. reg = MAC_Q0TFCR;
  1015. for (i = 0; i < q_count; i++) {
  1016. regval = readl(pdata->mac_regs + reg);
  1017. regval = XLGMAC_SET_REG_BITS(regval,
  1018. MAC_Q0TFCR_TFE_POS,
  1019. MAC_Q0TFCR_TFE_LEN,
  1020. 0);
  1021. writel(regval, pdata->mac_regs + reg);
  1022. reg += MAC_QTFCR_INC;
  1023. }
  1024. return 0;
  1025. }
  1026. static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
  1027. {
  1028. unsigned int max_q_count, q_count;
  1029. unsigned int reg, regval;
  1030. unsigned int i;
  1031. /* Set MTL flow control */
  1032. for (i = 0; i < pdata->rx_q_count; i++) {
  1033. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1034. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
  1035. MTL_Q_RQOMR_EHFC_LEN, 1);
  1036. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1037. }
  1038. /* Set MAC flow control */
  1039. max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
  1040. q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
  1041. reg = MAC_Q0TFCR;
  1042. for (i = 0; i < q_count; i++) {
  1043. regval = readl(pdata->mac_regs + reg);
  1044. /* Enable transmit flow control */
  1045. regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
  1046. MAC_Q0TFCR_TFE_LEN, 1);
  1047. /* Set pause time */
  1048. regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
  1049. MAC_Q0TFCR_PT_LEN, 0xffff);
  1050. writel(regval, pdata->mac_regs + reg);
  1051. reg += MAC_QTFCR_INC;
  1052. }
  1053. return 0;
  1054. }
  1055. static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
  1056. {
  1057. u32 regval;
  1058. regval = readl(pdata->mac_regs + MAC_RFCR);
  1059. regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
  1060. MAC_RFCR_RFE_LEN, 0);
  1061. writel(regval, pdata->mac_regs + MAC_RFCR);
  1062. return 0;
  1063. }
  1064. static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
  1065. {
  1066. u32 regval;
  1067. regval = readl(pdata->mac_regs + MAC_RFCR);
  1068. regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
  1069. MAC_RFCR_RFE_LEN, 1);
  1070. writel(regval, pdata->mac_regs + MAC_RFCR);
  1071. return 0;
  1072. }
  1073. static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
  1074. {
  1075. if (pdata->tx_pause)
  1076. xlgmac_enable_tx_flow_control(pdata);
  1077. else
  1078. xlgmac_disable_tx_flow_control(pdata);
  1079. return 0;
  1080. }
  1081. static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
  1082. {
  1083. if (pdata->rx_pause)
  1084. xlgmac_enable_rx_flow_control(pdata);
  1085. else
  1086. xlgmac_disable_rx_flow_control(pdata);
  1087. return 0;
  1088. }
  1089. static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
  1090. {
  1091. struct xlgmac_channel *channel;
  1092. unsigned int i;
  1093. u32 regval;
  1094. channel = pdata->channel_head;
  1095. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1096. if (!channel->rx_ring)
  1097. break;
  1098. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
  1099. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
  1100. DMA_CH_RIWT_RWT_LEN,
  1101. pdata->rx_riwt);
  1102. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
  1103. }
  1104. return 0;
  1105. }
  1106. static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
  1107. {
  1108. xlgmac_config_tx_flow_control(pdata);
  1109. xlgmac_config_rx_flow_control(pdata);
  1110. }
  1111. static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
  1112. {
  1113. unsigned int i;
  1114. u32 regval;
  1115. for (i = 0; i < pdata->rx_q_count; i++) {
  1116. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1117. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
  1118. MTL_Q_RQOMR_FEP_LEN, 1);
  1119. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1120. }
  1121. }
  1122. static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
  1123. {
  1124. unsigned int i;
  1125. u32 regval;
  1126. for (i = 0; i < pdata->rx_q_count; i++) {
  1127. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1128. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
  1129. MTL_Q_RQOMR_FUP_LEN, 1);
  1130. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1131. }
  1132. }
/* Intentional no-op: there is no Tx coalescing timer register to program
 * here; Tx interrupt coalescing is implemented per-descriptor via the IC
 * bit (see the tx_set_ic logic in xlgmac_dev_xmit).  Presumably kept to
 * satisfy a hw_ops callback slot — TODO confirm against the ops table.
 */
static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
{
	return 0;
}
  1137. static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
  1138. {
  1139. struct xlgmac_channel *channel;
  1140. unsigned int i;
  1141. u32 regval;
  1142. channel = pdata->channel_head;
  1143. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1144. if (!channel->rx_ring)
  1145. break;
  1146. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  1147. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
  1148. DMA_CH_RCR_RBSZ_LEN,
  1149. pdata->rx_buf_size);
  1150. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  1151. }
  1152. }
  1153. static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
  1154. {
  1155. struct xlgmac_channel *channel;
  1156. unsigned int i;
  1157. u32 regval;
  1158. channel = pdata->channel_head;
  1159. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1160. if (!channel->tx_ring)
  1161. break;
  1162. if (pdata->hw_feat.tso) {
  1163. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  1164. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
  1165. DMA_CH_TCR_TSE_LEN, 1);
  1166. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  1167. }
  1168. }
  1169. }
  1170. static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
  1171. {
  1172. struct xlgmac_channel *channel;
  1173. unsigned int i;
  1174. u32 regval;
  1175. channel = pdata->channel_head;
  1176. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1177. if (!channel->rx_ring)
  1178. break;
  1179. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
  1180. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
  1181. DMA_CH_CR_SPH_LEN, 1);
  1182. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
  1183. }
  1184. regval = readl(pdata->mac_regs + MAC_RCR);
  1185. regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
  1186. MAC_RCR_HDSMS_LEN,
  1187. XLGMAC_SPH_HDSMS_SIZE);
  1188. writel(regval, pdata->mac_regs + MAC_RCR);
  1189. }
  1190. static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
  1191. unsigned int usec)
  1192. {
  1193. unsigned long rate;
  1194. unsigned int ret;
  1195. rate = pdata->sysclk_rate;
  1196. /* Convert the input usec value to the watchdog timer value. Each
  1197. * watchdog timer value is equivalent to 256 clock cycles.
  1198. * Calculate the required value as:
  1199. * ( usec * ( system_clock_mhz / 10^6 ) / 256
  1200. */
  1201. ret = (usec * (rate / 1000000)) / 256;
  1202. return ret;
  1203. }
  1204. static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
  1205. unsigned int riwt)
  1206. {
  1207. unsigned long rate;
  1208. unsigned int ret;
  1209. rate = pdata->sysclk_rate;
  1210. /* Convert the input watchdog timer value to the usec value. Each
  1211. * watchdog timer value is equivalent to 256 clock cycles.
  1212. * Calculate the required value as:
  1213. * ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
  1214. */
  1215. ret = (riwt * 256) / (rate / 1000000);
  1216. return ret;
  1217. }
  1218. static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
  1219. unsigned int val)
  1220. {
  1221. unsigned int i;
  1222. u32 regval;
  1223. for (i = 0; i < pdata->rx_q_count; i++) {
  1224. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1225. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
  1226. MTL_Q_RQOMR_RTC_LEN, val);
  1227. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1228. }
  1229. return 0;
  1230. }
  1231. static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
  1232. {
  1233. unsigned int i;
  1234. u32 regval;
  1235. /* Set Tx to weighted round robin scheduling algorithm */
  1236. regval = readl(pdata->mac_regs + MTL_OMR);
  1237. regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
  1238. MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
  1239. writel(regval, pdata->mac_regs + MTL_OMR);
  1240. /* Set Tx traffic classes to use WRR algorithm with equal weights */
  1241. for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
  1242. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
  1243. regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
  1244. MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
  1245. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
  1246. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
  1247. regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
  1248. MTL_TC_QWR_QW_LEN, 1);
  1249. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
  1250. }
  1251. /* Set Rx to strict priority algorithm */
  1252. regval = readl(pdata->mac_regs + MTL_OMR);
  1253. regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
  1254. MTL_OMR_RAA_LEN, MTL_RAA_SP);
  1255. writel(regval, pdata->mac_regs + MTL_OMR);
  1256. }
/* Set up the three static mappings the MTL needs:
 *   1. Tx queues -> traffic classes (queues distributed evenly, with any
 *      remainder spread one-per-class over the first classes),
 *   2. the 8 IEEE 802.1Q VLAN priorities -> Rx queues (packed 8 bits per
 *      queue into the MAC_RQC2R register series),
 *   3. Rx queues -> DMA channels, one-to-one.
 */
static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
{
	unsigned int ppq, ppq_extra, prio, prio_queues;
	unsigned int qptc, qptc_extra, queue;
	unsigned int reg, regval;
	unsigned int mask;
	unsigned int i, j;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			regval = readl(XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     MTL_Q_TQOMR_Q2TCMAP_POS,
						     MTL_Q_TQOMR_Q2TCMAP_LEN,
						     i);
			writel(regval, XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			queue++;
		}

		/* The first qptc_extra classes each take one extra queue */
		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			regval = readl(XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     MTL_Q_TQOMR_Q2TCMAP_POS,
						     MTL_Q_TQOMR_Q2TCMAP_LEN,
						     i);
			writel(regval, XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			queue++;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
			    pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	regval = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		/* Build the priority bitmask for Rx queue i */
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			prio++;
		}

		/* The first ppq_extra queues each take one extra priority */
		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			prio++;
		}

		/* Pack the mask into its byte lane of the current register;
		 * flush the register once its lanes (or the queues) run out
		 */
		regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		writel(regval, pdata->mac_regs + reg);
		reg += MAC_RQC2_INC;
		regval = 0;
	}

	/* Configure one to one, MTL Rx queue to DMA Rx channel mapping
	 *  ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
	 */
	reg = MTL_RQDCM0R;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
		    MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
	writel(regval, pdata->mac_regs + reg);

	reg += MTL_RQDCM_INC;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
		    MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
	writel(regval, pdata->mac_regs + reg);

	reg += MTL_RQDCM_INC;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
		    MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
	writel(regval, pdata->mac_regs + reg);
}
  1344. static unsigned int xlgmac_calculate_per_queue_fifo(
  1345. unsigned int fifo_size,
  1346. unsigned int queue_count)
  1347. {
  1348. unsigned int q_fifo_size;
  1349. unsigned int p_fifo;
  1350. /* Calculate the configured fifo size */
  1351. q_fifo_size = 1 << (fifo_size + 7);
  1352. /* The configured value may not be the actual amount of fifo RAM */
  1353. q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
  1354. q_fifo_size = q_fifo_size / queue_count;
  1355. /* Each increment in the queue fifo size represents 256 bytes of
  1356. * fifo, with 0 representing 256 bytes. Distribute the fifo equally
  1357. * between the queues.
  1358. */
  1359. p_fifo = q_fifo_size / 256;
  1360. if (p_fifo)
  1361. p_fifo--;
  1362. return p_fifo;
  1363. }
  1364. static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
  1365. {
  1366. unsigned int fifo_size;
  1367. unsigned int i;
  1368. u32 regval;
  1369. fifo_size = xlgmac_calculate_per_queue_fifo(
  1370. pdata->hw_feat.tx_fifo_size,
  1371. pdata->tx_q_count);
  1372. for (i = 0; i < pdata->tx_q_count; i++) {
  1373. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  1374. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
  1375. MTL_Q_TQOMR_TQS_LEN, fifo_size);
  1376. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  1377. }
  1378. netif_info(pdata, drv, pdata->netdev,
  1379. "%d Tx hardware queues, %d byte fifo per queue\n",
  1380. pdata->tx_q_count, ((fifo_size + 1) * 256));
  1381. }
  1382. static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
  1383. {
  1384. unsigned int fifo_size;
  1385. unsigned int i;
  1386. u32 regval;
  1387. fifo_size = xlgmac_calculate_per_queue_fifo(
  1388. pdata->hw_feat.rx_fifo_size,
  1389. pdata->rx_q_count);
  1390. for (i = 0; i < pdata->rx_q_count; i++) {
  1391. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1392. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
  1393. MTL_Q_RQOMR_RQS_LEN, fifo_size);
  1394. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1395. }
  1396. netif_info(pdata, drv, pdata->netdev,
  1397. "%d Rx hardware queues, %d byte fifo per queue\n",
  1398. pdata->rx_q_count, ((fifo_size + 1) * 256));
  1399. }
  1400. static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
  1401. {
  1402. unsigned int i;
  1403. u32 regval;
  1404. for (i = 0; i < pdata->rx_q_count; i++) {
  1405. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
  1406. /* Activate flow control when less than 4k left in fifo */
  1407. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
  1408. MTL_Q_RQFCR_RFA_LEN, 2);
  1409. /* De-activate flow control when more than 6k left in fifo */
  1410. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
  1411. MTL_Q_RQFCR_RFD_LEN, 4);
  1412. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
  1413. }
  1414. }
  1415. static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
  1416. unsigned int val)
  1417. {
  1418. unsigned int i;
  1419. u32 regval;
  1420. for (i = 0; i < pdata->tx_q_count; i++) {
  1421. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  1422. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
  1423. MTL_Q_TQOMR_TTC_LEN, val);
  1424. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  1425. }
  1426. return 0;
  1427. }
  1428. static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
  1429. unsigned int val)
  1430. {
  1431. unsigned int i;
  1432. u32 regval;
  1433. for (i = 0; i < pdata->rx_q_count; i++) {
  1434. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1435. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
  1436. MTL_Q_RQOMR_RSF_LEN, val);
  1437. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
  1438. }
  1439. return 0;
  1440. }
  1441. static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
  1442. unsigned int val)
  1443. {
  1444. unsigned int i;
  1445. u32 regval;
  1446. for (i = 0; i < pdata->tx_q_count; i++) {
  1447. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  1448. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
  1449. MTL_Q_TQOMR_TSF_LEN, val);
  1450. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  1451. }
  1452. return 0;
  1453. }
  1454. static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
  1455. {
  1456. struct xlgmac_channel *channel;
  1457. unsigned int i;
  1458. u32 regval;
  1459. channel = pdata->channel_head;
  1460. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1461. if (!channel->tx_ring)
  1462. break;
  1463. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  1464. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
  1465. DMA_CH_TCR_OSP_LEN,
  1466. pdata->tx_osp_mode);
  1467. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  1468. }
  1469. return 0;
  1470. }
  1471. static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
  1472. {
  1473. struct xlgmac_channel *channel;
  1474. unsigned int i;
  1475. u32 regval;
  1476. channel = pdata->channel_head;
  1477. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1478. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
  1479. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
  1480. DMA_CH_CR_PBLX8_LEN,
  1481. pdata->pblx8);
  1482. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
  1483. }
  1484. return 0;
  1485. }
  1486. static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
  1487. {
  1488. u32 regval;
  1489. regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
  1490. regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
  1491. DMA_CH_TCR_PBL_LEN);
  1492. return regval;
  1493. }
  1494. static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
  1495. {
  1496. struct xlgmac_channel *channel;
  1497. unsigned int i;
  1498. u32 regval;
  1499. channel = pdata->channel_head;
  1500. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1501. if (!channel->tx_ring)
  1502. break;
  1503. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  1504. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
  1505. DMA_CH_TCR_PBL_LEN,
  1506. pdata->tx_pbl);
  1507. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
  1508. }
  1509. return 0;
  1510. }
  1511. static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
  1512. {
  1513. u32 regval;
  1514. regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
  1515. regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
  1516. DMA_CH_RCR_PBL_LEN);
  1517. return regval;
  1518. }
  1519. static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
  1520. {
  1521. struct xlgmac_channel *channel;
  1522. unsigned int i;
  1523. u32 regval;
  1524. channel = pdata->channel_head;
  1525. for (i = 0; i < pdata->channel_count; i++, channel++) {
  1526. if (!channel->rx_ring)
  1527. break;
  1528. regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  1529. regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
  1530. DMA_CH_RCR_PBL_LEN,
  1531. pdata->rx_pbl);
  1532. writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
  1533. }
  1534. return 0;
  1535. }
  1536. static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
  1537. {
  1538. bool read_hi;
  1539. u64 val;
  1540. switch (reg_lo) {
  1541. /* These registers are always 64 bit */
  1542. case MMC_TXOCTETCOUNT_GB_LO:
  1543. case MMC_TXOCTETCOUNT_G_LO:
  1544. case MMC_RXOCTETCOUNT_GB_LO:
  1545. case MMC_RXOCTETCOUNT_G_LO:
  1546. read_hi = true;
  1547. break;
  1548. default:
  1549. read_hi = false;
  1550. }
  1551. val = (u64)readl(pdata->mac_regs + reg_lo);
  1552. if (read_hi)
  1553. val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
  1554. return val;
  1555. }
/* Service the MMC Tx counter interrupt.
 *
 * For every Tx counter whose status bit is set in MMC_TISR, read the
 * hardware counter and accumulate it into the software stats.  The
 * counters are configured reset-on-read (see xlgmac_config_mmc), so
 * reading a counter here also clears it in hardware.
 */
static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
{
	unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
	struct xlgmac_stats *stats = &pdata->stats;

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXOCTETCOUNT_GB_POS,
				MMC_TISR_TXOCTETCOUNT_GB_LEN))
		stats->txoctetcount_gb +=
			xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXFRAMECOUNT_GB_POS,
				MMC_TISR_TXFRAMECOUNT_GB_LEN))
		stats->txframecount_gb +=
			xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXBROADCASTFRAMES_G_POS,
				MMC_TISR_TXBROADCASTFRAMES_G_LEN))
		stats->txbroadcastframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXMULTICASTFRAMES_G_POS,
				MMC_TISR_TXMULTICASTFRAMES_G_LEN))
		stats->txmulticastframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX64OCTETS_GB_POS,
				MMC_TISR_TX64OCTETS_GB_LEN))
		stats->tx64octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX65TO127OCTETS_GB_POS,
				MMC_TISR_TX65TO127OCTETS_GB_LEN))
		stats->tx65to127octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX128TO255OCTETS_GB_POS,
				MMC_TISR_TX128TO255OCTETS_GB_LEN))
		stats->tx128to255octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX256TO511OCTETS_GB_POS,
				MMC_TISR_TX256TO511OCTETS_GB_LEN))
		stats->tx256to511octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX512TO1023OCTETS_GB_POS,
				MMC_TISR_TX512TO1023OCTETS_GB_LEN))
		stats->tx512to1023octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
				MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
		stats->tx1024tomaxoctets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXUNICASTFRAMES_GB_POS,
				MMC_TISR_TXUNICASTFRAMES_GB_LEN))
		stats->txunicastframes_gb +=
			xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXMULTICASTFRAMES_GB_POS,
				MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
		stats->txmulticastframes_gb +=
			xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXBROADCASTFRAMES_GB_POS,
				MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
		/* NOTE(review): the good+bad (GB) broadcast counter is folded
		 * into the good-only (_g) stat; there is no txbroadcastframes_gb
		 * field -- confirm this mapping is intentional.
		 */
		stats->txbroadcastframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXUNDERFLOWERROR_POS,
				MMC_TISR_TXUNDERFLOWERROR_LEN))
		stats->txunderflowerror +=
			xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXOCTETCOUNT_G_POS,
				MMC_TISR_TXOCTETCOUNT_G_LEN))
		stats->txoctetcount_g +=
			xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXFRAMECOUNT_G_POS,
				MMC_TISR_TXFRAMECOUNT_G_LEN))
		stats->txframecount_g +=
			xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXPAUSEFRAMES_POS,
				MMC_TISR_TXPAUSEFRAMES_LEN))
		stats->txpauseframes +=
			xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXVLANFRAMES_G_POS,
				MMC_TISR_TXVLANFRAMES_G_LEN))
		stats->txvlanframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
/* Service the MMC Rx counter interrupt.
 *
 * For every Rx counter whose status bit is set in MMC_RISR, read the
 * hardware counter and accumulate it into the software stats.  The
 * counters are configured reset-on-read (see xlgmac_config_mmc), so
 * reading a counter here also clears it in hardware.
 */
static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
{
	unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
	struct xlgmac_stats *stats = &pdata->stats;

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXFRAMECOUNT_GB_POS,
				MMC_RISR_RXFRAMECOUNT_GB_LEN))
		stats->rxframecount_gb +=
			xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOCTETCOUNT_GB_POS,
				MMC_RISR_RXOCTETCOUNT_GB_LEN))
		stats->rxoctetcount_gb +=
			xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOCTETCOUNT_G_POS,
				MMC_RISR_RXOCTETCOUNT_G_LEN))
		stats->rxoctetcount_g +=
			xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXBROADCASTFRAMES_G_POS,
				MMC_RISR_RXBROADCASTFRAMES_G_LEN))
		stats->rxbroadcastframes_g +=
			xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXMULTICASTFRAMES_G_POS,
				MMC_RISR_RXMULTICASTFRAMES_G_LEN))
		stats->rxmulticastframes_g +=
			xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXCRCERROR_POS,
				MMC_RISR_RXCRCERROR_LEN))
		stats->rxcrcerror +=
			xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXRUNTERROR_POS,
				MMC_RISR_RXRUNTERROR_LEN))
		stats->rxrunterror +=
			xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXJABBERERROR_POS,
				MMC_RISR_RXJABBERERROR_LEN))
		stats->rxjabbererror +=
			xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXUNDERSIZE_G_POS,
				MMC_RISR_RXUNDERSIZE_G_LEN))
		stats->rxundersize_g +=
			xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOVERSIZE_G_POS,
				MMC_RISR_RXOVERSIZE_G_LEN))
		stats->rxoversize_g +=
			xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX64OCTETS_GB_POS,
				MMC_RISR_RX64OCTETS_GB_LEN))
		stats->rx64octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX65TO127OCTETS_GB_POS,
				MMC_RISR_RX65TO127OCTETS_GB_LEN))
		stats->rx65to127octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX128TO255OCTETS_GB_POS,
				MMC_RISR_RX128TO255OCTETS_GB_LEN))
		stats->rx128to255octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX256TO511OCTETS_GB_POS,
				MMC_RISR_RX256TO511OCTETS_GB_LEN))
		stats->rx256to511octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX512TO1023OCTETS_GB_POS,
				MMC_RISR_RX512TO1023OCTETS_GB_LEN))
		stats->rx512to1023octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
				MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
		stats->rx1024tomaxoctets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXUNICASTFRAMES_G_POS,
				MMC_RISR_RXUNICASTFRAMES_G_LEN))
		stats->rxunicastframes_g +=
			xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXLENGTHERROR_POS,
				MMC_RISR_RXLENGTHERROR_LEN))
		stats->rxlengtherror +=
			xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOUTOFRANGETYPE_POS,
				MMC_RISR_RXOUTOFRANGETYPE_LEN))
		stats->rxoutofrangetype +=
			xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXPAUSEFRAMES_POS,
				MMC_RISR_RXPAUSEFRAMES_LEN))
		stats->rxpauseframes +=
			xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXFIFOOVERFLOW_POS,
				MMC_RISR_RXFIFOOVERFLOW_LEN))
		stats->rxfifooverflow +=
			xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXVLANFRAMES_GB_POS,
				MMC_RISR_RXVLANFRAMES_GB_LEN))
		stats->rxvlanframes_gb +=
			xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXWATCHDOGERROR_POS,
				MMC_RISR_RXWATCHDOGERROR_LEN))
		stats->rxwatchdogerror +=
			xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
/* Snapshot all MMC hardware counters into the software stats.
 *
 * The counters are frozen (MMC_CR_MCF) for the duration of the read so
 * the set is self-consistent, then un-frozen afterwards.  Counters are
 * configured reset-on-read (see xlgmac_config_mmc), so each read also
 * clears the hardware counter.
 */
static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
{
	struct xlgmac_stats *stats = &pdata->stats;
	u32 regval;

	/* Freeze counters */
	regval = readl(pdata->mac_regs + MMC_CR);
	regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
				     MMC_CR_MCF_LEN, 1);
	writel(regval, pdata->mac_regs + MMC_CR);

	stats->txoctetcount_gb +=
		xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	/* NOTE(review): the good+bad (GB) broadcast counter is folded into
	 * the good-only (_g) stat; there is no txbroadcastframes_gb field --
	 * confirm this mapping is intentional.
	 */
	stats->txbroadcastframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	regval = readl(pdata->mac_regs + MMC_CR);
	regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
				     MMC_CR_MCF_LEN, 0);
	writel(regval, pdata->mac_regs + MMC_CR);
}
  1868. static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
  1869. {
  1870. u32 regval;
  1871. regval = readl(pdata->mac_regs + MMC_CR);
  1872. /* Set counters to reset on read */
  1873. regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
  1874. MMC_CR_ROR_LEN, 1);
  1875. /* Reset the counters */
  1876. regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
  1877. MMC_CR_CR_LEN, 1);
  1878. writel(regval, pdata->mac_regs + MMC_CR);
  1879. }
  1880. static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
  1881. unsigned int index, unsigned int val)
  1882. {
  1883. unsigned int wait;
  1884. int ret = 0;
  1885. u32 regval;
  1886. mutex_lock(&pdata->rss_mutex);
  1887. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
  1888. MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
  1889. if (regval) {
  1890. ret = -EBUSY;
  1891. goto unlock;
  1892. }
  1893. writel(val, pdata->mac_regs + MAC_RSSDR);
  1894. regval = readl(pdata->mac_regs + MAC_RSSAR);
  1895. regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
  1896. MAC_RSSAR_RSSIA_LEN, index);
  1897. regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
  1898. MAC_RSSAR_ADDRT_LEN, type);
  1899. regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
  1900. MAC_RSSAR_CT_LEN, 0);
  1901. regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
  1902. MAC_RSSAR_OB_LEN, 1);
  1903. writel(regval, pdata->mac_regs + MAC_RSSAR);
  1904. wait = 1000;
  1905. while (wait--) {
  1906. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
  1907. MAC_RSSAR_OB_POS,
  1908. MAC_RSSAR_OB_LEN);
  1909. if (!regval)
  1910. goto unlock;
  1911. usleep_range(1000, 1500);
  1912. }
  1913. ret = -EBUSY;
  1914. unlock:
  1915. mutex_unlock(&pdata->rss_mutex);
  1916. return ret;
  1917. }
  1918. static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
  1919. {
  1920. unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
  1921. unsigned int *key = (unsigned int *)&pdata->rss_key;
  1922. int ret;
  1923. while (key_regs--) {
  1924. ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
  1925. key_regs, *key++);
  1926. if (ret)
  1927. return ret;
  1928. }
  1929. return 0;
  1930. }
  1931. static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
  1932. {
  1933. unsigned int i;
  1934. int ret;
  1935. for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
  1936. ret = xlgmac_write_rss_reg(pdata,
  1937. XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
  1938. pdata->rss_table[i]);
  1939. if (ret)
  1940. return ret;
  1941. }
  1942. return 0;
  1943. }
  1944. static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
  1945. {
  1946. memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
  1947. return xlgmac_write_rss_hash_key(pdata);
  1948. }
  1949. static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
  1950. const u32 *table)
  1951. {
  1952. unsigned int i;
  1953. u32 tval;
  1954. for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
  1955. tval = table[i];
  1956. pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
  1957. pdata->rss_table[i],
  1958. MAC_RSSDR_DMCH_POS,
  1959. MAC_RSSDR_DMCH_LEN,
  1960. tval);
  1961. }
  1962. return xlgmac_write_rss_lookup_table(pdata);
  1963. }
  1964. static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
  1965. {
  1966. u32 regval;
  1967. int ret;
  1968. if (!pdata->hw_feat.rss)
  1969. return -EOPNOTSUPP;
  1970. /* Program the hash key */
  1971. ret = xlgmac_write_rss_hash_key(pdata);
  1972. if (ret)
  1973. return ret;
  1974. /* Program the lookup table */
  1975. ret = xlgmac_write_rss_lookup_table(pdata);
  1976. if (ret)
  1977. return ret;
  1978. /* Set the RSS options */
  1979. writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
  1980. /* Enable RSS */
  1981. regval = readl(pdata->mac_regs + MAC_RSSCR);
  1982. regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
  1983. MAC_RSSCR_RSSE_LEN, 1);
  1984. writel(regval, pdata->mac_regs + MAC_RSSCR);
  1985. return 0;
  1986. }
  1987. static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
  1988. {
  1989. u32 regval;
  1990. if (!pdata->hw_feat.rss)
  1991. return -EOPNOTSUPP;
  1992. regval = readl(pdata->mac_regs + MAC_RSSCR);
  1993. regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
  1994. MAC_RSSCR_RSSE_LEN, 0);
  1995. writel(regval, pdata->mac_regs + MAC_RSSCR);
  1996. return 0;
  1997. }
  1998. static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
  1999. {
  2000. int ret;
  2001. if (!pdata->hw_feat.rss)
  2002. return;
  2003. if (pdata->netdev->features & NETIF_F_RXHASH)
  2004. ret = xlgmac_enable_rss(pdata);
  2005. else
  2006. ret = xlgmac_disable_rss(pdata);
  2007. if (ret)
  2008. netdev_err(pdata->netdev,
  2009. "error configuring RSS, RSS disabled\n");
  2010. }
  2011. static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
  2012. {
  2013. unsigned int dma_ch_isr, dma_ch_ier;
  2014. struct xlgmac_channel *channel;
  2015. unsigned int i;
  2016. channel = pdata->channel_head;
  2017. for (i = 0; i < pdata->channel_count; i++, channel++) {
  2018. /* Clear all the interrupts which are set */
  2019. dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
  2020. writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
  2021. /* Clear all interrupt enable bits */
  2022. dma_ch_ier = 0;
  2023. /* Enable following interrupts
  2024. * NIE - Normal Interrupt Summary Enable
  2025. * AIE - Abnormal Interrupt Summary Enable
  2026. * FBEE - Fatal Bus Error Enable
  2027. */
  2028. dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
  2029. DMA_CH_IER_NIE_POS,
  2030. DMA_CH_IER_NIE_LEN, 1);
  2031. dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
  2032. DMA_CH_IER_AIE_POS,
  2033. DMA_CH_IER_AIE_LEN, 1);
  2034. dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
  2035. DMA_CH_IER_FBEE_POS,
  2036. DMA_CH_IER_FBEE_LEN, 1);
  2037. if (channel->tx_ring) {
  2038. /* Enable the following Tx interrupts
  2039. * TIE - Transmit Interrupt Enable (unless using
  2040. * per channel interrupts)
  2041. */
  2042. if (!pdata->per_channel_irq)
  2043. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2044. dma_ch_ier,
  2045. DMA_CH_IER_TIE_POS,
  2046. DMA_CH_IER_TIE_LEN,
  2047. 1);
  2048. }
  2049. if (channel->rx_ring) {
  2050. /* Enable following Rx interrupts
  2051. * RBUE - Receive Buffer Unavailable Enable
  2052. * RIE - Receive Interrupt Enable (unless using
  2053. * per channel interrupts)
  2054. */
  2055. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2056. dma_ch_ier,
  2057. DMA_CH_IER_RBUE_POS,
  2058. DMA_CH_IER_RBUE_LEN,
  2059. 1);
  2060. if (!pdata->per_channel_irq)
  2061. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2062. dma_ch_ier,
  2063. DMA_CH_IER_RIE_POS,
  2064. DMA_CH_IER_RIE_LEN,
  2065. 1);
  2066. }
  2067. writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_IER));
  2068. }
  2069. }
  2070. static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
  2071. {
  2072. unsigned int q_count, i;
  2073. unsigned int mtl_q_isr;
  2074. q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
  2075. for (i = 0; i < q_count; i++) {
  2076. /* Clear all the interrupts which are set */
  2077. mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
  2078. writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
  2079. /* No MTL interrupts to be enabled */
  2080. writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
  2081. }
  2082. }
  2083. static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
  2084. {
  2085. unsigned int mac_ier = 0;
  2086. u32 regval;
  2087. /* Enable Timestamp interrupt */
  2088. mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
  2089. MAC_IER_TSIE_LEN, 1);
  2090. writel(mac_ier, pdata->mac_regs + MAC_IER);
  2091. /* Enable all counter interrupts */
  2092. regval = readl(pdata->mac_regs + MMC_RIER);
  2093. regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
  2094. MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
  2095. writel(regval, pdata->mac_regs + MMC_RIER);
  2096. regval = readl(pdata->mac_regs + MMC_TIER);
  2097. regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
  2098. MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
  2099. writel(regval, pdata->mac_regs + MMC_TIER);
  2100. }
  2101. static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
  2102. {
  2103. u32 regval;
  2104. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
  2105. MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
  2106. if (regval == 0x1)
  2107. return 0;
  2108. regval = readl(pdata->mac_regs + MAC_TCR);
  2109. regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
  2110. MAC_TCR_SS_LEN, 0x1);
  2111. writel(regval, pdata->mac_regs + MAC_TCR);
  2112. return 0;
  2113. }
  2114. static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
  2115. {
  2116. u32 regval;
  2117. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
  2118. MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
  2119. if (regval == 0)
  2120. return 0;
  2121. regval = readl(pdata->mac_regs + MAC_TCR);
  2122. regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
  2123. MAC_TCR_SS_LEN, 0);
  2124. writel(regval, pdata->mac_regs + MAC_TCR);
  2125. return 0;
  2126. }
  2127. static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
  2128. {
  2129. u32 regval;
  2130. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
  2131. MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
  2132. if (regval == 0x2)
  2133. return 0;
  2134. regval = readl(pdata->mac_regs + MAC_TCR);
  2135. regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
  2136. MAC_TCR_SS_LEN, 0x2);
  2137. writel(regval, pdata->mac_regs + MAC_TCR);
  2138. return 0;
  2139. }
  2140. static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
  2141. {
  2142. u32 regval;
  2143. regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
  2144. MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
  2145. if (regval == 0x3)
  2146. return 0;
  2147. regval = readl(pdata->mac_regs + MAC_TCR);
  2148. regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
  2149. MAC_TCR_SS_LEN, 0x3);
  2150. writel(regval, pdata->mac_regs + MAC_TCR);
  2151. return 0;
  2152. }
  2153. static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
  2154. {
  2155. switch (pdata->phy_speed) {
  2156. case SPEED_100000:
  2157. xlgmac_set_xlgmii_100000_speed(pdata);
  2158. break;
  2159. case SPEED_50000:
  2160. xlgmac_set_xlgmii_50000_speed(pdata);
  2161. break;
  2162. case SPEED_40000:
  2163. xlgmac_set_xlgmii_40000_speed(pdata);
  2164. break;
  2165. case SPEED_25000:
  2166. xlgmac_set_xlgmii_25000_speed(pdata);
  2167. break;
  2168. }
  2169. }
/* Parse the Rx descriptor at ring->cur and fold its fields into the
 * ring's pkt_info (attributes, errors, RSS hash, VLAN tag, lengths).
 *
 * Returns 1 if the descriptor is still owned by the DMA engine (no data
 * available yet), 0 once the descriptor has been consumed.
 */
static int xlgmac_dev_read(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int err, etlt, l34t;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	dma_desc = desc_data->dma_desc;
	pkt_info = &ring->pkt_info;

	/* Check for data availability */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_OWN_POS,
				   RX_NORMAL_DESC3_OWN_LEN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xlgmac_dump_rx_desc(pdata, ring, ring->cur);

	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_CTXT_POS,
				   RX_NORMAL_DESC3_CTXT_LEN)) {
		/* Timestamp Context Descriptor: extract the timestamp and
		 * flag this entry as a context descriptor (no packet data).
		 */
		xlgmac_get_rx_tstamp(pkt_info, dma_desc);

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
				0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
				0);

	/* Indicate if a Context Descriptor is next */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_CDA_POS,
				   RX_NORMAL_DESC3_CDA_LEN))
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
				1);

	/* Get the header length (valid only in the first descriptor of a
	 * packet, i.e. when FD is set)
	 */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_FD_POS,
				   RX_NORMAL_DESC3_FD_LEN)) {
		desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
							RX_NORMAL_DESC2_HL_POS,
							RX_NORMAL_DESC2_HL_LEN);
		if (desc_data->rx.hdr_len)
			pdata->stats.rx_split_header_packets++;
	}

	/* Get the RSS hash (desc1 holds the hash value when RSV is set) */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_RSV_POS,
				   RX_NORMAL_DESC3_RSV_LEN)) {
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
				RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
				1);

		pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);

		/* Map the layer-3/4 type to the stack's hash type: TCP/UDP
		 * over IPv4/IPv6 hashed on L4, anything else on L3.
		 */
		l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
					      RX_NORMAL_DESC3_L34T_POS,
					      RX_NORMAL_DESC3_L34T_LEN);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Get the pkt_info length */
	desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
						RX_NORMAL_DESC3_PL_POS,
						RX_NORMAL_DESC3_PL_LEN);

	if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				    RX_NORMAL_DESC3_LD_POS,
				    RX_NORMAL_DESC3_LD_LEN)) {
		/* Not all the data has been transferred for this pkt_info */
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
				1);
		return 0;
	}

	/* This is the last of the data for this pkt_info */
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
			pkt_info->attributes,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
			0);

	/* Set checksum done indicator as appropriate; it may be cleared
	 * again below if the descriptor reports a checksum error type.
	 */
	if (netdev->features & NETIF_F_RXCSUM)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
				1);

	/* Check for errors (only valid in last descriptor) */
	err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				     RX_NORMAL_DESC3_ES_POS,
				     RX_NORMAL_DESC3_ES_LEN);
	etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      RX_NORMAL_DESC3_ETLT_POS,
				      RX_NORMAL_DESC3_ETLT_LEN);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			/* etlt 0x09: packet carries a VLAN tag; the outer
			 * tag is reported in desc0 (OVT field).
			 */
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
			pkt_info->vlan_ctag =
				XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
						       RX_NORMAL_DESC0_OVT_POS,
						       RX_NORMAL_DESC0_OVT_LEN);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  pkt_info->vlan_ctag);
		}
	} else {
		/* etlt 0x05/0x06: checksum-related error types — clear
		 * CSUM_DONE so the stack re-verifies; everything else is
		 * reported as a frame error.
		 */
		if ((etlt == 0x05) || (etlt == 0x06))
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
					0);
		else
			pkt_info->errors = XLGMAC_SET_REG_BITS(
					pkt_info->errors,
					RX_PACKET_ERRORS_FRAME_POS,
					RX_PACKET_ERRORS_FRAME_LEN,
					1);
	}

	XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
		  ring->cur & (ring->dma_desc_count - 1), ring->cur);

	return 0;
}
  2327. static int xlgmac_enable_int(struct xlgmac_channel *channel,
  2328. enum xlgmac_int int_id)
  2329. {
  2330. unsigned int dma_ch_ier;
  2331. dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
  2332. switch (int_id) {
  2333. case XLGMAC_INT_DMA_CH_SR_TI:
  2334. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2335. dma_ch_ier, DMA_CH_IER_TIE_POS,
  2336. DMA_CH_IER_TIE_LEN, 1);
  2337. break;
  2338. case XLGMAC_INT_DMA_CH_SR_TPS:
  2339. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2340. dma_ch_ier, DMA_CH_IER_TXSE_POS,
  2341. DMA_CH_IER_TXSE_LEN, 1);
  2342. break;
  2343. case XLGMAC_INT_DMA_CH_SR_TBU:
  2344. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2345. dma_ch_ier, DMA_CH_IER_TBUE_POS,
  2346. DMA_CH_IER_TBUE_LEN, 1);
  2347. break;
  2348. case XLGMAC_INT_DMA_CH_SR_RI:
  2349. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2350. dma_ch_ier, DMA_CH_IER_RIE_POS,
  2351. DMA_CH_IER_RIE_LEN, 1);
  2352. break;
  2353. case XLGMAC_INT_DMA_CH_SR_RBU:
  2354. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2355. dma_ch_ier, DMA_CH_IER_RBUE_POS,
  2356. DMA_CH_IER_RBUE_LEN, 1);
  2357. break;
  2358. case XLGMAC_INT_DMA_CH_SR_RPS:
  2359. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2360. dma_ch_ier, DMA_CH_IER_RSE_POS,
  2361. DMA_CH_IER_RSE_LEN, 1);
  2362. break;
  2363. case XLGMAC_INT_DMA_CH_SR_TI_RI:
  2364. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2365. dma_ch_ier, DMA_CH_IER_TIE_POS,
  2366. DMA_CH_IER_TIE_LEN, 1);
  2367. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2368. dma_ch_ier, DMA_CH_IER_RIE_POS,
  2369. DMA_CH_IER_RIE_LEN, 1);
  2370. break;
  2371. case XLGMAC_INT_DMA_CH_SR_FBE:
  2372. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2373. dma_ch_ier, DMA_CH_IER_FBEE_POS,
  2374. DMA_CH_IER_FBEE_LEN, 1);
  2375. break;
  2376. case XLGMAC_INT_DMA_ALL:
  2377. dma_ch_ier |= channel->saved_ier;
  2378. break;
  2379. default:
  2380. return -1;
  2381. }
  2382. writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
  2383. return 0;
  2384. }
  2385. static int xlgmac_disable_int(struct xlgmac_channel *channel,
  2386. enum xlgmac_int int_id)
  2387. {
  2388. unsigned int dma_ch_ier;
  2389. dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
  2390. switch (int_id) {
  2391. case XLGMAC_INT_DMA_CH_SR_TI:
  2392. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2393. dma_ch_ier, DMA_CH_IER_TIE_POS,
  2394. DMA_CH_IER_TIE_LEN, 0);
  2395. break;
  2396. case XLGMAC_INT_DMA_CH_SR_TPS:
  2397. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2398. dma_ch_ier, DMA_CH_IER_TXSE_POS,
  2399. DMA_CH_IER_TXSE_LEN, 0);
  2400. break;
  2401. case XLGMAC_INT_DMA_CH_SR_TBU:
  2402. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2403. dma_ch_ier, DMA_CH_IER_TBUE_POS,
  2404. DMA_CH_IER_TBUE_LEN, 0);
  2405. break;
  2406. case XLGMAC_INT_DMA_CH_SR_RI:
  2407. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2408. dma_ch_ier, DMA_CH_IER_RIE_POS,
  2409. DMA_CH_IER_RIE_LEN, 0);
  2410. break;
  2411. case XLGMAC_INT_DMA_CH_SR_RBU:
  2412. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2413. dma_ch_ier, DMA_CH_IER_RBUE_POS,
  2414. DMA_CH_IER_RBUE_LEN, 0);
  2415. break;
  2416. case XLGMAC_INT_DMA_CH_SR_RPS:
  2417. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2418. dma_ch_ier, DMA_CH_IER_RSE_POS,
  2419. DMA_CH_IER_RSE_LEN, 0);
  2420. break;
  2421. case XLGMAC_INT_DMA_CH_SR_TI_RI:
  2422. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2423. dma_ch_ier, DMA_CH_IER_TIE_POS,
  2424. DMA_CH_IER_TIE_LEN, 0);
  2425. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2426. dma_ch_ier, DMA_CH_IER_RIE_POS,
  2427. DMA_CH_IER_RIE_LEN, 0);
  2428. break;
  2429. case XLGMAC_INT_DMA_CH_SR_FBE:
  2430. dma_ch_ier = XLGMAC_SET_REG_BITS(
  2431. dma_ch_ier, DMA_CH_IER_FBEE_POS,
  2432. DMA_CH_IER_FBEE_LEN, 0);
  2433. break;
  2434. case XLGMAC_INT_DMA_ALL:
  2435. channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
  2436. dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
  2437. break;
  2438. default:
  2439. return -1;
  2440. }
  2441. writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
  2442. return 0;
  2443. }
  2444. static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
  2445. {
  2446. unsigned int i, count;
  2447. u32 regval;
  2448. for (i = 0; i < pdata->tx_q_count; i++) {
  2449. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  2450. regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
  2451. MTL_Q_TQOMR_FTQ_LEN, 1);
  2452. writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  2453. }
  2454. /* Poll Until Poll Condition */
  2455. for (i = 0; i < pdata->tx_q_count; i++) {
  2456. count = 2000;
  2457. regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
  2458. regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
  2459. MTL_Q_TQOMR_FTQ_LEN);
  2460. while (--count && regval)
  2461. usleep_range(500, 600);
  2462. if (!count)
  2463. return -EBUSY;
  2464. }
  2465. return 0;
  2466. }
  2467. static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
  2468. {
  2469. u32 regval;
  2470. regval = readl(pdata->mac_regs + DMA_SBMR);
  2471. /* Set enhanced addressing mode */
  2472. regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
  2473. DMA_SBMR_EAME_LEN, 1);
  2474. /* Set the System Bus mode */
  2475. regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
  2476. DMA_SBMR_UNDEF_LEN, 1);
  2477. regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
  2478. DMA_SBMR_BLEN_256_LEN, 1);
  2479. writel(regval, pdata->mac_regs + DMA_SBMR);
  2480. }
/* Bring the hardware to a fully-configured state: flush the Tx queues,
 * then program the DMA, MTL and MAC layers in that order.
 *
 * Returns 0 on success, or the error from xlgmac_flush_tx_queues().
 *
 * NOTE(review): the call sequence follows the layered structure of the
 * device (DMA -> MTL -> MAC); keep additions within the matching section.
 */
static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	int ret;

	/* Flush Tx queues before reprogramming anything else */
	ret = xlgmac_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/* Initialize DMA related features */
	xlgmac_config_dma_bus(pdata);
	xlgmac_config_osp_mode(pdata);
	xlgmac_config_pblx8(pdata);
	xlgmac_config_tx_pbl_val(pdata);
	xlgmac_config_rx_pbl_val(pdata);
	xlgmac_config_rx_coalesce(pdata);
	xlgmac_config_tx_coalesce(pdata);
	xlgmac_config_rx_buffer_size(pdata);
	xlgmac_config_tso_mode(pdata);
	xlgmac_config_sph_mode(pdata);
	xlgmac_config_rss(pdata);
	/* Descriptor rings must exist before DMA interrupts are enabled */
	desc_ops->tx_desc_init(pdata);
	desc_ops->rx_desc_init(pdata);
	xlgmac_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	xlgmac_config_mtl_mode(pdata);
	xlgmac_config_queue_mapping(pdata);
	xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
	xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
	xlgmac_config_tx_fifo_size(pdata);
	xlgmac_config_rx_fifo_size(pdata);
	xlgmac_config_flow_control_threshold(pdata);
	xlgmac_config_rx_fep_enable(pdata);
	xlgmac_config_rx_fup_enable(pdata);
	xlgmac_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	xlgmac_config_mac_address(pdata);
	xlgmac_config_rx_mode(pdata);
	xlgmac_config_jumbo_enable(pdata);
	xlgmac_config_flow_control(pdata);
	xlgmac_config_mac_speed(pdata);
	xlgmac_config_checksum_offload(pdata);
	xlgmac_config_vlan_support(pdata);
	xlgmac_config_mmc(pdata);
	xlgmac_enable_mac_interrupts(pdata);

	return 0;
}
  2529. static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
  2530. {
  2531. unsigned int count = 2000;
  2532. u32 regval;
  2533. /* Issue a software reset */
  2534. regval = readl(pdata->mac_regs + DMA_MR);
  2535. regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
  2536. DMA_MR_SWR_LEN, 1);
  2537. writel(regval, pdata->mac_regs + DMA_MR);
  2538. usleep_range(10, 15);
  2539. /* Poll Until Poll Condition */
  2540. while (--count &&
  2541. XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
  2542. DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
  2543. usleep_range(500, 600);
  2544. if (!count)
  2545. return -EBUSY;
  2546. return 0;
  2547. }
/* Populate the hw_ops function table with the XLGMAC implementations.
 * Callers elsewhere in the driver invoke the hardware exclusively
 * through this table.
 */
void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
{
	/* Core lifecycle and datapath entry points */
	hw_ops->init = xlgmac_hw_init;
	hw_ops->exit = xlgmac_hw_exit;

	hw_ops->tx_complete = xlgmac_tx_complete;

	hw_ops->enable_tx = xlgmac_enable_tx;
	hw_ops->disable_tx = xlgmac_disable_tx;
	hw_ops->enable_rx = xlgmac_enable_rx;
	hw_ops->disable_rx = xlgmac_disable_rx;

	hw_ops->dev_xmit = xlgmac_dev_xmit;
	hw_ops->dev_read = xlgmac_dev_read;
	hw_ops->enable_int = xlgmac_enable_int;
	hw_ops->disable_int = xlgmac_disable_int;

	hw_ops->set_mac_address = xlgmac_set_mac_address;
	hw_ops->config_rx_mode = xlgmac_config_rx_mode;
	hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
	hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;

	/* For MII speed configuration */
	hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
	hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
	hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
	hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;

	/* For descriptor related operation */
	hw_ops->tx_desc_init = xlgmac_tx_desc_init;
	hw_ops->rx_desc_init = xlgmac_rx_desc_init;
	hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
	hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
	hw_ops->is_last_desc = xlgmac_is_last_desc;
	hw_ops->is_context_desc = xlgmac_is_context_desc;
	hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;

	/* For Flow Control */
	hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
	hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;

	/* For Vlan related config */
	hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
	hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
	hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
	hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
	hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;

	/* For RX coalescing */
	hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
	hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
	hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
	hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
	hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
	hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_ops->config_osp_mode = xlgmac_config_osp_mode;

	/* For RX and TX PBL config */
	hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
	hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
	hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
	hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
	hw_ops->config_pblx8 = xlgmac_config_pblx8;

	/* For MMC statistics support */
	hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
	hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
	hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;

	/* For Receive Side Scaling */
	hw_ops->enable_rss = xlgmac_enable_rss;
	hw_ops->disable_rss = xlgmac_disable_rss;
	hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
	hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
}