// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2019 Oleksij Rempel <[email protected]>
 *
 * List of authors who contributed to this driver before mainlining:
 * Alexander Couzens <[email protected]>
 * Christian Lamparter <[email protected]>
 * Chuanhong Guo <[email protected]>
 * Daniel F. Dickinson <[email protected]>
 * David Bauer <[email protected]>
 * Felix Fietkau <[email protected]>
 * Gabor Juhos <[email protected]>
 * Hauke Mehrtens <[email protected]>
 * Johann Neuhauser <[email protected]>
 * John Crispin <[email protected]>
 * Jo-Philipp Wich <[email protected]>
 * Koen Vandeputte <[email protected]>
 * Lucian Cristian <[email protected]>
 * Matt Merhar <[email protected]>
 * Milan Krstic <[email protected]>
 * Petr Štetiar <[email protected]>
 * Rosen Penev <[email protected]>
 * Stephen Walker <[email protected]>
 * Vittorio Gambaletta <[email protected]>
 * Weijie Gao <[email protected]>
 * Imre Kaloz <[email protected]>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <net/selftests.h>

/* For our NAPI weight, bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT 32
#define AG71XX_OOM_REFILL (1 + HZ / 10)

#define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX (AG71XX_INT_TX_PS)
#define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN 1540

#define AG71XX_TX_RING_SPLIT 512
#define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
					       AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT 128
#define AG71XX_RX_RING_SIZE_DEFAULT 256

#define AG71XX_MDIO_RETRY 1000
#define AG71XX_MDIO_DELAY 5
#define AG71XX_MDIO_MAX_CLK 5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1 0x0000
#define MAC_CFG1_TXE BIT(0) /* Tx Enable */
#define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */
#define MAC_CFG1_RXE BIT(2) /* Rx Enable */
#define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */
#define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */
#define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */
#define MAC_CFG1_SR BIT(31) /* Soft Reset */
#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
		       MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2 0x0004
#define MAC_CFG2_FDX BIT(0)
#define MAC_CFG2_PAD_CRC_EN BIT(2)
#define MAC_CFG2_LEN_CHECK BIT(4)
#define MAC_CFG2_IF_1000 BIT(9)
#define MAC_CFG2_IF_10_100 BIT(8)

#define AG71XX_REG_MAC_MFL 0x0010

#define AG71XX_REG_MII_CFG 0x0020
#define MII_CFG_CLK_DIV_4 0
#define MII_CFG_CLK_DIV_6 2
#define MII_CFG_CLK_DIV_8 3
#define MII_CFG_CLK_DIV_10 4
#define MII_CFG_CLK_DIV_14 5
#define MII_CFG_CLK_DIV_20 6
#define MII_CFG_CLK_DIV_28 7
#define MII_CFG_CLK_DIV_34 8
#define MII_CFG_CLK_DIV_42 9
#define MII_CFG_CLK_DIV_50 10
#define MII_CFG_CLK_DIV_58 11
#define MII_CFG_CLK_DIV_66 12
#define MII_CFG_CLK_DIV_74 13
#define MII_CFG_CLK_DIV_82 14
#define MII_CFG_CLK_DIV_98 15
#define MII_CFG_RESET BIT(31)

#define AG71XX_REG_MII_CMD 0x0024
#define MII_CMD_READ BIT(0)

#define AG71XX_REG_MII_ADDR 0x0028
#define MII_ADDR_SHIFT 8

#define AG71XX_REG_MII_CTRL 0x002c
#define AG71XX_REG_MII_STATUS 0x0030
#define AG71XX_REG_MII_IND 0x0034
#define MII_IND_BUSY BIT(0)
#define MII_IND_INVALID BIT(2)

#define AG71XX_REG_MAC_IFCTL 0x0038
#define MAC_IFCTL_SPEED BIT(16)

#define AG71XX_REG_MAC_ADDR1 0x0040
#define AG71XX_REG_MAC_ADDR2 0x0044

#define AG71XX_REG_FIFO_CFG0 0x0048
#define FIFO_CFG0_WTM BIT(0) /* Watermark Module */
#define FIFO_CFG0_RXS BIT(1) /* Rx System Module */
#define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */
#define FIFO_CFG0_TXS BIT(3) /* Tx System Module */
#define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */
#define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
		       | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT 8

#define AG71XX_REG_FIFO_CFG1 0x004c
#define AG71XX_REG_FIFO_CFG2 0x0050
#define AG71XX_REG_FIFO_CFG3 0x0054
#define AG71XX_REG_FIFO_CFG4 0x0058
#define FIFO_CFG4_DE BIT(0) /* Drop Event */
#define FIFO_CFG4_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG4_FC BIT(2) /* False Carrier */
#define FIFO_CFG4_CE BIT(3) /* Code Error */
#define FIFO_CFG4_CR BIT(4) /* CRC error */
#define FIFO_CFG4_LM BIT(5) /* Length Mismatch */
#define FIFO_CFG4_LO BIT(6) /* Length out of range */
#define FIFO_CFG4_OK BIT(7) /* Packet is OK */
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
#define FIFO_CFG4_LE BIT(11) /* Long Event */
#define FIFO_CFG4_CF BIT(12) /* Control Frame */
#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			FIFO_CFG4_VT)

#define AG71XX_REG_FIFO_CFG5 0x005c
#define FIFO_CFG5_DE BIT(0) /* Drop Event */
#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG5_FC BIT(2) /* False Carrier */
#define FIFO_CFG5_CE BIT(3) /* Code Error */
#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
#define FIFO_CFG5_DR BIT(9) /* Dribble */
#define FIFO_CFG5_CF BIT(10) /* Control Frame */
#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
#define FIFO_CFG5_LE BIT(14) /* Long Event */
#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
#define FIFO_CFG5_16 BIT(16) /* unknown */
#define FIFO_CFG5_17 BIT(17) /* unknown */
#define FIFO_CFG5_SF BIT(18) /* Short Frame */
#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL 0x0180
#define TX_CTRL_TXE BIT(0) /* Tx Enable */

#define AG71XX_REG_TX_DESC 0x0184
#define AG71XX_REG_TX_STATUS 0x0188
#define TX_STATUS_PS BIT(0) /* Packet Sent */
#define TX_STATUS_UR BIT(1) /* Tx Underrun */
#define TX_STATUS_BE BIT(3) /* Bus Error */

#define AG71XX_REG_RX_CTRL 0x018c
#define RX_CTRL_RXE BIT(0) /* Rx Enable */

#define AG71XX_DMA_RETRY 10
#define AG71XX_DMA_DELAY 1

#define AG71XX_REG_RX_DESC 0x0190
#define AG71XX_REG_RX_STATUS 0x0194
#define RX_STATUS_PR BIT(0) /* Packet Received */
#define RX_STATUS_OF BIT(2) /* Rx Overflow */
#define RX_STATUS_BE BIT(3) /* Bus Error */

#define AG71XX_REG_INT_ENABLE 0x0198
#define AG71XX_REG_INT_STATUS 0x019c
#define AG71XX_INT_TX_PS BIT(0)
#define AG71XX_INT_TX_UR BIT(1)
#define AG71XX_INT_TX_BE BIT(3)
#define AG71XX_INT_RX_PR BIT(4)
#define AG71XX_INT_RX_OF BIT(6)
#define AG71XX_INT_RX_BE BIT(7)

#define AG71XX_REG_FIFO_DEPTH 0x01a8
#define AG71XX_REG_RX_SM 0x01b0
#define AG71XX_REG_TX_SM 0x01b4

#define AG71XX_DEFAULT_MSG_ENABLE \
	(NETIF_MSG_DRV \
	| NETIF_MSG_PROBE \
	| NETIF_MSG_LINK \
	| NETIF_MSG_TIMER \
	| NETIF_MSG_IFDOWN \
	| NETIF_MSG_IFUP \
	| NETIF_MSG_RX_ERR \
	| NETIF_MSG_TX_ERR)

struct ag71xx_statistic {
	unsigned short offset;
	u32 mask;
	const char name[ETH_GSTRING_LEN];
};

static const struct ag71xx_statistic ag71xx_statistics[] = {
	{ 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
	{ 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
	{ 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
	{ 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
	{ 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
	{ 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
	{ 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
	{ 0x009C, GENMASK(23, 0), "Rx Byte", },
	{ 0x00A0, GENMASK(17, 0), "Rx Packet", },
	{ 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
	{ 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
	{ 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
	{ 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
	{ 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
	{ 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
	{ 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
	{ 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
	{ 0x00C4, GENMASK(11, 0), "Rx Code Error", },
	{ 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
	{ 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
	{ 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
	{ 0x00D4, GENMASK(11, 0), "Rx Fragments", },
	{ 0x00D8, GENMASK(11, 0), "Rx Jabber", },
	{ 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
	{ 0x00E0, GENMASK(23, 0), "Tx Byte", },
	{ 0x00E4, GENMASK(17, 0), "Tx Packet", },
	{ 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
	{ 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
	{ 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
	{ 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
	{ 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
	{ 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
	{ 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
	{ 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
	{ 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
	{ 0x010C, GENMASK(12, 0), "Tx Total Collision", },
	{ 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
	{ 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
	{ 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
	{ 0x011C, GENMASK(11, 0), "Tx FCS Error", },
	{ 0x0120, GENMASK(11, 0), "Tx Control Frame", },
	{ 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
	{ 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
	{ 0x012C, GENMASK(11, 0), "Tx Fragment", },
};

#define DESC_EMPTY BIT(31)
#define DESC_MORE BIT(24)
#define DESC_PKTLEN_M 0xfff
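
/* Hardware DMA descriptor as seen by the MAC: 'data' holds the bus address
 * of the buffer, 'ctrl' carries the DESC_EMPTY/DESC_MORE flags plus the
 * buffer length in its low bits (masked with the per-chip desc_pktlen_mask),
 * and 'next' chains the descriptors into a ring.
 */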
struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE roundup(sizeof(struct ag71xx_desc), \
				 L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	phy_interface_t phy_if_mode;
	struct phylink *phylink;
	struct phylink_config phylink_config;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}
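
/* All descriptors of a ring live in one coherent allocation, each padded
 * out to a full cache line (AG71XX_DESC_SIZE), so ring slots are indexed
 * in bytes rather than as a plain C array.
 */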
static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct ag71xx *ag = netdev_priv(ndev);

	strscpy(info->driver, "ag71xx", sizeof(info->driver));
	strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
		sizeof(info->bus_info));
}

static int ag71xx_get_link_ksettings(struct net_device *ndev,
				     struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(ag->phylink, kset);
}

static int ag71xx_set_link_ksettings(struct net_device *ndev,
				     const struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(ag->phylink, kset);
}

static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_nway_reset(ag->phylink);
}

static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
					  struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(ag->phylink, pause);
}

static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
					 struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(ag->phylink, pause);
}

static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       ag71xx_statistics[i].name, ETH_GSTRING_LEN);
		break;
	case ETH_SS_TEST:
		net_selftest_get_strings(data);
		break;
	}
}

static void ag71xx_ethtool_get_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct ag71xx *ag = netdev_priv(ndev);
	int i;

	for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
		*data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
				& ag71xx_statistics[i].mask;
}

static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ag71xx_statistics);
	case ETH_SS_TEST:
		return net_selftest_get_count();
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops ag71xx_ethtool_ops = {
	.get_drvinfo = ag71xx_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = ag71xx_get_link_ksettings,
	.set_link_ksettings = ag71xx_set_link_ksettings,
	.nway_reset = ag71xx_ethtool_nway_reset,
	.get_pauseparam = ag71xx_ethtool_get_pauseparam,
	.set_pauseparam = ag71xx_ethtool_set_pauseparam,
	.get_strings = ag71xx_ethtool_get_strings,
	.get_ethtool_stats = ag71xx_ethtool_get_stats,
	.get_sset_count = ag71xx_ethtool_get_sset_count,
	.self_test = net_selftest,
};
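
/* Poll the MII indicator register until the previous MDIO transaction has
 * completed. Worst case this busy-waits for AG71XX_MDIO_RETRY iterations
 * of 2 * AG71XX_MDIO_DELAY microseconds each, i.e. about 10 ms.
 */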
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}

static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};
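
/* Pick the smallest divider that keeps the resulting MDC frequency at or
 * below AG71XX_MDIO_MAX_CLK (5 MHz); the table index doubles as the value
 * programmed into the MII_CFG register.
 */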
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;
	ag->mii_bus = NULL;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		err = PTR_ERR(ag->mdio_reset);
		goto mdio_err_put_clk;
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}

	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);

	clk_disable_unprepare(ag->clk_mdio);
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
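
/* Heuristic behind the tx_hang_workaround: only consider the DMA engine
 * stuck if nothing was transmitted for ~100 ms while the carrier is up and
 * the RX/TX state machine and FIFO depth registers show one of the
 * known-bad patterns. The magic state-machine values are hardware-specific
 * and not publicly documented.
 */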
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
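
/* Reap descriptors the hardware is done with. With 'flush' set all
 * descriptors are reclaimed regardless of their state (used when tearing
 * the ring down); one TX_STATUS_PS ack is written per reclaimed descriptor.
 */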
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		napi_consume_skb(skb, budget);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
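
/* Partial MAC reset used on link reconfiguration. MII_CFG and the current
 * RX descriptor address survive the reset by being saved up front and
 * restored afterwards, while the TX ring is restarted from its base.
 */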
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true, 0);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}
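
/* Note on the FIFO_CFG3 value below: when TX descriptor splitting is in
 * use, the upper halfword of fifodata[2] seems to set a TX FIFO threshold
 * in 32-bit words derived from the split size; the constant is inherited
 * from the pre-mainline driver and is not otherwise documented.
 */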
static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	if (phylink_autoneg_inband(mode))
		return;

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}

static void ag71xx_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	ag71xx_hw_stop(ag);
}

static void ag71xx_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	u32 cfg1, cfg2;
	u32 ifctl;
	u32 fifo5;

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= duplex ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		return;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
	cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
	if (tx_pause)
		cfg1 |= MAC_CFG1_TFC;

	if (rx_pause)
		cfg1 |= MAC_CFG1_RFC;

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);

	ag71xx_hw_start(ag);
}

static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
	.validate = phylink_generic_validate,
	.mac_config = ag71xx_mac_config,
	.mac_link_down = ag71xx_mac_link_down,
	.mac_link_up = ag71xx_mac_link_up,
};

static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;

	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;
	ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD;

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
	    ag71xx_is(ag, AR9340) ||
	    ag71xx_is(ag, QCA9530) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_MII,
			  ag->phylink_config.supported_interfaces);

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
	    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
	    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  ag->phylink_config.supported_interfaces);

	if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  ag->phylink_config.supported_interfaces);

	if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  ag->phylink_config.supported_interfaces);

	if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  ag->phylink_config.supported_interfaces);

	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	ag->phylink = phylink;
	return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
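
/* Attach a fresh receive buffer to one RX ring slot. The page fragment is
 * sized for the frame plus skb_shared_info so it can later be wrapped into
 * an skb without copying; 'alloc' is netdev_alloc_frag() at ring init time
 * and napi_alloc_frag() on the refill path.
 */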
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}
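
/* TX and RX share a single buf[] array and one coherent descriptor block:
 * the first BIT(tx->order) entries belong to the TX ring, the remainder to
 * the RX ring.
 */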
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
	if (ret) {
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
			  ret);
		return ret;
	}

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phylink_start(ag->phylink);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	phylink_disconnect_phy(ag->phylink);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_stop(ag->phylink);
	phylink_disconnect_phy(ag->phylink);
	ag71xx_hw_disable(ag);

	return 0;
}
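
/* Map one frame onto TX descriptors, splitting it into chunks of at most
 * ring->desc_split bytes each. Returns the number of descriptors used, or
 * -1 if the ring does not have enough empty slots.
 */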
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
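
/* Transmit path. ag71xx_fill_dma_desc() leaves the first descriptor of the
 * frame marked DESC_EMPTY; clearing that bit here, after the whole chain
 * has been set up, hands the frame to the hardware in one step.
 */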
  1217. static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
  1218. struct net_device *ndev)
  1219. {
  1220. int i, n, ring_min, ring_mask, ring_size;
  1221. struct ag71xx *ag = netdev_priv(ndev);
  1222. struct ag71xx_ring *ring;
  1223. struct ag71xx_desc *desc;
  1224. dma_addr_t dma_addr;
  1225. ring = &ag->tx_ring;
  1226. ring_mask = BIT(ring->order) - 1;
  1227. ring_size = BIT(ring->order);
  1228. if (skb->len <= 4) {
  1229. netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
  1230. goto err_drop;
  1231. }
  1232. dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
  1233. DMA_TO_DEVICE);
  1234. i = ring->curr & ring_mask;
  1235. desc = ag71xx_ring_desc(ring, i);
  1236. /* setup descriptor fields */
  1237. n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
  1238. skb->len & ag->dcfg->desc_pktlen_mask);
  1239. if (n < 0)
  1240. goto err_drop_unmap;
  1241. i = (ring->curr + n - 1) & ring_mask;
  1242. ring->buf[i].tx.len = skb->len;
  1243. ring->buf[i].tx.skb = skb;
  1244. netdev_sent_queue(ndev, skb->len);
  1245. skb_tx_timestamp(skb);
  1246. desc->ctrl &= ~DESC_EMPTY;
  1247. ring->curr += n;
  1248. /* flush descriptor */
  1249. wmb();
  1250. ring_min = 2;
  1251. if (ring->desc_split)
  1252. ring_min *= AG71XX_TX_RING_DS_PER_PKT;
  1253. if (ring->curr - ring->dirty >= ring_size - ring_min) {
  1254. netif_dbg(ag, tx_err, ndev, "tx queue full\n");
  1255. netif_stop_queue(ndev);
  1256. }
  1257. netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
  1258. /* enable TX engine */
  1259. ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
  1260. return NETDEV_TX_OK;
  1261. err_drop_unmap:
  1262. dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
  1263. err_drop:
  1264. ndev->stats.tx_dropped++;
  1265. dev_kfree_skb(skb);
  1266. return NETDEV_TX_OK;
  1267. }
  1268. static void ag71xx_oom_timer_handler(struct timer_list *t)
  1269. {
  1270. struct ag71xx *ag = from_timer(ag, t, oom_timer);
  1271. napi_schedule(&ag->napi);
  1272. }
  1273. static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
  1274. {
  1275. struct ag71xx *ag = netdev_priv(ndev);
  1276. netif_err(ag, tx_err, ndev, "tx timeout\n");
  1277. schedule_delayed_work(&ag->restart_work, 1);
  1278. }
  1279. static void ag71xx_restart_work_func(struct work_struct *work)
  1280. {
  1281. struct ag71xx *ag = container_of(work, struct ag71xx,
  1282. restart_work.work);
  1283. rtnl_lock();
  1284. ag71xx_hw_disable(ag);
  1285. ag71xx_hw_enable(ag);
  1286. phylink_stop(ag->phylink);
  1287. phylink_start(ag->phylink);
  1288. rtnl_unlock();
  1289. }
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct ag71xx_ring *ring;
	struct list_head rx_list;
	struct sk_buff *skb;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		/* ack one received-packet event in the RX status register */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		/* wrap the existing DMA buffer in an skb to avoid a copy */
		skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}
		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry(skb, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

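/* NAPI poll: reap completed TX descriptors, then receive up to @limit
 * frames. Returns 0 and arms the OOM timer if the RX refill ran dry,
 * @limit while work remains, or the RX count once both rings are idle
 * and interrupts have been re-enabled.
 */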
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false, limit);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	/* a NULL rx_buf at the dirty index means the refill ran out of memory */
	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

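/* Hard IRQ handler: acknowledge bus errors and, on a poll interrupt,
 * mask it and hand processing over to NAPI.
 */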
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;

	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}

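/* The MAC enforces the maximum frame length in hardware, so an MTU
 * change only needs to update the MAC_MFL register.
 */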
static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_eth_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

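/* Physical base addresses of the two GMACs on AR7100-class SoCs; the
 * MAC index is derived by matching the MMIO resource against this table.
 */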
static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};

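/* Bind the platform device: map MMIO, request the IRQ, size the rings,
 * bring up MDIO and phylink, then register the net_device.
 */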
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	int tx_size, err, i;
	struct ag71xx *ag;

	if (!np)
		return -ENODEV;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
	if (IS_ERR(ag->clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(ag->clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ag->mac_base)
		return -ENOMEM;

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		return err;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;
	ndev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->min_mtu = 68;
	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	err = of_get_ethdev_address(np, ndev);
	if (err) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_hw_addr_random(ndev);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		return err;
	}

	netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
			      AG71XX_NAPI_WEIGHT);

	err = clk_prepare_enable(ag->clk_eth);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
		return err;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		goto err_put_clk;

	platform_set_drvdata(pdev, ndev);

	err = ag71xx_phylink_setup(ag);
	if (err) {
		netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
		goto err_mdio_remove;
	}

	err = register_netdev(ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		goto err_mdio_remove;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;

err_mdio_remove:
	ag71xx_mdio_remove(ag);
err_put_clk:
	clk_disable_unprepare(ag->clk_eth);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

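/* Per-SoC FIFO configuration values programmed into the MAC's FIFO_CFG
 * registers during hardware setup.
 */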
static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};

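/* Export the OF match table so the module can be autoloaded from the
 * device-tree compatible strings.
 */
MODULE_DEVICE_TABLE(of, ag71xx_match);
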
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= "ag71xx",
		.of_match_table = ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");