sunhme.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
  3. * auto carrier detecting ethernet driver. Also known as the
  4. * "Happy Meal Ethernet" found on SunSwift SBUS cards.
  5. *
  6. * Copyright (C) 1996, 1998, 1999, 2002, 2003,
  7. * 2006, 2008 David S. Miller (davem@davemloft.net)
  8. *
  9. * Changes :
  10. * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
  11. * - port to non-sparc architectures. Tested only on x86 and
  12. * only currently works with QFE PCI cards.
  13. * - ability to specify the MAC address at module load time by passing this
  14. * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
  15. */
  16. #include <linux/module.h>
  17. #include <linux/kernel.h>
  18. #include <linux/types.h>
  19. #include <linux/fcntl.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/ioport.h>
  22. #include <linux/in.h>
  23. #include <linux/slab.h>
  24. #include <linux/string.h>
  25. #include <linux/delay.h>
  26. #include <linux/init.h>
  27. #include <linux/ethtool.h>
  28. #include <linux/mii.h>
  29. #include <linux/crc32.h>
  30. #include <linux/random.h>
  31. #include <linux/errno.h>
  32. #include <linux/netdevice.h>
  33. #include <linux/etherdevice.h>
  34. #include <linux/skbuff.h>
  35. #include <linux/mm.h>
  36. #include <linux/bitops.h>
  37. #include <linux/dma-mapping.h>
  38. #include <asm/io.h>
  39. #include <asm/dma.h>
  40. #include <asm/byteorder.h>
  41. #ifdef CONFIG_SPARC
  42. #include <linux/of.h>
  43. #include <linux/of_device.h>
  44. #include <asm/idprom.h>
  45. #include <asm/openprom.h>
  46. #include <asm/oplib.h>
  47. #include <asm/prom.h>
  48. #include <asm/auxio.h>
  49. #endif
  50. #include <linux/uaccess.h>
  51. #include <asm/irq.h>
  52. #ifdef CONFIG_PCI
  53. #include <linux/pci.h>
  54. #endif
  55. #include "sunhme.h"
  56. #define DRV_NAME "sunhme"
  57. MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  58. MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
  59. MODULE_LICENSE("GPL");
  60. static int macaddr[6];
  61. /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
  62. module_param_array(macaddr, int, NULL, 0);
  63. MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
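/* Usage sketch (illustrative, not part of the original source): the six
 * comma-separated byte values are parsed into macaddr[] above, e.g.
 *
 *   modprobe sunhme macaddr=0x08,0x00,0x20,0x30,0x40,0x50
 */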
  64. #ifdef CONFIG_SBUS
  65. static struct quattro *qfe_sbus_list;
  66. #endif
  67. #ifdef CONFIG_PCI
  68. static struct quattro *qfe_pci_list;
  69. #endif
  70. #define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
  71. #define HMD hme_debug
  72. /* "Auto Switch Debug" aka phy debug */
  73. #if 1
  74. #define ASD hme_debug
  75. #else
  76. #define ASD(...)
  77. #endif
  78. #if 0
  79. struct hme_tx_logent {
  80. unsigned int tstamp;
  81. int tx_new, tx_old;
  82. unsigned int action;
  83. #define TXLOG_ACTION_IRQ 0x01
  84. #define TXLOG_ACTION_TXMIT 0x02
  85. #define TXLOG_ACTION_TBUSY 0x04
  86. #define TXLOG_ACTION_NBUFS 0x08
  87. unsigned int status;
  88. };
  89. #define TX_LOG_LEN 128
  90. static struct hme_tx_logent tx_log[TX_LOG_LEN];
  91. static int txlog_cur_entry;
  92. static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
  93. {
  94. struct hme_tx_logent *tlp;
  95. unsigned long flags;
  96. local_irq_save(flags);
  97. tlp = &tx_log[txlog_cur_entry];
  98. tlp->tstamp = (unsigned int)jiffies;
  99. tlp->tx_new = hp->tx_new;
  100. tlp->tx_old = hp->tx_old;
  101. tlp->action = a;
  102. tlp->status = s;
  103. txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
  104. local_irq_restore(flags);
  105. }
  106. static __inline__ void tx_dump_log(void)
  107. {
  108. int i, this;
  109. this = txlog_cur_entry;
  110. for (i = 0; i < TX_LOG_LEN; i++) {
  111. pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
  112. tx_log[this].tstamp,
  113. tx_log[this].tx_new, tx_log[this].tx_old,
  114. tx_log[this].action, tx_log[this].status);
  115. this = (this + 1) & (TX_LOG_LEN - 1);
  116. }
  117. }
  118. #else
  119. #define tx_add_log(hp, a, s)
  120. #define tx_dump_log()
  121. #endif
  122. #define DEFAULT_IPG0 16 /* For lance-mode only */
  123. #define DEFAULT_IPG1 8 /* For all modes */
  124. #define DEFAULT_IPG2 4 /* For all modes */
  125. #define DEFAULT_JAMSIZE 4 /* Toe jam */
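/* DEFAULT_JAMSIZE, DEFAULT_IPG1 and DEFAULT_IPG2 are programmed into the
 * BigMAC (BMAC_JSIZE, BMAC_IGAP1, BMAC_IGAP2) in happy_meal_init() below.
 */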
  126. /* NOTE: In the descriptor writes one _must_ write the address
  127. * member _first_. The card must not be allowed to see
  128. * the updated descriptor flags until the address is
  129. * correct. I've added a write memory barrier between
  130. * the two stores so that I can sleep well at night... -DaveM
  131. */
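/* The write_rxd/write_txd helpers below follow exactly that pattern:
 * store the DMA address, issue dma_wmb(), then store the flags word
 * that hands the descriptor to the chip.
 */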
  132. #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
  133. static void sbus_hme_write32(void __iomem *reg, u32 val)
  134. {
  135. sbus_writel(val, reg);
  136. }
  137. static u32 sbus_hme_read32(void __iomem *reg)
  138. {
  139. return sbus_readl(reg);
  140. }
  141. static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
  142. {
  143. rxd->rx_addr = (__force hme32)addr;
  144. dma_wmb();
  145. rxd->rx_flags = (__force hme32)flags;
  146. }
  147. static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
  148. {
  149. txd->tx_addr = (__force hme32)addr;
  150. dma_wmb();
  151. txd->tx_flags = (__force hme32)flags;
  152. }
  153. static u32 sbus_hme_read_desc32(hme32 *p)
  154. {
  155. return (__force u32)*p;
  156. }
  157. static void pci_hme_write32(void __iomem *reg, u32 val)
  158. {
  159. writel(val, reg);
  160. }
  161. static u32 pci_hme_read32(void __iomem *reg)
  162. {
  163. return readl(reg);
  164. }
  165. static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
  166. {
  167. rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
  168. dma_wmb();
  169. rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
  170. }
  171. static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
  172. {
  173. txd->tx_addr = (__force hme32)cpu_to_le32(addr);
  174. dma_wmb();
  175. txd->tx_flags = (__force hme32)cpu_to_le32(flags);
  176. }
  177. static u32 pci_hme_read_desc32(hme32 *p)
  178. {
  179. return le32_to_cpup((__le32 *)p);
  180. }
  181. #define hme_write32(__hp, __reg, __val) \
  182. ((__hp)->write32((__reg), (__val)))
  183. #define hme_read32(__hp, __reg) \
  184. ((__hp)->read32(__reg))
  185. #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
  186. ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
  187. #define hme_write_txd(__hp, __txd, __flags, __addr) \
  188. ((__hp)->write_txd((__txd), (__flags), (__addr)))
  189. #define hme_read_desc32(__hp, __p) \
  190. ((__hp)->read_desc32(__p))
  191. #else
  192. #ifdef CONFIG_SBUS
  193. /* SBUS only compilation */
  194. #define hme_write32(__hp, __reg, __val) \
  195. sbus_writel((__val), (__reg))
  196. #define hme_read32(__hp, __reg) \
  197. sbus_readl(__reg)
  198. #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
  199. do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
  200. dma_wmb(); \
  201. (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
  202. } while(0)
  203. #define hme_write_txd(__hp, __txd, __flags, __addr) \
  204. do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
  205. dma_wmb(); \
  206. (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
  207. } while(0)
  208. #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
  209. #else
  210. /* PCI only compilation */
  211. #define hme_write32(__hp, __reg, __val) \
  212. writel((__val), (__reg))
  213. #define hme_read32(__hp, __reg) \
  214. readl(__reg)
  215. #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
  216. do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
  217. dma_wmb(); \
  218. (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
  219. } while(0)
  220. #define hme_write_txd(__hp, __txd, __flags, __addr) \
  221. do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
  222. dma_wmb(); \
  223. (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
  224. } while(0)
  225. static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
  226. {
  227. return le32_to_cpup((__le32 *)p);
  228. }
  229. #endif
  230. #endif
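/* Summary of the accessor strategy above: when both CONFIG_SBUS and
 * CONFIG_PCI are enabled, register and descriptor accesses dispatch
 * through per-device function pointers (hp->write32, hp->write_rxd,
 * etc.); single-bus builds collapse the same operations into the
 * bus-specific macros directly, avoiding the indirection.
 */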
  231. /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
  232. static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
  233. {
  234. hme_write32(hp, tregs + TCVR_BBDATA, bit);
  235. hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
  236. hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
  237. }
  238. #if 0
  239. static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
  240. {
  241. u32 ret;
  242. hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
  243. hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
  244. ret = hme_read32(hp, tregs + TCVR_CFG);
  245. if (internal)
  246. ret &= TCV_CFG_MDIO0;
  247. else
  248. ret &= TCV_CFG_MDIO1;
  249. return ret;
  250. }
  251. #endif
  252. static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
  253. {
  254. u32 retval;
  255. hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
  256. udelay(1);
  257. retval = hme_read32(hp, tregs + TCVR_CFG);
  258. if (internal)
  259. retval &= TCV_CFG_MDIO0;
  260. else
  261. retval &= TCV_CFG_MDIO1;
  262. hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
  263. return retval;
  264. }
  265. #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
  266. static int happy_meal_bb_read(struct happy_meal *hp,
  267. void __iomem *tregs, int reg)
  268. {
  269. u32 tmp;
  270. int retval = 0;
  271. int i;
  272. /* Enable the MIF BitBang outputs. */
  273. hme_write32(hp, tregs + TCVR_BBOENAB, 1);
  274. /* Force BitBang into the idle state. */
  275. for (i = 0; i < 32; i++)
  276. BB_PUT_BIT(hp, tregs, 1);
  277. /* Give it the read sequence. */
  278. BB_PUT_BIT(hp, tregs, 0);
  279. BB_PUT_BIT(hp, tregs, 1);
  280. BB_PUT_BIT(hp, tregs, 1);
  281. BB_PUT_BIT(hp, tregs, 0);
  282. /* Give it the PHY address. */
  283. tmp = hp->paddr & 0xff;
  284. for (i = 4; i >= 0; i--)
  285. BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
  286. /* Tell it what register we want to read. */
  287. tmp = (reg & 0xff);
  288. for (i = 4; i >= 0; i--)
  289. BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
  290. /* Close down the MIF BitBang outputs. */
  291. hme_write32(hp, tregs + TCVR_BBOENAB, 0);
  292. /* Now read in the value. */
  293. (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
  294. for (i = 15; i >= 0; i--)
  295. retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
  296. (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
  297. (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
  298. (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
  299. ASD("reg=%d value=%x\n", reg, retval);
  300. return retval;
  301. }
  302. static void happy_meal_bb_write(struct happy_meal *hp,
  303. void __iomem *tregs, int reg,
  304. unsigned short value)
  305. {
  306. u32 tmp;
  307. int i;
  308. ASD("reg=%d value=%x\n", reg, value);
  309. /* Enable the MIF BitBang outputs. */
  310. hme_write32(hp, tregs + TCVR_BBOENAB, 1);
  311. /* Force BitBang into the idle state. */
  312. for (i = 0; i < 32; i++)
  313. BB_PUT_BIT(hp, tregs, 1);
  314. /* Give it write sequence. */
  315. BB_PUT_BIT(hp, tregs, 0);
  316. BB_PUT_BIT(hp, tregs, 1);
  317. BB_PUT_BIT(hp, tregs, 0);
  318. BB_PUT_BIT(hp, tregs, 1);
  319. /* Give it the PHY address. */
  320. tmp = (hp->paddr & 0xff);
  321. for (i = 4; i >= 0; i--)
  322. BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
  323. /* Tell it what register we will be writing. */
  324. tmp = (reg & 0xff);
  325. for (i = 4; i >= 0; i--)
  326. BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
  327. /* Tell it to become ready for the bits. */
  328. BB_PUT_BIT(hp, tregs, 1);
  329. BB_PUT_BIT(hp, tregs, 0);
  330. for (i = 15; i >= 0; i--)
  331. BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
  332. /* Close down the MIF BitBang outputs. */
  333. hme_write32(hp, tregs + TCVR_BBOENAB, 0);
  334. }
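/* The two bit-bang helpers above clock out a standard IEEE 802.3
 * clause-22 MDIO frame: 32 idle '1' bits as preamble, a start/opcode
 * sequence (0110 for read, 0101 for write), the 5-bit PHY address,
 * the 5-bit register number, and then 16 data bits, MSB first.
 */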
  335. #define TCVR_READ_TRIES 16
  336. static int happy_meal_tcvr_read(struct happy_meal *hp,
  337. void __iomem *tregs, int reg)
  338. {
  339. int tries = TCVR_READ_TRIES;
  340. int retval;
  341. if (hp->tcvr_type == none) {
  342. ASD("no transceiver, value=TCVR_FAILURE\n");
  343. return TCVR_FAILURE;
  344. }
  345. if (!(hp->happy_flags & HFLAG_FENABLE)) {
  346. ASD("doing bit bang\n");
  347. return happy_meal_bb_read(hp, tregs, reg);
  348. }
  349. hme_write32(hp, tregs + TCVR_FRAME,
  350. (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
  351. while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
  352. udelay(20);
  353. if (!tries) {
  354. netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
  355. return TCVR_FAILURE;
  356. }
  357. retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
  358. ASD("reg=0x%02x value=%04x\n", reg, retval);
  359. return retval;
  360. }
  361. #define TCVR_WRITE_TRIES 16
  362. static void happy_meal_tcvr_write(struct happy_meal *hp,
  363. void __iomem *tregs, int reg,
  364. unsigned short value)
  365. {
  366. int tries = TCVR_WRITE_TRIES;
  367. ASD("reg=0x%02x value=%04x\n", reg, value);
  368. /* Welcome to Sun Microsystems, can I take your order please? */
  369. if (!(hp->happy_flags & HFLAG_FENABLE)) {
  370. happy_meal_bb_write(hp, tregs, reg, value);
  371. return;
  372. }
  373. /* Would you like fries with that? */
  374. hme_write32(hp, tregs + TCVR_FRAME,
  375. (FRAME_WRITE | (hp->paddr << 23) |
  376. ((reg & 0xff) << 18) | (value & 0xffff)));
  377. while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
  378. udelay(20);
  379. /* Anything else? */
  380. if (!tries)
  381. netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
  382. /* Fifty-two cents is your change, have a nice day. */
  383. }
  384. /* Auto negotiation. The scheme is very simple. We have a timer routine
  385. * that keeps watching the auto negotiation process as it progresses.
  386. * The DP83840 is first told to start doing its thing, we set up the timer
  387. * and place the timer state machine in its initial state.
  388. *
  389. * Here the timer peeks at the DP83840 status registers at each click to see
  390. * if the auto negotiation has completed, we assume here that the DP83840 PHY
  391. * will time out at some point and just tell us what (didn't) happen. For
  392. * complete coverage we only allow so many of the ticks at this level to run,
  393. * when this has expired we print a warning message and try another strategy.
  394. * This "other" strategy is to force the interface into various speed/duplex
  395. * configurations and we stop when we see a link-up condition before the
  396. * maximum number of "peek" ticks have occurred.
  397. *
  398. * Once a valid link status has been detected we configure the BigMAC and
  399. * the rest of the Happy Meal to speak the most efficient protocol we could
  400. * get a clean link for. The priority for link configurations, highest first
  401. * is:
  402. * 100 Base-T Full Duplex
  403. * 100 Base-T Half Duplex
  404. * 10 Base-T Full Duplex
  405. * 10 Base-T Half Duplex
  406. *
  407. * We start a new timer now, after a successful auto negotiation status has
  408. * been detected. This timer just waits for the link-up bit to get set in
  409. * the BMCR of the DP83840. When this occurs we print a kernel log message
  410. * describing the link type in use and the fact that it is up.
  411. *
  412. * If a fatal error of some sort is signalled and detected in the interrupt
  413. * service routine, and the chip is reset, or the link is ifconfig'd down
  414. * and then back up, this entire process repeats itself all over again.
  415. */
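/* Timer state machine, as driven by happy_meal_timer() below:
 *   arbwait  - polling BMSR until auto-negotiation completes
 *   lupwait  - autoneg done, waiting for the BMSR link-up bit
 *   ltrywait - forcing speed/duplex permutations until link comes up
 *   asleep   - link resolved (or given up), timer not rescheduled
 */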
  416. static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
  417. {
  418. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  419. /* Downgrade from full to half duplex. Only possible
  420. * via ethtool.
  421. */
  422. if (hp->sw_bmcr & BMCR_FULLDPLX) {
  423. hp->sw_bmcr &= ~(BMCR_FULLDPLX);
  424. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  425. return 0;
  426. }
  427. /* Downgrade from 100 to 10. */
  428. if (hp->sw_bmcr & BMCR_SPEED100) {
  429. hp->sw_bmcr &= ~(BMCR_SPEED100);
  430. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  431. return 0;
  432. }
  433. /* We've tried everything. */
  434. return -1;
  435. }
  436. static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
  437. {
  438. hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
  439. netdev_info(hp->dev,
  440. "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
  441. hp->tcvr_type == external ? "external" : "internal",
  442. hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
  443. hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
  444. }
  445. static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
  446. {
  447. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  448. netdev_info(hp->dev,
  449. "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
  450. hp->tcvr_type == external ? "external" : "internal",
  451. hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
  452. hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
  453. }
  454. static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
  455. {
  456. int full;
  457. /* All we care about is making sure the bigmac tx_cfg has a
  458. * proper duplex setting.
  459. */
  460. if (hp->timer_state == arbwait) {
  461. hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
  462. if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
  463. goto no_response;
  464. if (hp->sw_lpa & LPA_100FULL)
  465. full = 1;
  466. else if (hp->sw_lpa & LPA_100HALF)
  467. full = 0;
  468. else if (hp->sw_lpa & LPA_10FULL)
  469. full = 1;
  470. else
  471. full = 0;
  472. } else {
  473. /* Forcing a link mode. */
  474. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  475. if (hp->sw_bmcr & BMCR_FULLDPLX)
  476. full = 1;
  477. else
  478. full = 0;
  479. }
  480. /* Before changing other bits in the tx_cfg register, and in
  481. * general any of other the TX config registers too, you
  482. * must:
  483. * 1) Clear Enable
  484. * 2) Poll with reads until that bit reads back as zero
  485. * 3) Make TX configuration changes
  486. * 4) Set Enable once more
  487. */
  488. hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
  489. hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
  490. ~(BIGMAC_TXCFG_ENABLE));
  491. while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
  492. barrier();
  493. if (full) {
  494. hp->happy_flags |= HFLAG_FULL;
  495. hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
  496. hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
  497. BIGMAC_TXCFG_FULLDPLX);
  498. } else {
  499. hp->happy_flags &= ~(HFLAG_FULL);
  500. hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
  501. hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
  502. ~(BIGMAC_TXCFG_FULLDPLX));
  503. }
  504. hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
  505. hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
  506. BIGMAC_TXCFG_ENABLE);
  507. return 0;
  508. no_response:
  509. return 1;
  510. }
  511. static int happy_meal_init(struct happy_meal *hp);
  512. static int is_lucent_phy(struct happy_meal *hp)
  513. {
  514. void __iomem *tregs = hp->tcvregs;
  515. unsigned short mr2, mr3;
  516. int ret = 0;
  517. mr2 = happy_meal_tcvr_read(hp, tregs, 2);
  518. mr3 = happy_meal_tcvr_read(hp, tregs, 3);
  519. if ((mr2 & 0xffff) == 0x0180 &&
  520. ((mr3 & 0xffff) >> 10) == 0x1d)
  521. ret = 1;
  522. return ret;
  523. }
  524. static void happy_meal_timer(struct timer_list *t)
  525. {
  526. struct happy_meal *hp = from_timer(hp, t, happy_timer);
  527. void __iomem *tregs = hp->tcvregs;
  528. int restart_timer = 0;
  529. spin_lock_irq(&hp->happy_lock);
  530. hp->timer_ticks++;
  531. switch(hp->timer_state) {
  532. case arbwait:
  533. /* Only allow for 10 ticks (about 12 seconds); any longer is much too
  534. * long to wait for arbitration to complete.
  535. */
  536. if (hp->timer_ticks >= 10) {
  537. /* Enter force mode. */
  538. do_force_mode:
  539. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  540. netdev_notice(hp->dev,
  541. "Auto-Negotiation unsuccessful, trying force link mode\n");
  542. hp->sw_bmcr = BMCR_SPEED100;
  543. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  544. if (!is_lucent_phy(hp)) {
  545. /* OK, seems we need to disable the transceiver for the first
  546. * tick to make sure we get an accurate link state at the
  547. * second tick.
  548. */
  549. hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
  550. hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
  551. happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
  552. }
  553. hp->timer_state = ltrywait;
  554. hp->timer_ticks = 0;
  555. restart_timer = 1;
  556. } else {
  557. /* Anything interesting happen? */
  558. hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
  559. if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
  560. int ret;
  561. /* Just what we've been waiting for... */
  562. ret = set_happy_link_modes(hp, tregs);
  563. if (ret) {
  564. /* Ooops, something bad happened, go to force
  565. * mode.
  566. *
  567. * XXX Broken hubs which don't support 802.3u
  568. * XXX auto-negotiation make this happen as well.
  569. */
  570. goto do_force_mode;
  571. }
  572. /* Success, at least so far, advance our state engine. */
  573. hp->timer_state = lupwait;
  574. restart_timer = 1;
  575. } else {
  576. restart_timer = 1;
  577. }
  578. }
  579. break;
  580. case lupwait:
  581. /* Auto negotiation was successful and we are awaiting a
  582. * link up status. I have decided to let this timer run
  583. * forever until some sort of error is signalled, reporting
  584. * a message to the user at 10 second intervals.
  585. */
  586. hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
  587. if (hp->sw_bmsr & BMSR_LSTATUS) {
  588. /* Wheee, it's up, display the link mode in use and put
  589. * the timer to sleep.
  590. */
  591. display_link_mode(hp, tregs);
  592. hp->timer_state = asleep;
  593. restart_timer = 0;
  594. } else {
  595. if (hp->timer_ticks >= 10) {
  596. netdev_notice(hp->dev,
  597. "Auto negotiation successful, link still not completely up.\n");
  598. hp->timer_ticks = 0;
  599. restart_timer = 1;
  600. } else {
  601. restart_timer = 1;
  602. }
  603. }
  604. break;
  605. case ltrywait:
  606. /* Making the timeout here too long can make it take
  607. * annoyingly long to attempt all of the link mode
  608. * permutations, but then again this is essentially
  609. * error recovery code for the most part.
  610. */
  611. hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
  612. hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
  613. if (hp->timer_ticks == 1) {
  614. if (!is_lucent_phy(hp)) {
  615. /* Disable the transceiver for this tick; we'll re-enable it next
  616. * tick, then check link state on the following tick.
  617. */
  618. hp->sw_csconfig |= CSCONFIG_TCVDISAB;
  619. happy_meal_tcvr_write(hp, tregs,
  620. DP83840_CSCONFIG, hp->sw_csconfig);
  621. }
  622. restart_timer = 1;
  623. break;
  624. }
  625. if (hp->timer_ticks == 2) {
  626. if (!is_lucent_phy(hp)) {
  627. hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
  628. happy_meal_tcvr_write(hp, tregs,
  629. DP83840_CSCONFIG, hp->sw_csconfig);
  630. }
  631. restart_timer = 1;
  632. break;
  633. }
  634. if (hp->sw_bmsr & BMSR_LSTATUS) {
  635. /* Force mode selection success. */
  636. display_forced_link_mode(hp, tregs);
  637. set_happy_link_modes(hp, tregs); /* XXX error? then what? */
  638. hp->timer_state = asleep;
  639. restart_timer = 0;
  640. } else {
  641. if (hp->timer_ticks >= 4) { /* about 5 seconds... */
  642. int ret;
  643. ret = try_next_permutation(hp, tregs);
  644. if (ret == -1) {
  645. /* Aieee, tried them all, reset the
  646. * chip and try all over again.
  647. */
  648. /* Let the user know... */
  649. netdev_notice(hp->dev,
  650. "Link down, cable problem?\n");
  651. ret = happy_meal_init(hp);
  652. if (ret) {
  653. /* ho hum... */
  654. netdev_err(hp->dev,
  655. "Error, cannot re-init the Happy Meal.\n");
  656. }
  657. goto out;
  658. }
  659. if (!is_lucent_phy(hp)) {
  660. hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
  661. DP83840_CSCONFIG);
  662. hp->sw_csconfig |= CSCONFIG_TCVDISAB;
  663. happy_meal_tcvr_write(hp, tregs,
  664. DP83840_CSCONFIG, hp->sw_csconfig);
  665. }
  666. hp->timer_ticks = 0;
  667. restart_timer = 1;
  668. } else {
  669. restart_timer = 1;
  670. }
  671. }
  672. break;
  673. case asleep:
  674. default:
  675. /* Can't happen.... */
  676. netdev_err(hp->dev,
  677. "Aieee, link timer is asleep but we got one anyways!\n");
  678. restart_timer = 0;
  679. hp->timer_ticks = 0;
  680. hp->timer_state = asleep; /* foo on you */
  681. break;
  682. }
  683. if (restart_timer) {
  684. hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
  685. add_timer(&hp->happy_timer);
  686. }
  687. out:
  688. spin_unlock_irq(&hp->happy_lock);
  689. }
  690. #define TX_RESET_TRIES 32
  691. #define RX_RESET_TRIES 32
  692. /* hp->happy_lock must be held */
  693. static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
  694. {
  695. int tries = TX_RESET_TRIES;
  696. HMD("reset...\n");
  697. /* Would you like to try our SMCC Delux? */
  698. hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
  699. while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
  700. udelay(20);
  701. /* Lettuce, tomato, buggy hardware (no extra charge)? */
  702. if (!tries)
  703. netdev_err(hp->dev, "Transceiver BigMac ATTACK!");
  704. /* Take care. */
  705. HMD("done\n");
  706. }
  707. /* hp->happy_lock must be held */
  708. static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
  709. {
  710. int tries = RX_RESET_TRIES;
  711. HMD("reset...\n");
  712. /* We have a special on GNU/Viking hardware bugs today. */
  713. hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
  714. while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
  715. udelay(20);
  716. /* Will that be all? */
  717. if (!tries)
  718. netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
  719. /* Don't forget your vik_1137125_wa. Have a nice day. */
  720. HMD("done\n");
  721. }
  722. #define STOP_TRIES 16
  723. /* hp->happy_lock must be held */
  724. static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
  725. {
  726. int tries = STOP_TRIES;
  727. HMD("reset...\n");
  728. /* We're consolidating our STB products, it's your lucky day. */
  729. hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
  730. while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
  731. udelay(20);
  732. /* Come back next week when we are "Sun Microelectronics". */
  733. if (!tries)
  734. netdev_err(hp->dev, "Fry guys.\n");
  735. /* Remember: "Different name, same old buggy as shit hardware." */
  736. HMD("done\n");
  737. }
  738. /* hp->happy_lock must be held */
  739. static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
  740. {
  741. struct net_device_stats *stats = &hp->dev->stats;
  742. stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
  743. hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
  744. stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
  745. hme_write32(hp, bregs + BMAC_UNALECTR, 0);
  746. stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
  747. hme_write32(hp, bregs + BMAC_GLECTR, 0);
  748. stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
  749. stats->collisions +=
  750. (hme_read32(hp, bregs + BMAC_EXCTR) +
  751. hme_read32(hp, bregs + BMAC_LTCTR));
  752. hme_write32(hp, bregs + BMAC_EXCTR, 0);
  753. hme_write32(hp, bregs + BMAC_LTCTR, 0);
  754. }
  755. /* hp->happy_lock must be held */
  756. static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
  757. {
  758. /* If polling disabled or not polling already, nothing to do. */
  759. if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
  760. (HFLAG_POLLENABLE | HFLAG_POLL)) {
  761. ASD("not polling, return\n");
  762. return;
  763. }
  764. /* Shut up the MIF. */
  765. ASD("were polling, mif ints off, polling off\n");
  766. hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
  767. /* Turn off polling. */
  768. hme_write32(hp, tregs + TCVR_CFG,
  769. hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
  770. /* We are no longer polling. */
  771. hp->happy_flags &= ~(HFLAG_POLL);
  772. /* Let the bits set. */
  773. udelay(200);
  774. ASD("done\n");
  775. }
  776. /* Only Sun can take such nice parts and fuck up the programming interface
  777. * like this. Good job guys...
  778. */
  779. #define TCVR_RESET_TRIES 16 /* It should reset quickly */
  780. #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
  781. /* hp->happy_lock must be held */
  782. static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
  783. {
  784. u32 tconfig;
  785. int result, tries = TCVR_RESET_TRIES;
  786. tconfig = hme_read32(hp, tregs + TCVR_CFG);
  787. ASD("tcfg=%08x\n", tconfig);
  788. if (hp->tcvr_type == external) {
  789. hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
  790. hp->tcvr_type = internal;
  791. hp->paddr = TCV_PADDR_ITX;
  792. happy_meal_tcvr_write(hp, tregs, MII_BMCR,
  793. (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
  794. result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  795. if (result == TCVR_FAILURE) {
  796. ASD("phyread_fail\n");
  797. return -1;
  798. }
  799. ASD("external: ISOLATE, phyread_ok, PSELECT\n");
  800. hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
  801. hp->tcvr_type = external;
  802. hp->paddr = TCV_PADDR_ETX;
  803. } else {
  804. if (tconfig & TCV_CFG_MDIO1) {
  805. hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
  806. happy_meal_tcvr_write(hp, tregs, MII_BMCR,
  807. (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
  808. result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  809. if (result == TCVR_FAILURE) {
  810. ASD("phyread_fail>\n");
  811. return -1;
  812. }
  813. ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
  814. hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
  815. hp->tcvr_type = internal;
  816. hp->paddr = TCV_PADDR_ITX;
  817. }
  818. }
  819. ASD("BMCR_RESET...\n");
  820. happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
  821. while (--tries) {
  822. result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  823. if (result == TCVR_FAILURE)
  824. return -1;
  825. hp->sw_bmcr = result;
  826. if (!(result & BMCR_RESET))
  827. break;
  828. udelay(20);
  829. }
  830. if (!tries) {
  831. ASD("BMCR RESET FAILED!\n");
  832. return -1;
  833. }
  834. ASD("RESET_OK\n");
  835. /* Get fresh copies of the PHY registers. */
  836. hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
  837. hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
  838. hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
  839. hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
  840. ASD("UNISOLATE...\n");
  841. hp->sw_bmcr &= ~(BMCR_ISOLATE);
  842. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  843. tries = TCVR_UNISOLATE_TRIES;
  844. while (--tries) {
  845. result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  846. if (result == TCVR_FAILURE)
  847. return -1;
  848. if (!(result & BMCR_ISOLATE))
  849. break;
  850. udelay(20);
  851. }
  852. if (!tries) {
  853. ASD("UNISOLATE FAILED!\n");
  854. return -1;
  855. }
  856. ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
  857. if (!is_lucent_phy(hp)) {
  858. result = happy_meal_tcvr_read(hp, tregs,
  859. DP83840_CSCONFIG);
  860. happy_meal_tcvr_write(hp, tregs,
  861. DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
  862. }
  863. return 0;
  864. }
  865. /* Figure out whether we have an internal or external transceiver.
  866. *
  867. * hp->happy_lock must be held
  868. */
  869. static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
  870. {
  871. unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
  872. ASD("tcfg=%08lx\n", tconfig);
  873. if (hp->happy_flags & HFLAG_POLL) {
  874. /* If we are polling, we must stop to get the transceiver type. */
  875. if (hp->tcvr_type == internal) {
  876. if (tconfig & TCV_CFG_MDIO1) {
  877. happy_meal_poll_stop(hp, tregs);
  878. hp->paddr = TCV_PADDR_ETX;
  879. hp->tcvr_type = external;
  880. tconfig &= ~(TCV_CFG_PENABLE);
  881. tconfig |= TCV_CFG_PSELECT;
  882. hme_write32(hp, tregs + TCVR_CFG, tconfig);
  883. ASD("poll stop, internal->external\n");
  884. }
  885. } else {
  886. if (hp->tcvr_type == external) {
  887. if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
  888. happy_meal_poll_stop(hp, tregs);
  889. hp->paddr = TCV_PADDR_ITX;
  890. hp->tcvr_type = internal;
  891. hme_write32(hp, tregs + TCVR_CFG,
  892. hme_read32(hp, tregs + TCVR_CFG) &
  893. ~(TCV_CFG_PSELECT));
  894. ASD("poll stop, external->internal\n");
  895. }
  896. } else {
  897. ASD("polling, none\n");
  898. }
  899. }
  900. } else {
  901. u32 reread = hme_read32(hp, tregs + TCVR_CFG);
  902. /* Else we can just work off of the MDIO bits. */
  903. if (reread & TCV_CFG_MDIO1) {
  904. hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
  905. hp->paddr = TCV_PADDR_ETX;
  906. hp->tcvr_type = external;
  907. ASD("not polling, external\n");
  908. } else {
  909. if (reread & TCV_CFG_MDIO0) {
  910. hme_write32(hp, tregs + TCVR_CFG,
  911. tconfig & ~(TCV_CFG_PSELECT));
  912. hp->paddr = TCV_PADDR_ITX;
  913. hp->tcvr_type = internal;
  914. ASD("not polling, internal\n");
  915. } else {
  916. netdev_err(hp->dev,
  917. "Transceiver and a coke please.");
  918. hp->tcvr_type = none; /* Grrr... */
  919. ASD("not polling, none\n");
  920. }
  921. }
  922. }
  923. }
  924. /* The receive ring buffers are a bit tricky to get right. Here goes...
  925. *
  926. * The buffers we dma into must be 64 byte aligned. So we use a special
  927. * alloc_skb() routine for the happy meal to allocate 64 bytes more than
  928. * we really need.
  929. *
  930. * We use skb_reserve() to align the data block we get in the skb. We
  931. * also program the etxregs->cfg register to use an offset of 2. This
  932. * empirical constant plus the ethernet header size will always leave
  933. * us with a nicely aligned ip header once we pass things up to the
  934. * protocol layers.
  935. *
  936. * The numbers work out to:
  937. *
  938. * Max ethernet frame size 1518
  939. * Ethernet header size 14
  940. * Happy Meal base offset 2
  941. *
  942. * Say a skb data area is at 0xf001b010, and its size alloced is
  943. * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
  944. *
  945. * First our alloc_skb() routine aligns the data base to a 64 byte
  946. * boundary. We now have 0xf001b040 as our skb data address. We
  947. * plug this into the receive descriptor address.
  948. *
  949. * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
  950. * So now the data we will end up looking at starts at 0xf001b042. When
  951. * the packet arrives, we will check out the size received and subtract
  952. * this from the skb->length. Then we just pass the packet up to the
  953. * protocols as is, and allocate a new skb to replace this slot we have
  954. * just received from.
  955. *
  956. * The ethernet layer will strip the ether header from the front of the
  957. * skb we just sent to it, this leaves us with the ip header sitting
  958. * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
  959. * Happy Meal has even checksummed the tcp/udp data for us. The 16
  960. * bit checksum is obtained from the low bits of the receive descriptor
  961. * flags, thus:
  962. *
  963. * skb->csum = rxd->rx_flags & 0xffff;
  964. * skb->ip_summed = CHECKSUM_COMPLETE;
  965. *
  966. * before sending off the skb to the protocols, and we are good as gold.
  967. */
  968. static void happy_meal_clean_rings(struct happy_meal *hp)
  969. {
  970. int i;
  971. for (i = 0; i < RX_RING_SIZE; i++) {
  972. if (hp->rx_skbs[i] != NULL) {
  973. struct sk_buff *skb = hp->rx_skbs[i];
  974. struct happy_meal_rxd *rxd;
  975. u32 dma_addr;
  976. rxd = &hp->happy_block->happy_meal_rxd[i];
  977. dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
  978. dma_unmap_single(hp->dma_dev, dma_addr,
  979. RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
  980. dev_kfree_skb_any(skb);
  981. hp->rx_skbs[i] = NULL;
  982. }
  983. }
  984. for (i = 0; i < TX_RING_SIZE; i++) {
  985. if (hp->tx_skbs[i] != NULL) {
  986. struct sk_buff *skb = hp->tx_skbs[i];
  987. struct happy_meal_txd *txd;
  988. u32 dma_addr;
  989. int frag;
  990. hp->tx_skbs[i] = NULL;
  991. for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
  992. txd = &hp->happy_block->happy_meal_txd[i];
  993. dma_addr = hme_read_desc32(hp, &txd->tx_addr);
  994. if (!frag)
  995. dma_unmap_single(hp->dma_dev, dma_addr,
  996. (hme_read_desc32(hp, &txd->tx_flags)
  997. & TXFLAG_SIZE),
  998. DMA_TO_DEVICE);
  999. else
  1000. dma_unmap_page(hp->dma_dev, dma_addr,
  1001. (hme_read_desc32(hp, &txd->tx_flags)
  1002. & TXFLAG_SIZE),
  1003. DMA_TO_DEVICE);
  1004. if (frag != skb_shinfo(skb)->nr_frags)
  1005. i++;
  1006. }
  1007. dev_kfree_skb_any(skb);
  1008. }
  1009. }
  1010. }
  1011. /* hp->happy_lock must be held */
  1012. static void happy_meal_init_rings(struct happy_meal *hp)
  1013. {
  1014. struct hmeal_init_block *hb = hp->happy_block;
  1015. int i;
  1016. HMD("counters to zero\n");
  1017. hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
  1018. /* Free any skippy bufs left around in the rings. */
  1019. happy_meal_clean_rings(hp);
  1020. /* Now get new skippy bufs for the receive ring. */
  1021. HMD("init rxring\n");
  1022. for (i = 0; i < RX_RING_SIZE; i++) {
  1023. struct sk_buff *skb;
  1024. u32 mapping;
  1025. skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
  1026. if (!skb) {
  1027. hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
  1028. continue;
  1029. }
  1030. hp->rx_skbs[i] = skb;
  1031. /* Because we reserve afterwards. */
  1032. skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
  1033. mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
  1034. DMA_FROM_DEVICE);
  1035. if (dma_mapping_error(hp->dma_dev, mapping)) {
  1036. dev_kfree_skb_any(skb);
  1037. hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
  1038. continue;
  1039. }
  1040. hme_write_rxd(hp, &hb->happy_meal_rxd[i],
  1041. (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
  1042. mapping);
  1043. skb_reserve(skb, RX_OFFSET);
  1044. }
  1045. HMD("init txring\n");
  1046. for (i = 0; i < TX_RING_SIZE; i++)
  1047. hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
  1048. HMD("done\n");
  1049. }
  1050. /* hp->happy_lock must be held */
  1051. static void
  1052. happy_meal_begin_auto_negotiation(struct happy_meal *hp,
  1053. void __iomem *tregs,
  1054. const struct ethtool_link_ksettings *ep)
  1055. {
  1056. int timeout;
  1057. /* Read all of the registers we are interested in now. */
  1058. hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
  1059. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  1060. hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
  1061. hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
  1062. /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
  1063. hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
  1064. if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
  1065. /* Advertise everything we can support. */
  1066. if (hp->sw_bmsr & BMSR_10HALF)
  1067. hp->sw_advertise |= (ADVERTISE_10HALF);
  1068. else
  1069. hp->sw_advertise &= ~(ADVERTISE_10HALF);
  1070. if (hp->sw_bmsr & BMSR_10FULL)
  1071. hp->sw_advertise |= (ADVERTISE_10FULL);
  1072. else
  1073. hp->sw_advertise &= ~(ADVERTISE_10FULL);
  1074. if (hp->sw_bmsr & BMSR_100HALF)
  1075. hp->sw_advertise |= (ADVERTISE_100HALF);
  1076. else
  1077. hp->sw_advertise &= ~(ADVERTISE_100HALF);
  1078. if (hp->sw_bmsr & BMSR_100FULL)
  1079. hp->sw_advertise |= (ADVERTISE_100FULL);
  1080. else
  1081. hp->sw_advertise &= ~(ADVERTISE_100FULL);
  1082. happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
  1083. /* XXX Currently no Happy Meal cards I know of support 100BaseT4,
  1084. * XXX and this is because the DP83840 does not support it, changes
  1085. * XXX would need to be made to the tx/rx logic in the driver as well
  1086. * XXX so I completely skip checking for it in the BMSR for now.
  1087. */
  1088. ASD("Advertising [ %s%s%s%s]\n",
  1089. hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
  1090. hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
  1091. hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
  1092. hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
  1093. /* Enable Auto-Negotiation, this is usually on already... */
  1094. hp->sw_bmcr |= BMCR_ANENABLE;
  1095. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  1096. /* Restart it to make sure it is going. */
  1097. hp->sw_bmcr |= BMCR_ANRESTART;
  1098. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  1099. /* BMCR_ANRESTART self clears when the process has begun. */
  1100. timeout = 64; /* More than enough. */
  1101. while (--timeout) {
  1102. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  1103. if (!(hp->sw_bmcr & BMCR_ANRESTART))
  1104. break; /* got it. */
  1105. udelay(10);
  1106. }
  1107. if (!timeout) {
  1108. netdev_err(hp->dev,
  1109. "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
  1110. hp->sw_bmcr);
  1111. netdev_notice(hp->dev,
  1112. "Performing force link detection.\n");
  1113. goto force_link;
  1114. } else {
  1115. hp->timer_state = arbwait;
  1116. }
  1117. } else {
  1118. force_link:
  1119. /* Force the link up, trying first a particular mode.
  1120. * Either we are here at the request of ethtool or
  1121. * because the Happy Meal would not start to autoneg.
  1122. */
  1123. /* Disable auto-negotiation in BMCR, enable the duplex and
  1124. * speed setting, init the timer state machine, and fire it off.
  1125. */
  1126. if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
  1127. hp->sw_bmcr = BMCR_SPEED100;
  1128. } else {
  1129. if (ep->base.speed == SPEED_100)
  1130. hp->sw_bmcr = BMCR_SPEED100;
  1131. else
  1132. hp->sw_bmcr = 0;
  1133. if (ep->base.duplex == DUPLEX_FULL)
  1134. hp->sw_bmcr |= BMCR_FULLDPLX;
  1135. }
  1136. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  1137. if (!is_lucent_phy(hp)) {
  1138. /* OK, seems we need to disable the transceiver for the first
  1139. * tick to make sure we get an accurate link state at the
  1140. * second tick.
  1141. */
  1142. hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
  1143. DP83840_CSCONFIG);
  1144. hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
  1145. happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
  1146. hp->sw_csconfig);
  1147. }
  1148. hp->timer_state = ltrywait;
  1149. }
  1150. hp->timer_ticks = 0;
  1151. hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
  1152. add_timer(&hp->happy_timer);
  1153. }
  1154. /* hp->happy_lock must be held */
  1155. static int happy_meal_init(struct happy_meal *hp)
  1156. {
  1157. const unsigned char *e = &hp->dev->dev_addr[0];
  1158. void __iomem *gregs = hp->gregs;
  1159. void __iomem *etxregs = hp->etxregs;
  1160. void __iomem *erxregs = hp->erxregs;
  1161. void __iomem *bregs = hp->bigmacregs;
  1162. void __iomem *tregs = hp->tcvregs;
  1163. const char *bursts = "64";
  1164. u32 regtmp, rxcfg;
  1165. /* If auto-negotiation timer is running, kill it. */
  1166. del_timer(&hp->happy_timer);
  1167. HMD("happy_flags[%08x]\n", hp->happy_flags);
  1168. if (!(hp->happy_flags & HFLAG_INIT)) {
  1169. HMD("set HFLAG_INIT\n");
  1170. hp->happy_flags |= HFLAG_INIT;
  1171. happy_meal_get_counters(hp, bregs);
  1172. }
  1173. /* Stop polling. */
  1174. HMD("to happy_meal_poll_stop\n");
  1175. happy_meal_poll_stop(hp, tregs);
  1176. /* Stop transmitter and receiver. */
  1177. HMD("to happy_meal_stop\n");
  1178. happy_meal_stop(hp, gregs);
  1179. /* Alloc and reset the tx/rx descriptor chains. */
  1180. HMD("to happy_meal_init_rings\n");
  1181. happy_meal_init_rings(hp);
  1182. /* Shut up the MIF. */
  1183. HMD("Disable all MIF irqs (old[%08x])\n",
  1184. hme_read32(hp, tregs + TCVR_IMASK));
  1185. hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
  1186. /* See if we can enable the MIF frame on this card to speak to the DP83840. */
  1187. if (hp->happy_flags & HFLAG_FENABLE) {
  1188. HMD("use frame old[%08x]\n",
  1189. hme_read32(hp, tregs + TCVR_CFG));
  1190. hme_write32(hp, tregs + TCVR_CFG,
  1191. hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
  1192. } else {
  1193. HMD("use bitbang old[%08x]\n",
  1194. hme_read32(hp, tregs + TCVR_CFG));
  1195. hme_write32(hp, tregs + TCVR_CFG,
  1196. hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
  1197. }
  1198. /* Check the state of the transceiver. */
  1199. HMD("to happy_meal_transceiver_check\n");
  1200. happy_meal_transceiver_check(hp, tregs);
  1201. /* Put the Big Mac into a sane state. */
  1202. switch(hp->tcvr_type) {
  1203. case none:
  1204. /* Cannot operate if we don't know the transceiver type! */
  1205. HMD("AAIEEE no transceiver type, EAGAIN\n");
  1206. return -EAGAIN;
  1207. case internal:
  1208. /* Using the MII buffers. */
  1209. HMD("internal, using MII\n");
  1210. hme_write32(hp, bregs + BMAC_XIFCFG, 0);
  1211. break;
  1212. case external:
  1213. /* Not using the MII, disable it. */
  1214. HMD("external, disable MII\n");
  1215. hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
  1216. break;
  1217. }
  1218. if (happy_meal_tcvr_reset(hp, tregs))
  1219. return -EAGAIN;
  1220. /* Reset the Happy Meal Big Mac transceiver and the receiver. */
  1221. HMD("tx/rx reset\n");
  1222. happy_meal_tx_reset(hp, bregs);
  1223. happy_meal_rx_reset(hp, bregs);
  1224. /* Set jam size and inter-packet gaps to reasonable defaults. */
  1225. hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
  1226. hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
  1227. hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
  1228. /* Load up the MAC address and random seed. */
	/* The docs recommend using the 10 LSBs of our MAC here. */
  1230. hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
  1231. hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
  1232. hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
  1233. hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
  1234. if ((hp->dev->flags & IFF_ALLMULTI) ||
  1235. (netdev_mc_count(hp->dev) > 64)) {
  1236. hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
  1237. hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
  1238. hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
  1239. hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
  1240. } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
  1241. u16 hash_table[4];
  1242. struct netdev_hw_addr *ha;
  1243. u32 crc;
  1244. memset(hash_table, 0, sizeof(hash_table));
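		/* Fold each multicast address into a 6-bit hash: the top six
		 * bits of the little-endian CRC select one of the 64 filter
		 * bits spread across the four 16-bit HTABLE registers.
		 */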
  1245. netdev_for_each_mc_addr(ha, hp->dev) {
  1246. crc = ether_crc_le(6, ha->addr);
  1247. crc >>= 26;
  1248. hash_table[crc >> 4] |= 1 << (crc & 0xf);
  1249. }
  1250. hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
  1251. hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
  1252. hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
  1253. hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
  1254. } else {
  1255. hme_write32(hp, bregs + BMAC_HTABLE3, 0);
  1256. hme_write32(hp, bregs + BMAC_HTABLE2, 0);
  1257. hme_write32(hp, bregs + BMAC_HTABLE1, 0);
  1258. hme_write32(hp, bregs + BMAC_HTABLE0, 0);
  1259. }
  1260. /* Set the RX and TX ring ptrs. */
  1261. HMD("ring ptrs rxr[%08x] txr[%08x]\n",
  1262. ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
  1263. ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
  1264. hme_write32(hp, erxregs + ERX_RING,
  1265. ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
  1266. hme_write32(hp, etxregs + ETX_RING,
  1267. ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
  1268. /* Parity issues in the ERX unit of some HME revisions can cause some
  1269. * registers to not be written unless their parity is even. Detect such
  1270. * lost writes and simply rewrite with a low bit set (which will be ignored
  1271. * since the rxring needs to be 2K aligned).
  1272. */
  1273. if (hme_read32(hp, erxregs + ERX_RING) !=
  1274. ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
  1275. hme_write32(hp, erxregs + ERX_RING,
  1276. ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
  1277. | 0x4);
  1278. /* Set the supported burst sizes. */
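	/* On non-sparc hosts the device is always PCI, so 64-byte bursts are
	 * used unconditionally; on sparc we honour the burst sizes the parent
	 * bus reported, preferring 64, then 32, then 16 bytes.
	 */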
  1279. #ifndef CONFIG_SPARC
	/* It is always PCI and can handle 64-byte bursts. */
  1281. hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
  1282. #else
  1283. if ((hp->happy_bursts & DMA_BURST64) &&
  1284. ((hp->happy_flags & HFLAG_PCI) != 0
  1285. #ifdef CONFIG_SBUS
  1286. || sbus_can_burst64()
  1287. #endif
  1288. || 0)) {
  1289. u32 gcfg = GREG_CFG_BURST64;
  1290. /* I have no idea if I should set the extended
  1291. * transfer mode bit for Cheerio, so for now I
  1292. * do not. -DaveM
  1293. */
  1294. #ifdef CONFIG_SBUS
  1295. if ((hp->happy_flags & HFLAG_PCI) == 0) {
  1296. struct platform_device *op = hp->happy_dev;
  1297. if (sbus_can_dma_64bit()) {
  1298. sbus_set_sbus64(&op->dev,
  1299. hp->happy_bursts);
  1300. gcfg |= GREG_CFG_64BIT;
  1301. }
  1302. }
  1303. #endif
  1304. bursts = "64";
  1305. hme_write32(hp, gregs + GREG_CFG, gcfg);
  1306. } else if (hp->happy_bursts & DMA_BURST32) {
  1307. bursts = "32";
  1308. hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
  1309. } else if (hp->happy_bursts & DMA_BURST16) {
  1310. bursts = "16";
  1311. hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
  1312. } else {
  1313. bursts = "XXX";
  1314. hme_write32(hp, gregs + GREG_CFG, 0);
  1315. }
  1316. #endif /* CONFIG_SPARC */
  1317. HMD("old[%08x] bursts<%s>\n",
  1318. hme_read32(hp, gregs + GREG_CFG), bursts);
  1319. /* Turn off interrupts we do not want to hear. */
  1320. hme_write32(hp, gregs + GREG_IMASK,
  1321. (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
  1322. GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
  1323. /* Set the transmit ring buffer size. */
  1324. HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
  1325. hme_read32(hp, etxregs + ETX_RSIZE));
  1326. hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
  1327. /* Enable transmitter DVMA. */
  1328. HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
  1329. hme_write32(hp, etxregs + ETX_CFG,
  1330. hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
	/* This chip really rots: sometimes when you write to the receiver's
	 * control registers, not all of the bits get there properly.  I
	 * cannot think of a sane way to provide complete coverage for this
	 * hardware bug yet.
	 */
  1336. HMD("erx regs bug old[%08x]\n",
  1337. hme_read32(hp, erxregs + ERX_CFG));
  1338. hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
  1339. regtmp = hme_read32(hp, erxregs + ERX_CFG);
  1340. hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
  1341. if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
  1342. netdev_err(hp->dev,
  1343. "Eieee, rx config register gets greasy fries.\n");
  1344. netdev_err(hp->dev,
  1345. "Trying to set %08x, reread gives %08x\n",
  1346. ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
  1347. /* XXX Should return failure here... */
  1348. }
  1349. /* Enable Big Mac hash table filter. */
  1350. HMD("enable hash rx_cfg_old[%08x]\n",
  1351. hme_read32(hp, bregs + BMAC_RXCFG));
  1352. rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
  1353. if (hp->dev->flags & IFF_PROMISC)
  1354. rxcfg |= BIGMAC_RXCFG_PMISC;
  1355. hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
  1356. /* Let the bits settle in the chip. */
  1357. udelay(10);
  1358. /* Ok, configure the Big Mac transmitter. */
  1359. HMD("BIGMAC init\n");
  1360. regtmp = 0;
  1361. if (hp->happy_flags & HFLAG_FULL)
  1362. regtmp |= BIGMAC_TXCFG_FULLDPLX;
  1363. /* Don't turn on the "don't give up" bit for now. It could cause hme
  1364. * to deadlock with the PHY if a Jabber occurs.
  1365. */
  1366. hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
  1367. /* Give up after 16 TX attempts. */
  1368. hme_write32(hp, bregs + BMAC_ALIMIT, 16);
  1369. /* Enable the output drivers no matter what. */
  1370. regtmp = BIGMAC_XCFG_ODENABLE;
  1371. /* If card can do lance mode, enable it. */
  1372. if (hp->happy_flags & HFLAG_LANCE)
  1373. regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
  1374. /* Disable the MII buffers if using external transceiver. */
  1375. if (hp->tcvr_type == external)
  1376. regtmp |= BIGMAC_XCFG_MIIDISAB;
  1377. HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
  1378. hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
  1379. /* Start things up. */
  1380. HMD("tx old[%08x] and rx [%08x] ON!\n",
  1381. hme_read32(hp, bregs + BMAC_TXCFG),
  1382. hme_read32(hp, bregs + BMAC_RXCFG));
  1383. /* Set larger TX/RX size to allow for 802.1q */
  1384. hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
  1385. hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
  1386. hme_write32(hp, bregs + BMAC_TXCFG,
  1387. hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
  1388. hme_write32(hp, bregs + BMAC_RXCFG,
  1389. hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
  1390. /* Get the autonegotiation started, and the watch timer ticking. */
  1391. happy_meal_begin_auto_negotiation(hp, tregs, NULL);
  1392. /* Success. */
  1393. return 0;
  1394. }
  1395. /* hp->happy_lock must be held */
  1396. static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
  1397. {
  1398. void __iomem *tregs = hp->tcvregs;
  1399. void __iomem *bregs = hp->bigmacregs;
  1400. void __iomem *gregs = hp->gregs;
  1401. happy_meal_stop(hp, gregs);
  1402. hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
  1403. if (hp->happy_flags & HFLAG_FENABLE)
  1404. hme_write32(hp, tregs + TCVR_CFG,
  1405. hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
  1406. else
  1407. hme_write32(hp, tregs + TCVR_CFG,
  1408. hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
  1409. happy_meal_transceiver_check(hp, tregs);
  1410. switch(hp->tcvr_type) {
  1411. case none:
  1412. return;
  1413. case internal:
  1414. hme_write32(hp, bregs + BMAC_XIFCFG, 0);
  1415. break;
  1416. case external:
  1417. hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
  1418. break;
  1419. }
  1420. if (happy_meal_tcvr_reset(hp, tregs))
  1421. return;
  1422. /* Latch PHY registers as of now. */
  1423. hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
  1424. hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
  1425. /* Advertise everything we can support. */
  1426. if (hp->sw_bmsr & BMSR_10HALF)
  1427. hp->sw_advertise |= (ADVERTISE_10HALF);
  1428. else
  1429. hp->sw_advertise &= ~(ADVERTISE_10HALF);
  1430. if (hp->sw_bmsr & BMSR_10FULL)
  1431. hp->sw_advertise |= (ADVERTISE_10FULL);
  1432. else
  1433. hp->sw_advertise &= ~(ADVERTISE_10FULL);
  1434. if (hp->sw_bmsr & BMSR_100HALF)
  1435. hp->sw_advertise |= (ADVERTISE_100HALF);
  1436. else
  1437. hp->sw_advertise &= ~(ADVERTISE_100HALF);
  1438. if (hp->sw_bmsr & BMSR_100FULL)
  1439. hp->sw_advertise |= (ADVERTISE_100FULL);
  1440. else
  1441. hp->sw_advertise &= ~(ADVERTISE_100FULL);
  1442. /* Update the PHY advertisement register. */
  1443. happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
  1444. }
  1445. /* Once status is latched (by happy_meal_interrupt) it is cleared by
  1446. * the hardware, so we cannot re-read it and get a correct value.
  1447. *
  1448. * hp->happy_lock must be held
  1449. */
  1450. static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
  1451. {
  1452. int reset = 0;
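	/* Walk the error bits and decide whether the chip needs a full
	 * re-init; 'reset' accumulates that decision.
	 */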
  1453. /* Only print messages for non-counter related interrupts. */
  1454. if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
  1455. GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
  1456. GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
  1457. GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
  1458. GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
  1459. GREG_STAT_SLVPERR))
  1460. netdev_err(hp->dev,
  1461. "Error interrupt for happy meal, status = %08x\n",
  1462. status);
  1463. if (status & GREG_STAT_RFIFOVF) {
		/* Receive FIFO overflow is harmless and the hardware will take
		 * care of it; just some packets are lost.  Who cares.
		 */
  1466. netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
  1467. }
  1468. if (status & GREG_STAT_STSTERR) {
  1469. /* BigMAC SQE link test failed. */
  1470. netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
  1471. reset = 1;
  1472. }
  1473. if (status & GREG_STAT_TFIFO_UND) {
  1474. /* Transmit FIFO underrun, again DMA error likely. */
  1475. netdev_err(hp->dev,
  1476. "Happy Meal transmitter FIFO underrun, DMA error.\n");
  1477. reset = 1;
  1478. }
  1479. if (status & GREG_STAT_MAXPKTERR) {
		/* Driver error: we tried to transmit something larger
		 * than the Ethernet maximum MTU.
		 */
  1483. netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
  1484. reset = 1;
  1485. }
  1486. if (status & GREG_STAT_NORXD) {
  1487. /* This is harmless, it just means the system is
  1488. * quite loaded and the incoming packet rate was
  1489. * faster than the interrupt handler could keep up
  1490. * with.
  1491. */
  1492. netdev_info(hp->dev,
  1493. "Happy Meal out of receive descriptors, packet dropped.\n");
  1494. }
  1495. if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
  1496. /* All sorts of DMA receive errors. */
  1497. netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
  1498. status & GREG_STAT_RXERR ? "GenericError " : "",
  1499. status & GREG_STAT_RXPERR ? "ParityError " : "",
  1500. status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
  1501. reset = 1;
  1502. }
  1503. if (status & GREG_STAT_EOPERR) {
  1504. /* Driver bug, didn't set EOP bit in tx descriptor given
  1505. * to the happy meal.
  1506. */
  1507. netdev_err(hp->dev,
  1508. "EOP not set in happy meal transmit descriptor!\n");
  1509. reset = 1;
  1510. }
  1511. if (status & GREG_STAT_MIFIRQ) {
  1512. /* MIF signalled an interrupt, were we polling it? */
  1513. netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
  1514. }
  1515. if (status &
  1516. (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
  1517. /* All sorts of transmit DMA errors. */
  1518. netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
  1519. status & GREG_STAT_TXEACK ? "GenericError " : "",
  1520. status & GREG_STAT_TXLERR ? "LateError " : "",
  1521. status & GREG_STAT_TXPERR ? "ParityError " : "",
  1522. status & GREG_STAT_TXTERR ? "TagBotch " : "");
  1523. reset = 1;
  1524. }
  1525. if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
		/* Bus or parity error when the CPU accessed happy meal
		 * registers or its internal FIFOs.  We should never see this.
		 */
  1529. netdev_err(hp->dev,
  1530. "Happy Meal register access SBUS slave (%s) error.\n",
  1531. (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
  1532. reset = 1;
  1533. }
  1534. if (reset) {
  1535. netdev_notice(hp->dev, "Resetting...\n");
  1536. happy_meal_init(hp);
  1537. return 1;
  1538. }
  1539. return 0;
  1540. }
  1541. /* hp->happy_lock must be held */
  1542. static void happy_meal_mif_interrupt(struct happy_meal *hp)
  1543. {
  1544. void __iomem *tregs = hp->tcvregs;
  1545. netdev_info(hp->dev, "Link status change.\n");
  1546. hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
  1547. hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
  1548. /* Use the fastest transmission protocol possible. */
  1549. if (hp->sw_lpa & LPA_100FULL) {
  1550. netdev_info(hp->dev, "Switching to 100Mbps at full duplex.\n");
  1551. hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
  1552. } else if (hp->sw_lpa & LPA_100HALF) {
		netdev_info(hp->dev, "Switching to 100Mbps at half duplex.\n");
  1554. hp->sw_bmcr |= BMCR_SPEED100;
  1555. } else if (hp->sw_lpa & LPA_10FULL) {
		netdev_info(hp->dev, "Switching to 10Mbps at full duplex.\n");
  1557. hp->sw_bmcr |= BMCR_FULLDPLX;
  1558. } else {
  1559. netdev_info(hp->dev, "Using 10Mbps at half duplex.\n");
  1560. }
  1561. happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
  1562. /* Finally stop polling and shut up the MIF. */
  1563. happy_meal_poll_stop(hp, tregs);
  1564. }
  1565. /* hp->happy_lock must be held */
  1566. static void happy_meal_tx(struct happy_meal *hp)
  1567. {
  1568. struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
  1569. struct happy_meal_txd *this;
  1570. struct net_device *dev = hp->dev;
  1571. int elem;
  1572. elem = hp->tx_old;
  1573. while (elem != hp->tx_new) {
  1574. struct sk_buff *skb;
  1575. u32 flags, dma_addr, dma_len;
  1576. int frag;
  1577. netdev_vdbg(hp->dev, "TX[%d]\n", elem);
  1578. this = &txbase[elem];
  1579. flags = hme_read_desc32(hp, &this->tx_flags);
  1580. if (flags & TXFLAG_OWN)
  1581. break;
  1582. skb = hp->tx_skbs[elem];
  1583. if (skb_shinfo(skb)->nr_frags) {
  1584. int last;
  1585. last = elem + skb_shinfo(skb)->nr_frags;
  1586. last &= (TX_RING_SIZE - 1);
  1587. flags = hme_read_desc32(hp, &txbase[last].tx_flags);
  1588. if (flags & TXFLAG_OWN)
  1589. break;
  1590. }
  1591. hp->tx_skbs[elem] = NULL;
  1592. dev->stats.tx_bytes += skb->len;
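		/* Unmap the head buffer and every page fragment; each
		 * descriptor records the DMA address and length we need.
		 */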
  1593. for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
  1594. dma_addr = hme_read_desc32(hp, &this->tx_addr);
  1595. dma_len = hme_read_desc32(hp, &this->tx_flags);
  1596. dma_len &= TXFLAG_SIZE;
  1597. if (!frag)
  1598. dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
  1599. else
  1600. dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
  1601. elem = NEXT_TX(elem);
  1602. this = &txbase[elem];
  1603. }
  1604. dev_consume_skb_irq(skb);
  1605. dev->stats.tx_packets++;
  1606. }
  1607. hp->tx_old = elem;
  1608. if (netif_queue_stopped(dev) &&
  1609. TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
  1610. netif_wake_queue(dev);
  1611. }
/* Originally I handled an allocation failure by giving just that one ring
 * buffer back to the happy meal.  The problem is that when that condition
 * is triggered, the happy meal expects you to do something reasonable with
 * all of the packets it has DMA'd in.  So now I just drop the entire ring
 * when we cannot get a new skb and give them all back to the happy meal;
 * maybe things will be "happier" now.
 *
 * hp->happy_lock must be held
 */
  1621. static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
  1622. {
  1623. struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
  1624. struct happy_meal_rxd *this;
  1625. int elem = hp->rx_new, drops = 0;
  1626. u32 flags;
  1627. this = &rxbase[elem];
  1628. while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
  1629. struct sk_buff *skb;
  1630. int len = flags >> 16;
  1631. u16 csum = flags & RXFLAG_CSUM;
  1632. u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
  1633. /* Check for errors. */
  1634. if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
  1635. netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
  1636. dev->stats.rx_errors++;
  1637. if (len < ETH_ZLEN)
  1638. dev->stats.rx_length_errors++;
  1639. if (len & (RXFLAG_OVERFLOW >> 16)) {
  1640. dev->stats.rx_over_errors++;
  1641. dev->stats.rx_fifo_errors++;
  1642. }
  1643. /* Return it to the Happy meal. */
  1644. drop_it:
  1645. dev->stats.rx_dropped++;
  1646. hme_write_rxd(hp, this,
  1647. (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
  1648. dma_addr);
  1649. goto next;
  1650. }
  1651. skb = hp->rx_skbs[elem];
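		/* Large frames: hand the filled buffer up the stack and give
		 * the ring a freshly allocated skb.  Small frames: copy into
		 * a new skb and recycle the original ring buffer.
		 */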
  1652. if (len > RX_COPY_THRESHOLD) {
  1653. struct sk_buff *new_skb;
  1654. u32 mapping;
  1655. /* Now refill the entry, if we can. */
  1656. new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
  1657. if (new_skb == NULL) {
  1658. drops++;
  1659. goto drop_it;
  1660. }
  1661. skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
  1662. mapping = dma_map_single(hp->dma_dev, new_skb->data,
  1663. RX_BUF_ALLOC_SIZE,
  1664. DMA_FROM_DEVICE);
  1665. if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
  1666. dev_kfree_skb_any(new_skb);
  1667. drops++;
  1668. goto drop_it;
  1669. }
  1670. dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
  1671. hp->rx_skbs[elem] = new_skb;
  1672. hme_write_rxd(hp, this,
  1673. (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
  1674. mapping);
  1675. skb_reserve(new_skb, RX_OFFSET);
  1676. /* Trim the original skb for the netif. */
  1677. skb_trim(skb, len);
  1678. } else {
  1679. struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
  1680. if (copy_skb == NULL) {
  1681. drops++;
  1682. goto drop_it;
  1683. }
  1684. skb_reserve(copy_skb, 2);
  1685. skb_put(copy_skb, len);
  1686. dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
  1687. skb_copy_from_linear_data(skb, copy_skb->data, len);
  1688. dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
  1689. /* Reuse original ring buffer. */
  1690. hme_write_rxd(hp, this,
  1691. (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
  1692. dma_addr);
  1693. skb = copy_skb;
  1694. }
  1695. /* This card is _fucking_ hot... */
  1696. skb->csum = csum_unfold(~(__force __sum16)htons(csum));
  1697. skb->ip_summed = CHECKSUM_COMPLETE;
  1698. netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
  1699. skb->protocol = eth_type_trans(skb, dev);
  1700. netif_rx(skb);
  1701. dev->stats.rx_packets++;
  1702. dev->stats.rx_bytes += len;
  1703. next:
  1704. elem = NEXT_RX(elem);
  1705. this = &rxbase[elem];
  1706. }
  1707. hp->rx_new = elem;
  1708. if (drops)
  1709. netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
  1710. }
  1711. static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
  1712. {
  1713. struct net_device *dev = dev_id;
  1714. struct happy_meal *hp = netdev_priv(dev);
  1715. u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
  1716. HMD("status=%08x\n", happy_status);
  1717. spin_lock(&hp->happy_lock);
  1718. if (happy_status & GREG_STAT_ERRORS) {
  1719. if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
  1720. goto out;
  1721. }
  1722. if (happy_status & GREG_STAT_MIFIRQ)
  1723. happy_meal_mif_interrupt(hp);
  1724. if (happy_status & GREG_STAT_TXALL)
  1725. happy_meal_tx(hp);
  1726. if (happy_status & GREG_STAT_RXTOHOST)
  1727. happy_meal_rx(hp, dev);
  1728. HMD("done\n");
  1729. out:
  1730. spin_unlock(&hp->happy_lock);
  1731. return IRQ_HANDLED;
  1732. }
  1733. #ifdef CONFIG_SBUS
  1734. static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
  1735. {
  1736. struct quattro *qp = (struct quattro *) cookie;
  1737. int i;
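	/* The four ports of a QFE card share one SBUS interrupt; poll each
	 * port's status and service only those with work pending.
	 */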
  1738. for (i = 0; i < 4; i++) {
  1739. struct net_device *dev = qp->happy_meals[i];
  1740. struct happy_meal *hp = netdev_priv(dev);
  1741. u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
  1742. HMD("status=%08x\n", happy_status);
  1743. if (!(happy_status & (GREG_STAT_ERRORS |
  1744. GREG_STAT_MIFIRQ |
  1745. GREG_STAT_TXALL |
  1746. GREG_STAT_RXTOHOST)))
  1747. continue;
  1748. spin_lock(&hp->happy_lock);
  1749. if (happy_status & GREG_STAT_ERRORS)
  1750. if (happy_meal_is_not_so_happy(hp, happy_status))
  1751. goto next;
  1752. if (happy_status & GREG_STAT_MIFIRQ)
  1753. happy_meal_mif_interrupt(hp);
  1754. if (happy_status & GREG_STAT_TXALL)
  1755. happy_meal_tx(hp);
  1756. if (happy_status & GREG_STAT_RXTOHOST)
  1757. happy_meal_rx(hp, dev);
  1758. next:
  1759. spin_unlock(&hp->happy_lock);
  1760. }
  1761. HMD("done\n");
  1762. return IRQ_HANDLED;
  1763. }
  1764. #endif
  1765. static int happy_meal_open(struct net_device *dev)
  1766. {
  1767. struct happy_meal *hp = netdev_priv(dev);
  1768. int res;
	/* On SBUS Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source whose handler we register at probe time.
	 */
  1772. if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
  1773. res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
  1774. dev->name, dev);
  1775. if (res) {
  1776. HMD("EAGAIN\n");
  1777. netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
  1778. return -EAGAIN;
  1779. }
  1780. }
  1781. HMD("to happy_meal_init\n");
  1782. spin_lock_irq(&hp->happy_lock);
  1783. res = happy_meal_init(hp);
  1784. spin_unlock_irq(&hp->happy_lock);
  1785. if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
  1786. free_irq(hp->irq, dev);
  1787. return res;
  1788. }
  1789. static int happy_meal_close(struct net_device *dev)
  1790. {
  1791. struct happy_meal *hp = netdev_priv(dev);
  1792. spin_lock_irq(&hp->happy_lock);
  1793. happy_meal_stop(hp, hp->gregs);
  1794. happy_meal_clean_rings(hp);
  1795. /* If auto-negotiation timer is running, kill it. */
  1796. del_timer(&hp->happy_timer);
  1797. spin_unlock_irq(&hp->happy_lock);
	/* On Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source whose handler we register at probe
	 * time and never unregister.
	 */
  1802. if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
  1803. free_irq(hp->irq, dev);
  1804. return 0;
  1805. }
  1806. static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
  1807. {
  1808. struct happy_meal *hp = netdev_priv(dev);
  1809. netdev_err(dev, "transmit timed out, resetting\n");
  1810. tx_dump_log();
  1811. netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
  1812. hme_read32(hp, hp->gregs + GREG_STAT),
  1813. hme_read32(hp, hp->etxregs + ETX_CFG),
  1814. hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
  1815. spin_lock_irq(&hp->happy_lock);
  1816. happy_meal_init(hp);
  1817. spin_unlock_irq(&hp->happy_lock);
  1818. netif_wake_queue(dev);
  1819. }
  1820. static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
  1821. u32 first_len, u32 first_entry, u32 entry)
  1822. {
  1823. struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
  1824. dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
  1825. first_entry = NEXT_TX(first_entry);
  1826. while (first_entry != entry) {
  1827. struct happy_meal_txd *this = &txbase[first_entry];
  1828. u32 addr, len;
  1829. addr = hme_read_desc32(hp, &this->tx_addr);
  1830. len = hme_read_desc32(hp, &this->tx_flags);
  1831. len &= TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);

		/* Advance to the next descriptor or the loop never ends. */
		first_entry = NEXT_TX(first_entry);
	}
  1834. }
  1835. static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
  1836. struct net_device *dev)
  1837. {
  1838. struct happy_meal *hp = netdev_priv(dev);
  1839. int entry;
  1840. u32 tx_flags;
  1841. tx_flags = TXFLAG_OWN;
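	/* For CHECKSUM_PARTIAL the descriptor flags carry both the offset at
	 * which hardware checksumming starts and the offset where the result
	 * must be stuffed.
	 */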
  1842. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  1843. const u32 csum_start_off = skb_checksum_start_offset(skb);
  1844. const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
  1845. tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
  1846. ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
  1847. ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
  1848. }
  1849. spin_lock_irq(&hp->happy_lock);
  1850. if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
  1851. netif_stop_queue(dev);
  1852. spin_unlock_irq(&hp->happy_lock);
  1853. netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
  1854. return NETDEV_TX_BUSY;
  1855. }
  1856. entry = hp->tx_new;
  1857. netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
  1858. hp->tx_skbs[entry] = skb;
  1859. if (skb_shinfo(skb)->nr_frags == 0) {
  1860. u32 mapping, len;
  1861. len = skb->len;
  1862. mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
  1863. if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
  1864. goto out_dma_error;
  1865. tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
  1866. hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
  1867. (tx_flags | (len & TXFLAG_SIZE)),
  1868. mapping);
  1869. entry = NEXT_TX(entry);
  1870. } else {
  1871. u32 first_len, first_mapping;
  1872. int frag, first_entry = entry;
  1873. /* We must give this initial chunk to the device last.
  1874. * Otherwise we could race with the device.
  1875. */
  1876. first_len = skb_headlen(skb);
  1877. first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
  1878. DMA_TO_DEVICE);
  1879. if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
  1880. goto out_dma_error;
  1881. entry = NEXT_TX(entry);
  1882. for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
  1883. const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
  1884. u32 len, mapping, this_txflags;
  1885. len = skb_frag_size(this_frag);
  1886. mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
  1887. 0, len, DMA_TO_DEVICE);
  1888. if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
  1889. unmap_partial_tx_skb(hp, first_mapping, first_len,
  1890. first_entry, entry);
  1891. goto out_dma_error;
  1892. }
  1893. this_txflags = tx_flags;
  1894. if (frag == skb_shinfo(skb)->nr_frags - 1)
  1895. this_txflags |= TXFLAG_EOP;
  1896. hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
  1897. (this_txflags | (len & TXFLAG_SIZE)),
  1898. mapping);
  1899. entry = NEXT_TX(entry);
  1900. }
  1901. hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
  1902. (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
  1903. first_mapping);
  1904. }
  1905. hp->tx_new = entry;
  1906. if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
  1907. netif_stop_queue(dev);
  1908. /* Get it going. */
  1909. hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
  1910. spin_unlock_irq(&hp->happy_lock);
  1911. tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
  1912. return NETDEV_TX_OK;
  1913. out_dma_error:
  1914. hp->tx_skbs[hp->tx_new] = NULL;
  1915. spin_unlock_irq(&hp->happy_lock);
  1916. dev_kfree_skb_any(skb);
  1917. dev->stats.tx_dropped++;
  1918. return NETDEV_TX_OK;
  1919. }
  1920. static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
  1921. {
  1922. struct happy_meal *hp = netdev_priv(dev);
  1923. spin_lock_irq(&hp->happy_lock);
  1924. happy_meal_get_counters(hp, hp->bigmacregs);
  1925. spin_unlock_irq(&hp->happy_lock);
  1926. return &dev->stats;
  1927. }
  1928. static void happy_meal_set_multicast(struct net_device *dev)
  1929. {
  1930. struct happy_meal *hp = netdev_priv(dev);
  1931. void __iomem *bregs = hp->bigmacregs;
  1932. struct netdev_hw_addr *ha;
  1933. u32 crc;
  1934. spin_lock_irq(&hp->happy_lock);
  1935. if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
  1936. hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
  1937. hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
  1938. hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
  1939. hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
  1940. } else if (dev->flags & IFF_PROMISC) {
  1941. hme_write32(hp, bregs + BMAC_RXCFG,
  1942. hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
  1943. } else {
  1944. u16 hash_table[4];
  1945. memset(hash_table, 0, sizeof(hash_table));
  1946. netdev_for_each_mc_addr(ha, dev) {
  1947. crc = ether_crc_le(6, ha->addr);
  1948. crc >>= 26;
  1949. hash_table[crc >> 4] |= 1 << (crc & 0xf);
  1950. }
  1951. hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
  1952. hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
  1953. hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
  1954. hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
  1955. }
  1956. spin_unlock_irq(&hp->happy_lock);
  1957. }
  1958. /* Ethtool support... */
  1959. static int hme_get_link_ksettings(struct net_device *dev,
  1960. struct ethtool_link_ksettings *cmd)
  1961. {
  1962. struct happy_meal *hp = netdev_priv(dev);
  1963. u32 speed;
  1964. u32 supported;
  1965. supported =
  1966. (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
  1967. SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
  1968. SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
  1969. /* XXX hardcoded stuff for now */
  1970. cmd->base.port = PORT_TP; /* XXX no MII support */
  1971. cmd->base.phy_address = 0; /* XXX fixed PHYAD */
  1972. /* Record PHY settings. */
  1973. spin_lock_irq(&hp->happy_lock);
  1974. hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
  1975. hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
  1976. spin_unlock_irq(&hp->happy_lock);
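	/* With autoneg enabled, report the negotiated result from the link
	 * partner ability bits; otherwise report whatever is forced in BMCR.
	 */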
  1977. if (hp->sw_bmcr & BMCR_ANENABLE) {
  1978. cmd->base.autoneg = AUTONEG_ENABLE;
  1979. speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
  1980. SPEED_100 : SPEED_10);
  1981. if (speed == SPEED_100)
  1982. cmd->base.duplex =
  1983. (hp->sw_lpa & (LPA_100FULL)) ?
  1984. DUPLEX_FULL : DUPLEX_HALF;
  1985. else
  1986. cmd->base.duplex =
  1987. (hp->sw_lpa & (LPA_10FULL)) ?
  1988. DUPLEX_FULL : DUPLEX_HALF;
  1989. } else {
  1990. cmd->base.autoneg = AUTONEG_DISABLE;
  1991. speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
  1992. cmd->base.duplex =
  1993. (hp->sw_bmcr & BMCR_FULLDPLX) ?
  1994. DUPLEX_FULL : DUPLEX_HALF;
  1995. }
  1996. cmd->base.speed = speed;
  1997. ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
  1998. supported);
  1999. return 0;
  2000. }
  2001. static int hme_set_link_ksettings(struct net_device *dev,
  2002. const struct ethtool_link_ksettings *cmd)
  2003. {
  2004. struct happy_meal *hp = netdev_priv(dev);
  2005. /* Verify the settings we care about. */
  2006. if (cmd->base.autoneg != AUTONEG_ENABLE &&
  2007. cmd->base.autoneg != AUTONEG_DISABLE)
  2008. return -EINVAL;
  2009. if (cmd->base.autoneg == AUTONEG_DISABLE &&
  2010. ((cmd->base.speed != SPEED_100 &&
  2011. cmd->base.speed != SPEED_10) ||
  2012. (cmd->base.duplex != DUPLEX_HALF &&
  2013. cmd->base.duplex != DUPLEX_FULL)))
  2014. return -EINVAL;
  2015. /* Ok, do it to it. */
  2016. spin_lock_irq(&hp->happy_lock);
  2017. del_timer(&hp->happy_timer);
  2018. happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
  2019. spin_unlock_irq(&hp->happy_lock);
  2020. return 0;
  2021. }
  2022. static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  2023. {
  2024. struct happy_meal *hp = netdev_priv(dev);
  2025. strscpy(info->driver, DRV_NAME, sizeof(info->driver));
  2026. if (hp->happy_flags & HFLAG_PCI) {
  2027. struct pci_dev *pdev = hp->happy_dev;
  2028. strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
  2029. }
  2030. #ifdef CONFIG_SBUS
  2031. else {
  2032. const struct linux_prom_registers *regs;
  2033. struct platform_device *op = hp->happy_dev;
  2034. regs = of_get_property(op->dev.of_node, "regs", NULL);
  2035. if (regs)
  2036. snprintf(info->bus_info, sizeof(info->bus_info),
  2037. "SBUS:%d",
  2038. regs->which_io);
  2039. }
  2040. #endif
  2041. }
  2042. static u32 hme_get_link(struct net_device *dev)
  2043. {
  2044. struct happy_meal *hp = netdev_priv(dev);
  2045. spin_lock_irq(&hp->happy_lock);
	/* Link status is latched in the BMSR; refresh it before reporting. */
	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
	spin_unlock_irq(&hp->happy_lock);
	return hp->sw_bmsr & BMSR_LSTATUS;
  2049. }
  2050. static const struct ethtool_ops hme_ethtool_ops = {
  2051. .get_drvinfo = hme_get_drvinfo,
  2052. .get_link = hme_get_link,
  2053. .get_link_ksettings = hme_get_link_ksettings,
  2054. .set_link_ksettings = hme_set_link_ksettings,
  2055. };
  2056. #ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find its quattro parent.
 * If none exists, allocate and return a new one.
 *
 * Return NULL on failure.
 */
  2062. static struct quattro *quattro_sbus_find(struct platform_device *child)
  2063. {
  2064. struct device *parent = child->dev.parent;
  2065. struct platform_device *op;
  2066. struct quattro *qp;
  2067. op = to_platform_device(parent);
  2068. qp = platform_get_drvdata(op);
  2069. if (qp)
  2070. return qp;
  2071. qp = kzalloc(sizeof(*qp), GFP_KERNEL);
  2072. if (!qp)
  2073. return NULL;
  2074. qp->quattro_dev = child;
  2075. qp->next = qfe_sbus_list;
  2076. qfe_sbus_list = qp;
  2077. platform_set_drvdata(op, qp);
  2078. return qp;
  2079. }
/* After all quattro cards have been probed, we call these functions
 * to register the IRQ handlers for the cards that were successfully
 * probed and to skip the cards that failed to initialize.
 */
  2084. static int __init quattro_sbus_register_irqs(void)
  2085. {
  2086. struct quattro *qp;
  2087. for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
  2088. struct platform_device *op = qp->quattro_dev;
  2089. int err, qfe_slot, skip = 0;
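		/* Only wire up the shared IRQ for cards on which all four
		 * ports probed successfully.
		 */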
  2090. for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
  2091. if (!qp->happy_meals[qfe_slot])
  2092. skip = 1;
  2093. }
  2094. if (skip)
  2095. continue;
  2096. err = request_irq(op->archdata.irqs[0],
  2097. quattro_sbus_interrupt,
  2098. IRQF_SHARED, "Quattro",
  2099. qp);
  2100. if (err != 0) {
  2101. dev_err(&op->dev,
  2102. "Quattro HME: IRQ registration error %d.\n",
  2103. err);
  2104. return err;
  2105. }
  2106. }
  2107. return 0;
  2108. }
  2109. static void quattro_sbus_free_irqs(void)
  2110. {
  2111. struct quattro *qp;
  2112. for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
  2113. struct platform_device *op = qp->quattro_dev;
  2114. int qfe_slot, skip = 0;
  2115. for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
  2116. if (!qp->happy_meals[qfe_slot])
  2117. skip = 1;
  2118. }
  2119. if (skip)
  2120. continue;
  2121. free_irq(op->archdata.irqs[0], qp);
  2122. }
  2123. }
  2124. #endif /* CONFIG_SBUS */
  2125. #ifdef CONFIG_PCI
  2126. static struct quattro *quattro_pci_find(struct pci_dev *pdev)
  2127. {
  2128. int i;
  2129. struct pci_dev *bdev = pdev->bus->self;
  2130. struct quattro *qp;
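	/* All four ports of a PCI QFE sit behind the same PCI bridge, so the
	 * bridge device is used as the lookup key for the shared quattro
	 * structure.
	 */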
  2131. if (!bdev)
  2132. return ERR_PTR(-ENODEV);
  2133. for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
  2134. struct pci_dev *qpdev = qp->quattro_dev;
  2135. if (qpdev == bdev)
  2136. return qp;
  2137. }
  2138. qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
  2139. if (!qp)
  2140. return ERR_PTR(-ENOMEM);
  2141. for (i = 0; i < 4; i++)
  2142. qp->happy_meals[i] = NULL;
  2143. qp->quattro_dev = bdev;
  2144. qp->next = qfe_pci_list;
  2145. qfe_pci_list = qp;
  2146. /* No range tricks necessary on PCI. */
  2147. qp->nranges = 0;
  2148. return qp;
  2149. }
  2150. #endif /* CONFIG_PCI */
  2151. static const struct net_device_ops hme_netdev_ops = {
  2152. .ndo_open = happy_meal_open,
  2153. .ndo_stop = happy_meal_close,
  2154. .ndo_start_xmit = happy_meal_start_xmit,
  2155. .ndo_tx_timeout = happy_meal_tx_timeout,
  2156. .ndo_get_stats = happy_meal_get_stats,
  2157. .ndo_set_rx_mode = happy_meal_set_multicast,
  2158. .ndo_set_mac_address = eth_mac_addr,
  2159. .ndo_validate_addr = eth_validate_addr,
  2160. };
  2161. #ifdef CONFIG_SBUS
  2162. static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
  2163. {
  2164. struct device_node *dp = op->dev.of_node, *sbus_dp;
  2165. struct quattro *qp = NULL;
  2166. struct happy_meal *hp;
  2167. struct net_device *dev;
  2168. int i, qfe_slot = -1;
  2169. u8 addr[ETH_ALEN];
  2170. int err = -ENODEV;
  2171. sbus_dp = op->dev.parent->of_node;
  2172. /* We can match PCI devices too, do not accept those here. */
  2173. if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
  2174. return err;
  2175. if (is_qfe) {
  2176. qp = quattro_sbus_find(op);
  2177. if (qp == NULL)
  2178. goto err_out;
  2179. for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
  2180. if (qp->happy_meals[qfe_slot] == NULL)
  2181. break;
  2182. if (qfe_slot == 4)
  2183. goto err_out;
  2184. }
  2185. err = -ENOMEM;
  2186. dev = alloc_etherdev(sizeof(struct happy_meal));
  2187. if (!dev)
  2188. goto err_out;
  2189. SET_NETDEV_DEV(dev, &op->dev);
	/* If the user did not specify a MAC address, use
	 * the Quattro local-mac-address property...
	 */
  2193. for (i = 0; i < 6; i++) {
  2194. if (macaddr[i] != 0)
  2195. break;
  2196. }
  2197. if (i < 6) { /* a mac address was given */
  2198. for (i = 0; i < 6; i++)
  2199. addr[i] = macaddr[i];
  2200. eth_hw_addr_set(dev, addr);
  2201. macaddr[5]++;
  2202. } else {
  2203. const unsigned char *addr;
  2204. int len;
  2205. addr = of_get_property(dp, "local-mac-address", &len);
  2206. if (qfe_slot != -1 && addr && len == ETH_ALEN)
  2207. eth_hw_addr_set(dev, addr);
  2208. else
  2209. eth_hw_addr_set(dev, idprom->id_ethaddr);
  2210. }
  2211. hp = netdev_priv(dev);
  2212. hp->happy_dev = op;
  2213. hp->dma_dev = &op->dev;
  2214. spin_lock_init(&hp->happy_lock);
  2215. err = -ENODEV;
  2216. if (qp != NULL) {
  2217. hp->qfe_parent = qp;
  2218. hp->qfe_ent = qfe_slot;
  2219. qp->happy_meals[qfe_slot] = dev;
  2220. }
  2221. hp->gregs = of_ioremap(&op->resource[0], 0,
  2222. GREG_REG_SIZE, "HME Global Regs");
  2223. if (!hp->gregs) {
  2224. dev_err(&op->dev, "Cannot map global registers.\n");
  2225. goto err_out_free_netdev;
  2226. }
  2227. hp->etxregs = of_ioremap(&op->resource[1], 0,
  2228. ETX_REG_SIZE, "HME TX Regs");
  2229. if (!hp->etxregs) {
  2230. dev_err(&op->dev, "Cannot map MAC TX registers.\n");
  2231. goto err_out_iounmap;
  2232. }
  2233. hp->erxregs = of_ioremap(&op->resource[2], 0,
  2234. ERX_REG_SIZE, "HME RX Regs");
  2235. if (!hp->erxregs) {
  2236. dev_err(&op->dev, "Cannot map MAC RX registers.\n");
  2237. goto err_out_iounmap;
  2238. }
  2239. hp->bigmacregs = of_ioremap(&op->resource[3], 0,
  2240. BMAC_REG_SIZE, "HME BIGMAC Regs");
  2241. if (!hp->bigmacregs) {
  2242. dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
  2243. goto err_out_iounmap;
  2244. }
  2245. hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Transceiver Regs");
  2247. if (!hp->tcvregs) {
  2248. dev_err(&op->dev, "Cannot map TCVR registers.\n");
  2249. goto err_out_iounmap;
  2250. }
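	/* If the PROM does not report an hm-rev property, assume a rev A0
	 * part.
	 */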
  2251. hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
  2252. if (hp->hm_revision == 0xff)
  2253. hp->hm_revision = 0xa0;
  2254. /* Now enable the feature flags we can. */
  2255. if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
  2256. hp->happy_flags = HFLAG_20_21;
  2257. else if (hp->hm_revision != 0xa0)
  2258. hp->happy_flags = HFLAG_NOT_A0;
  2259. if (qp != NULL)
  2260. hp->happy_flags |= HFLAG_QUATTRO;
  2261. /* Get the supported DVMA burst sizes from our Happy SBUS. */
  2262. hp->happy_bursts = of_getintprop_default(sbus_dp,
  2263. "burst-sizes", 0x00);
  2264. hp->happy_block = dma_alloc_coherent(hp->dma_dev,
  2265. PAGE_SIZE,
  2266. &hp->hblock_dvma,
  2267. GFP_ATOMIC);
  2268. err = -ENOMEM;
  2269. if (!hp->happy_block)
  2270. goto err_out_iounmap;
  2271. /* Force check of the link first time we are brought up. */
  2272. hp->linkcheck = 0;
  2273. /* Force timer state to 'asleep' with count of zero. */
  2274. hp->timer_state = asleep;
  2275. hp->timer_ticks = 0;
  2276. timer_setup(&hp->happy_timer, happy_meal_timer, 0);
  2277. hp->dev = dev;
  2278. dev->netdev_ops = &hme_netdev_ops;
  2279. dev->watchdog_timeo = 5*HZ;
  2280. dev->ethtool_ops = &hme_ethtool_ops;
  2281. /* Happy Meal can do it all... */
  2282. dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
  2283. dev->features |= dev->hw_features | NETIF_F_RXCSUM;
  2284. hp->irq = op->archdata.irqs[0];
  2285. #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
  2286. /* Hook up SBUS register/descriptor accessors. */
  2287. hp->read_desc32 = sbus_hme_read_desc32;
  2288. hp->write_txd = sbus_hme_write_txd;
  2289. hp->write_rxd = sbus_hme_write_rxd;
  2290. hp->read32 = sbus_hme_read32;
  2291. hp->write32 = sbus_hme_write32;
  2292. #endif
  2293. /* Grrr, Happy Meal comes up by default not advertising
  2294. * full duplex 100baseT capabilities, fix this.
  2295. */
  2296. spin_lock_irq(&hp->happy_lock);
  2297. happy_meal_set_initial_advertisement(hp);
  2298. spin_unlock_irq(&hp->happy_lock);
  2299. err = register_netdev(hp->dev);
  2300. if (err) {
  2301. dev_err(&op->dev, "Cannot register net device, aborting.\n");
  2302. goto err_out_free_coherent;
  2303. }
  2304. platform_set_drvdata(op, hp);
  2305. if (qfe_slot != -1)
  2306. netdev_info(dev,
  2307. "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
  2308. qfe_slot, dev->dev_addr);
  2309. else
  2310. netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
  2311. dev->dev_addr);
  2312. return 0;
  2313. err_out_free_coherent:
  2314. dma_free_coherent(hp->dma_dev,
  2315. PAGE_SIZE,
  2316. hp->happy_block,
  2317. hp->hblock_dvma);
  2318. err_out_iounmap:
  2319. if (hp->gregs)
  2320. of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
  2321. if (hp->etxregs)
  2322. of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
  2323. if (hp->erxregs)
  2324. of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
  2325. if (hp->bigmacregs)
  2326. of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
  2327. if (hp->tcvregs)
  2328. of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
  2329. if (qp)
  2330. qp->happy_meals[qfe_slot] = NULL;
  2331. err_out_free_netdev:
  2332. free_netdev(dev);
  2333. err_out:
  2334. return err;
  2335. }
  2336. #endif
  2337. #ifdef CONFIG_PCI
  2338. #ifndef CONFIG_SPARC
  2339. static int is_quattro_p(struct pci_dev *pdev)
  2340. {
  2341. struct pci_dev *busdev = pdev->bus->self;
  2342. struct pci_dev *this_pdev;
  2343. int n_hmes;
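	/* A QFE card appears as four HME functions behind a DEC 21153
	 * bridge; require exactly that topology.
	 */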
  2344. if (busdev == NULL ||
  2345. busdev->vendor != PCI_VENDOR_ID_DEC ||
  2346. busdev->device != PCI_DEVICE_ID_DEC_21153)
  2347. return 0;
  2348. n_hmes = 0;
  2349. list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
  2350. if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
  2351. this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
  2352. n_hmes++;
  2353. }
  2354. if (n_hmes != 4)
  2355. return 0;
  2356. return 1;
  2357. }
  2358. /* Fetch MAC address from vital product data of PCI ROM. */
  2359. static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
  2360. {
  2361. int this_offset;
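	/* Scan the ROM for the VPD byte pattern 0x90 0x00 0x09 'N' 'A' 0x06
	 * that introduces a network-address field, then read the six MAC
	 * bytes that follow it.
	 */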
  2362. for (this_offset = 0x20; this_offset < len; this_offset++) {
  2363. void __iomem *p = rom_base + this_offset;
  2364. if (readb(p + 0) != 0x90 ||
  2365. readb(p + 1) != 0x00 ||
  2366. readb(p + 2) != 0x09 ||
  2367. readb(p + 3) != 0x4e ||
  2368. readb(p + 4) != 0x41 ||
  2369. readb(p + 5) != 0x06)
  2370. continue;
  2371. this_offset += 6;
  2372. p += 6;
  2373. if (index == 0) {
  2374. int i;
  2375. for (i = 0; i < 6; i++)
  2376. dev_addr[i] = readb(p + i);
  2377. return 1;
  2378. }
  2379. index--;
  2380. }
  2381. return 0;
  2382. }
  2383. static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
  2384. {
  2385. size_t size;
  2386. void __iomem *p = pci_map_rom(pdev, &size);
  2387. if (p) {
  2388. int index = 0;
  2389. int found;
  2390. if (is_quattro_p(pdev))
  2391. index = PCI_SLOT(pdev->devfn);
  2392. found = readb(p) == 0x55 &&
  2393. readb(p + 1) == 0xaa &&
  2394. find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
  2395. pci_unmap_rom(pdev, p);
  2396. if (found)
  2397. return;
  2398. }
  2399. /* Sun MAC prefix then 3 random bytes. */
  2400. dev_addr[0] = 0x08;
  2401. dev_addr[1] = 0x00;
  2402. dev_addr[2] = 0x20;
  2403. get_random_bytes(&dev_addr[3], 3);
  2404. }
  2405. #endif /* !(CONFIG_SPARC) */
  2406. static int happy_meal_pci_probe(struct pci_dev *pdev,
  2407. const struct pci_device_id *ent)
  2408. {
  2409. struct quattro *qp = NULL;
  2410. #ifdef CONFIG_SPARC
  2411. struct device_node *dp;
  2412. #endif
  2413. struct happy_meal *hp;
  2414. struct net_device *dev;
  2415. void __iomem *hpreg_base;
  2416. struct resource *hpreg_res;
  2417. int i, qfe_slot = -1;
  2418. char prom_name[64];
  2419. u8 addr[ETH_ALEN];
  2420. int err = -ENODEV;
  2421. /* Now make sure pci_dev cookie is there. */
  2422. #ifdef CONFIG_SPARC
  2423. dp = pci_device_to_OF_node(pdev);
  2424. snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
  2425. #else
  2426. if (is_quattro_p(pdev))
  2427. strcpy(prom_name, "SUNW,qfe");
  2428. else
  2429. strcpy(prom_name, "SUNW,hme");
  2430. #endif
  2431. err = pcim_enable_device(pdev);
  2432. if (err)
  2433. goto err_out;
  2434. pci_set_master(pdev);
  2435. if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
  2436. qp = quattro_pci_find(pdev);
  2437. if (IS_ERR(qp)) {
  2438. err = PTR_ERR(qp);
  2439. goto err_out;
  2440. }
  2441. for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
  2442. if (!qp->happy_meals[qfe_slot])
  2443. break;
  2444. if (qfe_slot == 4)
  2445. goto err_out;
  2446. }
  2447. dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
  2448. if (!dev) {
  2449. err = -ENOMEM;
  2450. goto err_out;
  2451. }
  2452. SET_NETDEV_DEV(dev, &pdev->dev);
  2453. hp = netdev_priv(dev);
  2454. hp->happy_dev = pdev;
  2455. hp->dma_dev = &pdev->dev;
  2456. spin_lock_init(&hp->happy_lock);
  2457. if (qp != NULL) {
  2458. hp->qfe_parent = qp;
  2459. hp->qfe_ent = qfe_slot;
  2460. qp->happy_meals[qfe_slot] = dev;
  2461. }
  2462. err = -EINVAL;
  2463. if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
  2464. dev_err(&pdev->dev,
  2465. "Cannot find proper PCI device base address.\n");
  2466. goto err_out_clear_quattro;
  2467. }
  2468. hpreg_res = devm_request_mem_region(&pdev->dev,
  2469. pci_resource_start(pdev, 0),
  2470. pci_resource_len(pdev, 0),
  2471. DRV_NAME);
  2472. if (!hpreg_res) {
  2473. err = -EBUSY;
  2474. dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
  2475. goto err_out_clear_quattro;
  2476. }
  2477. hpreg_base = pcim_iomap(pdev, 0, 0x8000);
  2478. if (!hpreg_base) {
  2479. err = -ENOMEM;
  2480. dev_err(&pdev->dev, "Unable to remap card memory.\n");
  2481. goto err_out_clear_quattro;
  2482. }
  2483. for (i = 0; i < 6; i++) {
  2484. if (macaddr[i] != 0)
  2485. break;
  2486. }
  2487. if (i < 6) { /* a mac address was given */
  2488. for (i = 0; i < 6; i++)
  2489. addr[i] = macaddr[i];
  2490. eth_hw_addr_set(dev, addr);
  2491. macaddr[5]++;
  2492. } else {
  2493. #ifdef CONFIG_SPARC
  2494. const unsigned char *addr;
  2495. int len;
  2496. if (qfe_slot != -1 &&
  2497. (addr = of_get_property(dp, "local-mac-address", &len))
  2498. != NULL &&
  2499. len == 6) {
  2500. eth_hw_addr_set(dev, addr);
  2501. } else {
  2502. eth_hw_addr_set(dev, idprom->id_ethaddr);
  2503. }
  2504. #else
  2505. u8 addr[ETH_ALEN];
  2506. get_hme_mac_nonsparc(pdev, addr);
  2507. eth_hw_addr_set(dev, addr);
  2508. #endif
  2509. }
  2510. /* Layout registers. */
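	/* The PCI HME exposes a single 32K BAR; the five register blocks
	 * live at fixed offsets within it.
	 */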
  2511. hp->gregs = (hpreg_base + 0x0000UL);
  2512. hp->etxregs = (hpreg_base + 0x2000UL);
  2513. hp->erxregs = (hpreg_base + 0x4000UL);
  2514. hp->bigmacregs = (hpreg_base + 0x6000UL);
  2515. hp->tcvregs = (hpreg_base + 0x7000UL);
  2516. #ifdef CONFIG_SPARC
  2517. hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
  2518. if (hp->hm_revision == 0xff)
  2519. hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
  2520. #else
  2521. /* works with this on non-sparc hosts */
  2522. hp->hm_revision = 0x20;
  2523. #endif
  2524. /* Now enable the feature flags we can. */
  2525. if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
  2526. hp->happy_flags = HFLAG_20_21;
  2527. else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
  2528. hp->happy_flags = HFLAG_NOT_A0;
  2529. if (qp != NULL)
  2530. hp->happy_flags |= HFLAG_QUATTRO;
  2531. /* And of course, indicate this is PCI. */
  2532. hp->happy_flags |= HFLAG_PCI;
  2533. #ifdef CONFIG_SPARC
  2534. /* Assume PCI happy meals can handle all burst sizes. */
  2535. hp->happy_bursts = DMA_BURSTBITS;
  2536. #endif
  2537. hp->happy_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
  2538. &hp->hblock_dvma, GFP_KERNEL);
  2539. if (!hp->happy_block) {
  2540. err = -ENOMEM;
  2541. goto err_out_clear_quattro;
  2542. }
  2543. hp->linkcheck = 0;
  2544. hp->timer_state = asleep;
  2545. hp->timer_ticks = 0;
  2546. timer_setup(&hp->happy_timer, happy_meal_timer, 0);
  2547. hp->irq = pdev->irq;
  2548. hp->dev = dev;
  2549. dev->netdev_ops = &hme_netdev_ops;
  2550. dev->watchdog_timeo = 5*HZ;
  2551. dev->ethtool_ops = &hme_ethtool_ops;
  2552. /* Happy Meal can do it all... */
  2553. dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
  2554. dev->features |= dev->hw_features | NETIF_F_RXCSUM;
  2555. #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
  2556. /* Hook up PCI register/descriptor accessors. */
  2557. hp->read_desc32 = pci_hme_read_desc32;
  2558. hp->write_txd = pci_hme_write_txd;
  2559. hp->write_rxd = pci_hme_write_rxd;
  2560. hp->read32 = pci_hme_read32;
  2561. hp->write32 = pci_hme_write32;
  2562. #endif
  2563. /* Grrr, Happy Meal comes up by default not advertising
  2564. * full duplex 100baseT capabilities, fix this.
  2565. */
  2566. spin_lock_irq(&hp->happy_lock);
  2567. happy_meal_set_initial_advertisement(hp);
  2568. spin_unlock_irq(&hp->happy_lock);
  2569. err = devm_register_netdev(&pdev->dev, dev);
  2570. if (err) {
  2571. dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
  2572. goto err_out_clear_quattro;
  2573. }
  2574. pci_set_drvdata(pdev, hp);
  2575. if (!qfe_slot) {
  2576. struct pci_dev *qpdev = qp->quattro_dev;
  2577. prom_name[0] = 0;
  2578. if (!strncmp(dev->name, "eth", 3)) {
  2579. int i = simple_strtoul(dev->name + 3, NULL, 10);
  2580. sprintf(prom_name, "-%d", i + 3);
  2581. }
  2582. netdev_info(dev,
  2583. "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
  2584. prom_name, qpdev->vendor, qpdev->device);
  2585. }
  2586. if (qfe_slot != -1)
  2587. netdev_info(dev,
  2588. "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
  2589. qfe_slot, dev->dev_addr);
  2590. else
  2591. netdev_info(dev,
  2592. "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n",
  2593. dev->dev_addr);
  2594. return 0;
  2595. err_out_clear_quattro:
  2596. if (qp != NULL)
  2597. qp->happy_meals[qfe_slot] = NULL;
  2598. err_out:
  2599. return err;
  2600. }
  2601. static const struct pci_device_id happymeal_pci_ids[] = {
  2602. { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
  2603. { } /* Terminating entry */
  2604. };
  2605. MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
  2606. static struct pci_driver hme_pci_driver = {
  2607. .name = "hme",
  2608. .id_table = happymeal_pci_ids,
  2609. .probe = happy_meal_pci_probe,
  2610. };
  2611. static int __init happy_meal_pci_init(void)
  2612. {
  2613. return pci_register_driver(&hme_pci_driver);
  2614. }
  2615. static void happy_meal_pci_exit(void)
  2616. {
  2617. pci_unregister_driver(&hme_pci_driver);
  2618. while (qfe_pci_list) {
  2619. struct quattro *qfe = qfe_pci_list;
  2620. struct quattro *next = qfe->next;
  2621. kfree(qfe);
  2622. qfe_pci_list = next;
  2623. }
  2624. }
  2625. #endif
  2626. #ifdef CONFIG_SBUS
  2627. static const struct of_device_id hme_sbus_match[];
  2628. static int hme_sbus_probe(struct platform_device *op)
  2629. {
  2630. const struct of_device_id *match;
  2631. struct device_node *dp = op->dev.of_node;
  2632. const char *model = of_get_property(dp, "model", NULL);
  2633. int is_qfe;
  2634. match = of_match_device(hme_sbus_match, &op->dev);
  2635. if (!match)
  2636. return -EINVAL;
  2637. is_qfe = (match->data != NULL);
  2638. if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
  2639. is_qfe = 1;
  2640. return happy_meal_sbus_probe_one(op, is_qfe);
  2641. }
  2642. static int hme_sbus_remove(struct platform_device *op)
  2643. {
  2644. struct happy_meal *hp = platform_get_drvdata(op);
  2645. struct net_device *net_dev = hp->dev;
  2646. unregister_netdev(net_dev);
  2647. /* XXX qfe parent interrupt... */
  2648. of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
  2649. of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
  2650. of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
  2651. of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
  2652. of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
  2653. dma_free_coherent(hp->dma_dev,
  2654. PAGE_SIZE,
  2655. hp->happy_block,
  2656. hp->hblock_dvma);
  2657. free_netdev(net_dev);
  2658. return 0;
  2659. }
  2660. static const struct of_device_id hme_sbus_match[] = {
  2661. {
  2662. .name = "SUNW,hme",
  2663. },
  2664. {
  2665. .name = "SUNW,qfe",
  2666. .data = (void *) 1,
  2667. },
  2668. {
  2669. .name = "qfe",
  2670. .data = (void *) 1,
  2671. },
  2672. {},
  2673. };
  2674. MODULE_DEVICE_TABLE(of, hme_sbus_match);
  2675. static struct platform_driver hme_sbus_driver = {
  2676. .driver = {
  2677. .name = "hme",
  2678. .of_match_table = hme_sbus_match,
  2679. },
  2680. .probe = hme_sbus_probe,
  2681. .remove = hme_sbus_remove,
  2682. };
  2683. static int __init happy_meal_sbus_init(void)
  2684. {
  2685. int err;
  2686. err = platform_driver_register(&hme_sbus_driver);
  2687. if (!err)
  2688. err = quattro_sbus_register_irqs();
  2689. return err;
  2690. }
  2691. static void happy_meal_sbus_exit(void)
  2692. {
  2693. platform_driver_unregister(&hme_sbus_driver);
  2694. quattro_sbus_free_irqs();
  2695. while (qfe_sbus_list) {
  2696. struct quattro *qfe = qfe_sbus_list;
  2697. struct quattro *next = qfe->next;
  2698. kfree(qfe);
  2699. qfe_sbus_list = next;
  2700. }
  2701. }
  2702. #endif
  2703. static int __init happy_meal_probe(void)
  2704. {
  2705. int err = 0;
  2706. #ifdef CONFIG_SBUS
  2707. err = happy_meal_sbus_init();
  2708. #endif
  2709. #ifdef CONFIG_PCI
  2710. if (!err) {
  2711. err = happy_meal_pci_init();
  2712. #ifdef CONFIG_SBUS
  2713. if (err)
  2714. happy_meal_sbus_exit();
  2715. #endif
  2716. }
  2717. #endif
  2718. return err;
  2719. }
  2720. static void __exit happy_meal_exit(void)
  2721. {
  2722. #ifdef CONFIG_SBUS
  2723. happy_meal_sbus_exit();
  2724. #endif
  2725. #ifdef CONFIG_PCI
  2726. happy_meal_pci_exit();
  2727. #endif
  2728. }
  2729. module_init(happy_meal_probe);
  2730. module_exit(happy_meal_exit);