smc911x.c
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * smc911x.c
  4. * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
  5. *
  6. * Copyright (C) 2005 Sensoria Corp
  7. * Derived from the unified SMC91x driver by Nicolas Pitre
  8. * and the smsc911x.c reference driver by SMSC
  9. *
  10. * Arguments:
  11. * watchdog = TX watchdog timeout
  12. * tx_fifo_kb = Size of TX FIFO in KB
  13. *
  14. * History:
  15. * 04/16/05 Dustin McIntire Initial version
  16. */
  17. static const char version[] =
  18. "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <[email protected]>\n";
  19. /* Debugging options */
  20. #define ENABLE_SMC_DEBUG_RX 0
  21. #define ENABLE_SMC_DEBUG_TX 0
  22. #define ENABLE_SMC_DEBUG_DMA 0
  23. #define ENABLE_SMC_DEBUG_PKTS 0
  24. #define ENABLE_SMC_DEBUG_MISC 0
  25. #define ENABLE_SMC_DEBUG_FUNC 0
  26. #define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0)
  27. #define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1)
  28. #define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2)
  29. #define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
  30. #define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
  31. #define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
  32. #ifndef SMC_DEBUG
  33. #define SMC_DEBUG ( SMC_DEBUG_RX | \
  34. SMC_DEBUG_TX | \
  35. SMC_DEBUG_DMA | \
  36. SMC_DEBUG_PKTS | \
  37. SMC_DEBUG_MISC | \
  38. SMC_DEBUG_FUNC \
  39. )
  40. #endif
  41. #include <linux/module.h>
  42. #include <linux/kernel.h>
  43. #include <linux/sched.h>
  44. #include <linux/delay.h>
  45. #include <linux/interrupt.h>
  46. #include <linux/errno.h>
  47. #include <linux/ioport.h>
  48. #include <linux/crc32.h>
  49. #include <linux/device.h>
  50. #include <linux/platform_device.h>
  51. #include <linux/spinlock.h>
  52. #include <linux/ethtool.h>
  53. #include <linux/mii.h>
  54. #include <linux/workqueue.h>
  55. #include <linux/netdevice.h>
  56. #include <linux/etherdevice.h>
  57. #include <linux/skbuff.h>
  58. #include <linux/dmaengine.h>
  59. #include <asm/io.h>
  60. #include "smc911x.h"
  61. /*
  62. * Transmit timeout, default 5 seconds.
  63. */
  64. static int watchdog = 5000;
  65. module_param(watchdog, int, 0400);
  66. MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
  67. static int tx_fifo_kb=8;
  68. module_param(tx_fifo_kb, int, 0400);
  69. MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
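/* Note: tx_fifo_kb is folded into HW_CFG bits 19:16 by smc911x_reset()
 * via (lp->tx_fifo_kb & 0xF) << 16, hence the limited range given in the
 * parameter description above. */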
  70. MODULE_LICENSE("GPL");
  71. MODULE_ALIAS("platform:smc911x");
  72. /*
  73. * The internal workings of the driver. If you are changing anything
  74. * here with the SMC stuff, you should have the datasheet and know
  75. * what you are doing.
  76. */
  77. #define CARDNAME "smc911x"
  78. /*
  79. * Use power-down feature of the chip
  80. */
  81. #define POWER_DOWN 1
  82. #if SMC_DEBUG > 0
  83. #define DBG(n, dev, args...) \
  84. do { \
  85. if (SMC_DEBUG & (n)) \
  86. netdev_dbg(dev, args); \
  87. } while (0)
  88. #define PRINTK(dev, args...) netdev_info(dev, args)
  89. #else
  90. #define DBG(n, dev, args...) \
  91. while (0) { \
  92. netdev_dbg(dev, args); \
  93. }
  94. #define PRINTK(dev, args...) netdev_dbg(dev, args)
  95. #endif
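/* Note: the non-debug variant of DBG() above is written as
 * "while (0) { netdev_dbg(dev, args); }" rather than as an empty macro,
 * presumably so the compiler still type-checks the format string and
 * arguments even when debugging is compiled out. */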
  96. #if SMC_DEBUG_PKTS > 0
  97. static void PRINT_PKT(u_char *buf, int length)
  98. {
  99. int i;
  100. int remainder;
  101. int lines;
  102. lines = length / 16;
  103. remainder = length % 16;
  104. for (i = 0; i < lines ; i ++) {
  105. int cur;
  106. printk(KERN_DEBUG);
  107. for (cur = 0; cur < 8; cur++) {
  108. u_char a, b;
  109. a = *buf++;
  110. b = *buf++;
  111. pr_cont("%02x%02x ", a, b);
  112. }
  113. pr_cont("\n");
  114. }
  115. printk(KERN_DEBUG);
  116. for (i = 0; i < remainder/2 ; i++) {
  117. u_char a, b;
  118. a = *buf++;
  119. b = *buf++;
  120. pr_cont("%02x%02x ", a, b);
  121. }
  122. pr_cont("\n");
  123. }
  124. #else
  125. static inline void PRINT_PKT(u_char *buf, int length) { }
  126. #endif
  127. /* this enables an interrupt in the interrupt mask register */
  128. #define SMC_ENABLE_INT(lp, x) do { \
  129. unsigned int __mask; \
  130. __mask = SMC_GET_INT_EN((lp)); \
  131. __mask |= (x); \
  132. SMC_SET_INT_EN((lp), __mask); \
  133. } while (0)
  134. /* this disables an interrupt from the interrupt mask register */
  135. #define SMC_DISABLE_INT(lp, x) do { \
  136. unsigned int __mask; \
  137. __mask = SMC_GET_INT_EN((lp)); \
  138. __mask &= ~(x); \
  139. SMC_SET_INT_EN((lp), __mask); \
  140. } while (0)
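/* Note: SMC_ENABLE_INT()/SMC_DISABLE_INT() are non-atomic
 * read-modify-write sequences on the INT_EN register, so callers are
 * expected to serialise them, typically by holding lp->lock, e.g.:
 *
 *	spin_lock_irqsave(&lp->lock, flags);
 *	SMC_ENABLE_INT(lp, INT_EN_RSFL_EN_);
 *	spin_unlock_irqrestore(&lp->lock, flags);
 */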
  141. /*
  142. * this does a soft reset on the device
  143. */
  144. static void smc911x_reset(struct net_device *dev)
  145. {
  146. struct smc911x_local *lp = netdev_priv(dev);
  147. unsigned int reg, timeout=0, resets=1, irq_cfg;
  148. unsigned long flags;
  149. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  150. /* Take out of PM setting first */
  151. if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
  152. /* A write to the BYTE_TEST register takes the chip out of powerdown */
  153. SMC_SET_BYTE_TEST(lp, 0);
  154. timeout=10;
  155. do {
  156. udelay(10);
  157. reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
  158. } while (--timeout && !reg);
  159. if (timeout == 0) {
  160. PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
  161. return;
  162. }
  163. }
  164. /* Disable all interrupts */
  165. spin_lock_irqsave(&lp->lock, flags);
  166. SMC_SET_INT_EN(lp, 0);
  167. spin_unlock_irqrestore(&lp->lock, flags);
  168. while (resets--) {
  169. SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
  170. timeout=10;
  171. do {
  172. udelay(10);
  173. reg = SMC_GET_HW_CFG(lp);
  174. /* If chip indicates reset timeout then try again */
  175. if (reg & HW_CFG_SRST_TO_) {
  176. PRINTK(dev, "chip reset timeout, retrying...\n");
  177. resets++;
  178. break;
  179. }
  180. } while (--timeout && (reg & HW_CFG_SRST_));
  181. }
  182. if (timeout == 0) {
  183. PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
  184. return;
  185. }
  186. /* make sure EEPROM has finished loading before setting GPIO_CFG */
  187. timeout=1000;
  188. while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
  189. udelay(10);
  190. if (timeout == 0){
  191. PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
  192. return;
  193. }
  194. /* Initialize interrupts */
  195. SMC_SET_INT_EN(lp, 0);
  196. SMC_ACK_INT(lp, -1);
  197. /* Reset the FIFO level and flow control settings */
  198. SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
  199. // TODO: Figure out what the appropriate pause time is
  200. SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
  201. SMC_SET_AFC_CFG(lp, lp->afc_cfg);
  202. /* Set to LED outputs */
  203. SMC_SET_GPIO_CFG(lp, 0x70070000);
  204. /*
  205. * Deassert IRQ for 1*10us for edge type interrupts
  206. * and drive IRQ pin push-pull
  207. */
  208. irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
  209. #ifdef SMC_DYNAMIC_BUS_CONFIG
  210. if (lp->cfg.irq_polarity)
  211. irq_cfg |= INT_CFG_IRQ_POL_;
  212. #endif
  213. SMC_SET_IRQ_CFG(lp, irq_cfg);
  214. /* clear anything saved */
  215. if (lp->pending_tx_skb != NULL) {
  216. dev_kfree_skb (lp->pending_tx_skb);
  217. lp->pending_tx_skb = NULL;
  218. dev->stats.tx_errors++;
  219. dev->stats.tx_aborted_errors++;
  220. }
  221. }
  222. /*
  223. * Enable Interrupts, Receive, and Transmit
  224. */
  225. static void smc911x_enable(struct net_device *dev)
  226. {
  227. struct smc911x_local *lp = netdev_priv(dev);
  228. unsigned mask, cfg, cr;
  229. unsigned long flags;
  230. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  231. spin_lock_irqsave(&lp->lock, flags);
  232. SMC_SET_MAC_ADDR(lp, dev->dev_addr);
  233. /* Enable TX */
  234. cfg = SMC_GET_HW_CFG(lp);
  235. cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
  236. cfg |= HW_CFG_SF_;
  237. SMC_SET_HW_CFG(lp, cfg);
  238. SMC_SET_FIFO_TDA(lp, 0xFF);
  239. /* Update TX stats on every 64 packets sent or every 1 sec */
  240. SMC_SET_FIFO_TSL(lp, 64);
  241. SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
  242. SMC_GET_MAC_CR(lp, cr);
  243. cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
  244. SMC_SET_MAC_CR(lp, cr);
  245. SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
  246. /* Add 2 byte padding to start of packets */
  247. SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
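/* A 14-byte Ethernet header plus this 2-byte RX data offset comes to
 * 16 bytes, so the IP header that follows lands on a 32-bit boundary
 * (the same reason the receive path does skb_reserve(skb, 2)). */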
  248. /* Turn on receiver and enable RX */
  249. if (cr & MAC_CR_RXEN_)
  250. DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");
  251. SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
  252. /* Interrupt on every received packet */
  253. SMC_SET_FIFO_RSA(lp, 0x01);
  254. SMC_SET_FIFO_RSL(lp, 0x00);
  255. /* now, enable interrupts */
  256. mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
  257. INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
  258. INT_EN_PHY_INT_EN_;
  259. if (IS_REV_A(lp->revision))
  260. mask|=INT_EN_RDFL_EN_;
  261. else {
  262. mask|=INT_EN_RDFO_EN_;
  263. }
  264. SMC_ENABLE_INT(lp, mask);
  265. spin_unlock_irqrestore(&lp->lock, flags);
  266. }
  267. /*
  268. * this puts the device in an inactive state
  269. */
  270. static void smc911x_shutdown(struct net_device *dev)
  271. {
  272. struct smc911x_local *lp = netdev_priv(dev);
  273. unsigned cr;
  274. unsigned long flags;
  275. DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
  276. /* Disable IRQ's */
  277. SMC_SET_INT_EN(lp, 0);
  278. /* Turn off Rx and Tx */
  279. spin_lock_irqsave(&lp->lock, flags);
  280. SMC_GET_MAC_CR(lp, cr);
  281. cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
  282. SMC_SET_MAC_CR(lp, cr);
  283. SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
  284. spin_unlock_irqrestore(&lp->lock, flags);
  285. }
  286. static inline void smc911x_drop_pkt(struct net_device *dev)
  287. {
  288. struct smc911x_local *lp = netdev_priv(dev);
  289. unsigned int fifo_count, timeout, reg;
  290. DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
  291. CARDNAME, __func__);
  292. fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
  293. if (fifo_count <= 4) {
  294. /* Manually dump the packet data */
  295. while (fifo_count--)
  296. SMC_GET_RX_FIFO(lp);
  297. } else {
  298. /* Fast forward through the bad packet */
  299. SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
  300. timeout=50;
  301. do {
  302. udelay(10);
  303. reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
  304. } while (--timeout && reg);
  305. if (timeout == 0) {
  306. PRINTK(dev, "timeout waiting for RX fast forward\n");
  307. }
  308. }
  309. }
  310. /*
  311. * This is the procedure to handle the receipt of a packet.
  312. * It should be called after checking for packet presence in
  313. * the RX status FIFO. It must be called with the spin lock
  314. * already held.
  315. */
  316. static inline void smc911x_rcv(struct net_device *dev)
  317. {
  318. struct smc911x_local *lp = netdev_priv(dev);
  319. unsigned int pkt_len, status;
  320. struct sk_buff *skb;
  321. unsigned char *data;
  322. DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
  323. __func__);
  324. status = SMC_GET_RX_STS_FIFO(lp);
  325. DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
  326. (status & 0x3fff0000) >> 16, status & 0xc000ffff);
  327. pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
  328. if (status & RX_STS_ES_) {
  329. /* Deal with a bad packet */
  330. dev->stats.rx_errors++;
  331. if (status & RX_STS_CRC_ERR_)
  332. dev->stats.rx_crc_errors++;
  333. else {
  334. if (status & RX_STS_LEN_ERR_)
  335. dev->stats.rx_length_errors++;
  336. if (status & RX_STS_MCAST_)
  337. dev->stats.multicast++;
  338. }
  339. /* Remove the bad packet data from the RX FIFO */
  340. smc911x_drop_pkt(dev);
  341. } else {
  342. /* Receive a valid packet */
  343. /* Alloc a buffer with extra room for DMA alignment */
  344. skb = netdev_alloc_skb(dev, pkt_len+32);
  345. if (unlikely(skb == NULL)) {
  346. PRINTK(dev, "Low memory, rcvd packet dropped.\n");
  347. dev->stats.rx_dropped++;
  348. smc911x_drop_pkt(dev);
  349. return;
  350. }
  351. /* Align IP header to 32 bits
  352. * Note that the device is configured to add a 2
  353. * byte padding to the packet start, so we really
  354. * want to write to the original data pointer */
  355. data = skb->data;
  356. skb_reserve(skb, 2);
  357. skb_put(skb,pkt_len-4);
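/* The length reported in the RX status word includes the 4-byte FCS,
 * hence the pkt_len - 4 here and in the rx_bytes accounting below. */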
  358. #ifdef SMC_USE_DMA
  359. {
  360. unsigned int fifo;
  361. /* Lower the FIFO threshold if possible */
  362. fifo = SMC_GET_FIFO_INT(lp);
  363. if (fifo & 0xFF) fifo--;
  364. DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
  365. fifo & 0xff);
  366. SMC_SET_FIFO_INT(lp, fifo);
  367. /* Setup RX DMA */
  368. SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
  369. lp->rxdma_active = 1;
  370. lp->current_rx_skb = skb;
  371. SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
  372. /* Packet processing deferred to DMA RX interrupt */
  373. }
  374. #else
  375. SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
  376. SMC_PULL_DATA(lp, data, pkt_len+2+3);
  377. DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
  378. PRINT_PKT(data, min(pkt_len - 4, 64U));
  379. skb->protocol = eth_type_trans(skb, dev);
  380. netif_rx(skb);
  381. dev->stats.rx_packets++;
  382. dev->stats.rx_bytes += pkt_len-4;
  383. #endif
  384. }
  385. }
  386. /*
  387. * This is called to actually send a packet to the chip.
  388. */
  389. static void smc911x_hardware_send_pkt(struct net_device *dev)
  390. {
  391. struct smc911x_local *lp = netdev_priv(dev);
  392. struct sk_buff *skb;
  393. unsigned int cmdA, cmdB, len;
  394. unsigned char *buf;
  395. DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
  396. BUG_ON(lp->pending_tx_skb == NULL);
  397. skb = lp->pending_tx_skb;
  398. lp->pending_tx_skb = NULL;
  399. /* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
  400. /* cmdB [31:16] pkt tag [10:0] length */
  401. #ifdef SMC_USE_DMA
  402. /* 16 byte buffer alignment mode */
  403. buf = (char*)((u32)(skb->data) & ~0xF);
  404. len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
  405. cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
  406. TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
  407. skb->len;
  408. #else
  409. buf = (char *)((uintptr_t)skb->data & ~0x3);
  410. len = (skb->len + 3 + ((uintptr_t)skb->data & 3)) & ~0x3;
  411. cmdA = (((uintptr_t)skb->data & 0x3) << 16) |
  412. TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
  413. skb->len;
  414. #endif
  415. /* tag is packet length so we can use this in stats update later */
  416. cmdB = (skb->len << 16) | (skb->len & 0x7FF);
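/* Worked example (PIO path, hypothetical numbers): for skb->len == 60
 * with skb->data 2 bytes past a word boundary, buf points 2 bytes before
 * the data, len = (60 + 3 + 2) & ~0x3 = 64 bytes get pushed to the FIFO,
 * and the command words become
 *	cmdA = (2 << 16) | TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | 60
 *	cmdB = (60 << 16) | 60
 * so the chip can use the offset and length in cmdA to discard the
 * leading pad and the trailing round-up bytes. */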
  417. DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
  418. len, len, buf, cmdA, cmdB);
  419. SMC_SET_TX_FIFO(lp, cmdA);
  420. SMC_SET_TX_FIFO(lp, cmdB);
  421. DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
  422. PRINT_PKT(buf, min(len, 64U));
  423. /* Send pkt via PIO or DMA */
  424. #ifdef SMC_USE_DMA
  425. lp->current_tx_skb = skb;
  426. SMC_PUSH_DATA(lp, buf, len);
  427. /* DMA complete IRQ will free buffer and set jiffies */
  428. #else
  429. SMC_PUSH_DATA(lp, buf, len);
  430. netif_trans_update(dev);
  431. dev_kfree_skb_irq(skb);
  432. #endif
  433. if (!lp->tx_throttle) {
  434. netif_wake_queue(dev);
  435. }
  436. SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
  437. }
  438. /*
  439. * Since I am not sure if I will have enough room in the chip's RAM
  440. * to store the packet, I call this routine, which either sends it
  441. * now or sets the card to generate an interrupt when it is ready
  442. * for the packet.
  443. */
  444. static netdev_tx_t
  445. smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
  446. {
  447. struct smc911x_local *lp = netdev_priv(dev);
  448. unsigned int free;
  449. unsigned long flags;
  450. DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
  451. __func__);
  452. spin_lock_irqsave(&lp->lock, flags);
  453. BUG_ON(lp->pending_tx_skb != NULL);
  454. free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
  455. DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
  456. /* Turn off the flow when running out of space in FIFO */
  457. if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
  458. DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
  459. free);
  460. /* Re-enable when there is room for at least one MTU-sized packet */
  461. SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
  462. lp->tx_throttle = 1;
  463. netif_stop_queue(dev);
  464. }
  465. /* Drop packets when we run out of space in TX FIFO
  466. * Account for overhead required for:
  467. *
  468. * Tx command words 8 bytes
  469. * Start offset 15 bytes
  470. * End padding 15 bytes
  471. */
  472. if (unlikely(free < (skb->len + 8 + 15 + 15))) {
  473. netdev_warn(dev, "No Tx free space %d < %d\n",
  474. free, skb->len);
  475. lp->pending_tx_skb = NULL;
  476. dev->stats.tx_errors++;
  477. dev->stats.tx_dropped++;
  478. spin_unlock_irqrestore(&lp->lock, flags);
  479. dev_kfree_skb_any(skb);
  480. return NETDEV_TX_OK;
  481. }
  482. #ifdef SMC_USE_DMA
  483. {
  484. /* If the DMA is already running then defer this packet Tx until
  485. * the DMA IRQ starts it
  486. */
  487. if (lp->txdma_active) {
  488. DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
  489. lp->pending_tx_skb = skb;
  490. netif_stop_queue(dev);
  491. spin_unlock_irqrestore(&lp->lock, flags);
  492. return NETDEV_TX_OK;
  493. } else {
  494. DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
  495. lp->txdma_active = 1;
  496. }
  497. }
  498. #endif
  499. lp->pending_tx_skb = skb;
  500. smc911x_hardware_send_pkt(dev);
  501. spin_unlock_irqrestore(&lp->lock, flags);
  502. return NETDEV_TX_OK;
  503. }
  504. /*
  505. * This handles a TX status interrupt, which is only called when:
  506. * - a TX error occurred, or
  507. * - TX of a packet completed.
  508. */
  509. static void smc911x_tx(struct net_device *dev)
  510. {
  511. struct smc911x_local *lp = netdev_priv(dev);
  512. unsigned int tx_status;
  513. DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
  514. __func__);
  515. /* Collect the TX status */
  516. while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
  517. DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
  518. (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
  519. tx_status = SMC_GET_TX_STS_FIFO(lp);
  520. dev->stats.tx_packets++;
  521. dev->stats.tx_bytes+=tx_status>>16;
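/* The upper 16 bits of tx_status are the tag written into TX command B
 * (set to the packet length above), which is what makes this tx_bytes
 * update possible. */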
  522. DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
  523. (tx_status & 0xffff0000) >> 16,
  524. tx_status & 0x0000ffff);
  525. /* count Tx errors, but ignore lost carrier errors when in
  526. * full-duplex mode */
  527. if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
  528. !(tx_status & 0x00000306))) {
  529. dev->stats.tx_errors++;
  530. }
  531. if (tx_status & TX_STS_MANY_COLL_) {
  532. dev->stats.collisions+=16;
  533. dev->stats.tx_aborted_errors++;
  534. } else {
  535. dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
  536. }
  537. /* carrier error only has meaning for half-duplex communication */
  538. if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
  539. !lp->ctl_rfduplx) {
  540. dev->stats.tx_carrier_errors++;
  541. }
  542. if (tx_status & TX_STS_LATE_COLL_) {
  543. dev->stats.collisions++;
  544. dev->stats.tx_aborted_errors++;
  545. }
  546. }
  547. }
  548. /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
  549. /*
  550. * Reads a register from the MII Management serial interface
  551. */
  552. static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
  553. {
  554. struct smc911x_local *lp = netdev_priv(dev);
  555. unsigned int phydata;
  556. SMC_GET_MII(lp, phyreg, phyaddr, phydata);
  557. DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
  558. __func__, phyaddr, phyreg, phydata);
  559. return phydata;
  560. }
  561. /*
  562. * Writes a register to the MII Management serial interface
  563. */
  564. static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
  565. int phydata)
  566. {
  567. struct smc911x_local *lp = netdev_priv(dev);
  568. DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
  569. __func__, phyaddr, phyreg, phydata);
  570. SMC_SET_MII(lp, phyreg, phyaddr, phydata);
  571. }
  572. /*
  573. * Finds and reports the PHY address (115 and 117 have an external
  574. * PHY interface; 118 has an internal PHY only)
  575. */
  576. static void smc911x_phy_detect(struct net_device *dev)
  577. {
  578. struct smc911x_local *lp = netdev_priv(dev);
  579. int phyaddr;
  580. unsigned int cfg, id1, id2;
  581. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  582. lp->phy_type = 0;
  583. /*
  584. * Scan external PHY addresses 1 through 31 if necessary,
  585. * falling back to the internal PHY if none is found.
  586. */
  587. switch(lp->version) {
  588. case CHIP_9115:
  589. case CHIP_9117:
  590. case CHIP_9215:
  591. case CHIP_9217:
  592. cfg = SMC_GET_HW_CFG(lp);
  593. if (cfg & HW_CFG_EXT_PHY_DET_) {
  594. cfg &= ~HW_CFG_PHY_CLK_SEL_;
  595. cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
  596. SMC_SET_HW_CFG(lp, cfg);
  597. udelay(10); /* Wait for clocks to stop */
  598. cfg |= HW_CFG_EXT_PHY_EN_;
  599. SMC_SET_HW_CFG(lp, cfg);
  600. udelay(10); /* Wait for clocks to stop */
  601. cfg &= ~HW_CFG_PHY_CLK_SEL_;
  602. cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
  603. SMC_SET_HW_CFG(lp, cfg);
  604. udelay(10); /* Wait for clocks to stop */
  605. cfg |= HW_CFG_SMI_SEL_;
  606. SMC_SET_HW_CFG(lp, cfg);
  607. for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
  608. /* Read the PHY identifiers */
  609. SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
  610. SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);
  611. /* Make sure it is a valid identifier */
  612. if (id1 != 0x0000 && id1 != 0xffff &&
  613. id1 != 0x8000 && id2 != 0x0000 &&
  614. id2 != 0xffff && id2 != 0x8000) {
  615. /* Save the PHY's address */
  616. lp->mii.phy_id = phyaddr & 31;
  617. lp->phy_type = id1 << 16 | id2;
  618. break;
  619. }
  620. }
  621. if (phyaddr < 32)
  622. /* Found an external PHY */
  623. break;
  624. }
  625. fallthrough;
  626. default:
  627. /* Internal media only */
  628. SMC_GET_PHY_ID1(lp, 1, id1);
  629. SMC_GET_PHY_ID2(lp, 1, id2);
  630. /* Save the PHY's address */
  631. lp->mii.phy_id = 1;
  632. lp->phy_type = id1 << 16 | id2;
  633. }
  634. DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n",
  635. id1, id2, lp->mii.phy_id);
  636. }
  637. /*
  638. * Sets the PHY to a configuration as determined by the user.
  639. * Called with spin_lock held.
  640. */
  641. static int smc911x_phy_fixed(struct net_device *dev)
  642. {
  643. struct smc911x_local *lp = netdev_priv(dev);
  644. int phyaddr = lp->mii.phy_id;
  645. int bmcr;
  646. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  647. /* Enter Link Disable state */
  648. SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
  649. bmcr |= BMCR_PDOWN;
  650. SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
  651. /*
  652. * Set our fixed capabilities
  653. * Disable auto-negotiation
  654. */
  655. bmcr &= ~BMCR_ANENABLE;
  656. if (lp->ctl_rfduplx)
  657. bmcr |= BMCR_FULLDPLX;
  658. if (lp->ctl_rspeed == 100)
  659. bmcr |= BMCR_SPEED100;
  660. /* Write our capabilities to the phy control register */
  661. SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
  662. /* Re-Configure the Receive/Phy Control register */
  663. bmcr &= ~BMCR_PDOWN;
  664. SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
  665. return 1;
  666. }
  667. /**
  668. * smc911x_phy_reset - reset the phy
  669. * @dev: net device
  670. * @phy: phy address
  671. *
  672. * Issue a software reset for the specified PHY and
  673. * wait up to 100ms for the reset to complete. We should
  674. * not access the PHY for 50ms after issuing the reset.
  675. *
  676. * The time to wait appears to be dependent on the PHY.
  677. *
  678. */
  679. static int smc911x_phy_reset(struct net_device *dev, int phy)
  680. {
  681. struct smc911x_local *lp = netdev_priv(dev);
  682. int timeout;
  683. unsigned long flags;
  684. unsigned int reg;
  685. DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
  686. spin_lock_irqsave(&lp->lock, flags);
  687. reg = SMC_GET_PMT_CTRL(lp);
  688. reg &= ~0xfffff030;
  689. reg |= PMT_CTRL_PHY_RST_;
  690. SMC_SET_PMT_CTRL(lp, reg);
  691. spin_unlock_irqrestore(&lp->lock, flags);
  692. for (timeout = 2; timeout; timeout--) {
  693. msleep(50);
  694. spin_lock_irqsave(&lp->lock, flags);
  695. reg = SMC_GET_PMT_CTRL(lp);
  696. spin_unlock_irqrestore(&lp->lock, flags);
  697. if (!(reg & PMT_CTRL_PHY_RST_)) {
  698. /* extra delay required because the phy may
  699. * not be completed with its reset
  700. * when PHY_BCR_RESET_ is cleared. 256us
  701. * should suffice, but use 500us to be safe
  702. */
  703. udelay(500);
  704. break;
  705. }
  706. }
  707. return reg & PMT_CTRL_PHY_RST_;
  708. }
  709. /**
  710. * smc911x_phy_powerdown - powerdown phy
  711. * @dev: net device
  712. * @phy: phy address
  713. *
  714. * Power down the specified PHY
  715. */
  716. static void smc911x_phy_powerdown(struct net_device *dev, int phy)
  717. {
  718. struct smc911x_local *lp = netdev_priv(dev);
  719. unsigned int bmcr;
  720. /* Enter Link Disable state */
  721. SMC_GET_PHY_BMCR(lp, phy, bmcr);
  722. bmcr |= BMCR_PDOWN;
  723. SMC_SET_PHY_BMCR(lp, phy, bmcr);
  724. }
  725. /**
  726. * smc911x_phy_check_media - check the media status and adjust BMCR
  727. * @dev: net device
  728. * @init: set true for initialisation
  729. *
  730. * Select duplex mode depending on negotiation state. This
  731. * also updates our carrier state.
  732. */
  733. static void smc911x_phy_check_media(struct net_device *dev, int init)
  734. {
  735. struct smc911x_local *lp = netdev_priv(dev);
  736. int phyaddr = lp->mii.phy_id;
  737. unsigned int bmcr, cr;
  738. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  739. if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
  740. /* duplex state has changed */
  741. SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
  742. SMC_GET_MAC_CR(lp, cr);
  743. if (lp->mii.full_duplex) {
  744. DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
  745. bmcr |= BMCR_FULLDPLX;
  746. cr |= MAC_CR_RCVOWN_;
  747. } else {
  748. DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
  749. bmcr &= ~BMCR_FULLDPLX;
  750. cr &= ~MAC_CR_RCVOWN_;
  751. }
  752. SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
  753. SMC_SET_MAC_CR(lp, cr);
  754. }
  755. }
  756. /*
  757. * Configures the specified PHY through the MII management interface
  758. * using Autonegotiation.
  759. * Calls smc911x_phy_fixed() if the user has requested a certain config.
  760. * If RPC ANEG bit is set, the media selection is dependent purely on
  761. * the selection by the MII (either in the MII BMCR reg or the result
  762. * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
  763. * is controlled by the RPC SPEED and RPC DPLX bits.
  764. */
  765. static void smc911x_phy_configure(struct work_struct *work)
  766. {
  767. struct smc911x_local *lp = container_of(work, struct smc911x_local,
  768. phy_configure);
  769. struct net_device *dev = lp->netdev;
  770. int phyaddr = lp->mii.phy_id;
  771. int my_phy_caps; /* My PHY capabilities */
  772. int my_ad_caps; /* My Advertised capabilities */
  773. int status __always_unused;
  774. unsigned long flags;
  775. DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
  776. /*
  777. * We should not be called if phy_type is zero.
  778. */
  779. if (lp->phy_type == 0)
  780. return;
  781. if (smc911x_phy_reset(dev, phyaddr)) {
  782. netdev_info(dev, "PHY reset timed out\n");
  783. return;
  784. }
  785. spin_lock_irqsave(&lp->lock, flags);
  786. /*
  787. * Enable PHY Interrupts (for register 18)
  788. * Interrupts listed here are enabled
  789. */
  790. SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
  791. PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
  792. PHY_INT_MASK_LINK_DOWN_);
  793. /* If the user requested no auto neg, then go set his request */
  794. if (lp->mii.force_media) {
  795. smc911x_phy_fixed(dev);
  796. goto smc911x_phy_configure_exit;
  797. }
  798. /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
  799. SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
  800. if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
  801. netdev_info(dev, "Auto negotiation NOT supported\n");
  802. smc911x_phy_fixed(dev);
  803. goto smc911x_phy_configure_exit;
  804. }
  805. /* CSMA capable w/ both pauses */
  806. my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  807. if (my_phy_caps & BMSR_100BASE4)
  808. my_ad_caps |= ADVERTISE_100BASE4;
  809. if (my_phy_caps & BMSR_100FULL)
  810. my_ad_caps |= ADVERTISE_100FULL;
  811. if (my_phy_caps & BMSR_100HALF)
  812. my_ad_caps |= ADVERTISE_100HALF;
  813. if (my_phy_caps & BMSR_10FULL)
  814. my_ad_caps |= ADVERTISE_10FULL;
  815. if (my_phy_caps & BMSR_10HALF)
  816. my_ad_caps |= ADVERTISE_10HALF;
  817. /* Disable capabilities not selected by our user */
  818. if (lp->ctl_rspeed != 100)
  819. my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
  820. if (!lp->ctl_rfduplx)
  821. my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
  822. /* Update our Auto-Neg Advertisement Register */
  823. SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
  824. lp->mii.advertising = my_ad_caps;
  825. /*
  826. * Read the register back. Without this, it appears that when
  827. * auto-negotiation is restarted, sometimes it isn't ready and
  828. * the link does not come up.
  829. */
  830. udelay(10);
  831. SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
  832. DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
  833. DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
  834. /* Restart auto-negotiation process in order to advertise my caps */
  835. SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
  836. smc911x_phy_check_media(dev, 1);
  837. smc911x_phy_configure_exit:
  838. spin_unlock_irqrestore(&lp->lock, flags);
  839. }
  840. /*
  841. * smc911x_phy_interrupt
  842. *
  843. * Purpose: Handle interrupts relating to PHY register 18. This is
  844. * called from the "hard" interrupt handler under our private spinlock.
  845. */
  846. static void smc911x_phy_interrupt(struct net_device *dev)
  847. {
  848. struct smc911x_local *lp = netdev_priv(dev);
  849. int phyaddr = lp->mii.phy_id;
  850. int status __always_unused;
  851. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  852. if (lp->phy_type == 0)
  853. return;
  854. smc911x_phy_check_media(dev, 0);
  855. /* read to clear status bits */
  856. SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
  857. DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
  858. status & 0xffff);
  859. DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
  860. SMC_GET_AFC_CFG(lp));
  861. }
  862. /*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
  863. /*
  864. * This is the main routine of the driver, to handle the device when
  865. * it needs some attention.
  866. */
  867. static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
  868. {
  869. struct net_device *dev = dev_id;
  870. struct smc911x_local *lp = netdev_priv(dev);
  871. unsigned int status, mask, timeout;
  872. unsigned int rx_overrun=0, cr, pkts;
  873. unsigned long flags;
  874. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  875. spin_lock_irqsave(&lp->lock, flags);
  876. /* Spurious interrupt check */
  877. if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
  878. (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
  879. spin_unlock_irqrestore(&lp->lock, flags);
  880. return IRQ_NONE;
  881. }
  882. mask = SMC_GET_INT_EN(lp);
  883. SMC_SET_INT_EN(lp, 0);
  884. /* set a timeout value, so I don't stay here forever */
  885. timeout = 8;
  886. do {
  887. status = SMC_GET_INT(lp);
  888. DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
  889. status, mask, status & ~mask);
  890. status &= mask;
  891. if (!status)
  892. break;
  893. /* Handle SW interrupt condition */
  894. if (status & INT_STS_SW_INT_) {
  895. SMC_ACK_INT(lp, INT_STS_SW_INT_);
  896. mask &= ~INT_EN_SW_INT_EN_;
  897. }
  898. /* Handle various error conditions */
  899. if (status & INT_STS_RXE_) {
  900. SMC_ACK_INT(lp, INT_STS_RXE_);
  901. dev->stats.rx_errors++;
  902. }
  903. if (status & INT_STS_RXDFH_INT_) {
  904. SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
  905. dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
  906. }
  907. /* Undocumented interrupt - what is the right thing to do here? */
  908. if (status & INT_STS_RXDF_INT_) {
  909. SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
  910. }
  911. /* Rx Data FIFO exceeds set level */
  912. if (status & INT_STS_RDFL_) {
  913. if (IS_REV_A(lp->revision)) {
  914. rx_overrun=1;
  915. SMC_GET_MAC_CR(lp, cr);
  916. cr &= ~MAC_CR_RXEN_;
  917. SMC_SET_MAC_CR(lp, cr);
  918. DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
  919. dev->stats.rx_errors++;
  920. dev->stats.rx_fifo_errors++;
  921. }
  922. SMC_ACK_INT(lp, INT_STS_RDFL_);
  923. }
  924. if (status & INT_STS_RDFO_) {
  925. if (!IS_REV_A(lp->revision)) {
  926. SMC_GET_MAC_CR(lp, cr);
  927. cr &= ~MAC_CR_RXEN_;
  928. SMC_SET_MAC_CR(lp, cr);
  929. rx_overrun=1;
  930. DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
  931. dev->stats.rx_errors++;
  932. dev->stats.rx_fifo_errors++;
  933. }
  934. SMC_ACK_INT(lp, INT_STS_RDFO_);
  935. }
  936. /* Handle receive condition */
  937. if ((status & INT_STS_RSFL_) || rx_overrun) {
  938. unsigned int fifo;
  939. DBG(SMC_DEBUG_RX, dev, "RX irq\n");
  940. fifo = SMC_GET_RX_FIFO_INF(lp);
  941. pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
  942. DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
  943. pkts, fifo & 0xFFFF);
  944. if (pkts != 0) {
  945. #ifdef SMC_USE_DMA
  946. unsigned int fifo;
  947. if (lp->rxdma_active){
  948. DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
  949. "RX DMA active\n");
  950. /* The DMA is already running so up the IRQ threshold */
  951. fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
  952. fifo |= pkts & 0xFF;
  953. DBG(SMC_DEBUG_RX, dev,
  954. "Setting RX stat FIFO threshold to %d\n",
  955. fifo & 0xff);
  956. SMC_SET_FIFO_INT(lp, fifo);
  957. } else
  958. #endif
  959. smc911x_rcv(dev);
  960. }
  961. SMC_ACK_INT(lp, INT_STS_RSFL_);
  962. }
  963. /* Handle transmit FIFO available */
  964. if (status & INT_STS_TDFA_) {
  965. DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
  966. SMC_SET_FIFO_TDA(lp, 0xFF);
  967. lp->tx_throttle = 0;
  968. #ifdef SMC_USE_DMA
  969. if (!lp->txdma_active)
  970. #endif
  971. netif_wake_queue(dev);
  972. SMC_ACK_INT(lp, INT_STS_TDFA_);
  973. }
  974. /* Handle transmit done condition */
  975. #if 1
  976. if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
  977. DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
  978. "Tx stat FIFO limit (%d) /GPT irq\n",
  979. (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
  980. smc911x_tx(dev);
  981. SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
  982. SMC_ACK_INT(lp, INT_STS_TSFL_);
  983. SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
  984. }
  985. #else
  986. if (status & INT_STS_TSFL_) {
  987. DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
  988. smc911x_tx(dev);
  989. SMC_ACK_INT(lp, INT_STS_TSFL_);
  990. }
  991. if (status & INT_STS_GPT_INT_) {
  992. DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
  993. SMC_GET_IRQ_CFG(lp),
  994. SMC_GET_FIFO_INT(lp),
  995. SMC_GET_RX_CFG(lp));
  996. DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
  997. (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
  998. SMC_GET_RX_FIFO_INF(lp) & 0xffff,
  999. SMC_GET_RX_STS_FIFO_PEEK(lp));
  1000. SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
  1001. SMC_ACK_INT(lp, INT_STS_GPT_INT_);
  1002. }
  1003. #endif
  1004. /* Handle PHY interrupt condition */
  1005. if (status & INT_STS_PHY_INT_) {
  1006. DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
  1007. smc911x_phy_interrupt(dev);
  1008. SMC_ACK_INT(lp, INT_STS_PHY_INT_);
  1009. }
  1010. } while (--timeout);
  1011. /* restore mask state */
  1012. SMC_SET_INT_EN(lp, mask);
  1013. DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
  1014. 8-timeout);
  1015. spin_unlock_irqrestore(&lp->lock, flags);
  1016. return IRQ_HANDLED;
  1017. }
  1018. #ifdef SMC_USE_DMA
  1019. static void
  1020. smc911x_tx_dma_irq(void *data)
  1021. {
  1022. struct smc911x_local *lp = data;
  1023. struct net_device *dev = lp->netdev;
  1024. struct sk_buff *skb = lp->current_tx_skb;
  1025. unsigned long flags;
  1026. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1027. DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
  1028. BUG_ON(skb == NULL);
  1029. dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
  1030. netif_trans_update(dev);
  1031. dev_kfree_skb_irq(skb);
  1032. lp->current_tx_skb = NULL;
  1033. if (lp->pending_tx_skb != NULL)
  1034. smc911x_hardware_send_pkt(dev);
  1035. else {
  1036. DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
  1037. "No pending Tx packets. DMA disabled\n");
  1038. spin_lock_irqsave(&lp->lock, flags);
  1039. lp->txdma_active = 0;
  1040. if (!lp->tx_throttle) {
  1041. netif_wake_queue(dev);
  1042. }
  1043. spin_unlock_irqrestore(&lp->lock, flags);
  1044. }
  1045. DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
  1046. "TX DMA irq completed\n");
  1047. }
  1048. static void
  1049. smc911x_rx_dma_irq(void *data)
  1050. {
  1051. struct smc911x_local *lp = data;
  1052. struct net_device *dev = lp->netdev;
  1053. struct sk_buff *skb = lp->current_rx_skb;
  1054. unsigned long flags;
  1055. unsigned int pkts;
  1056. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1057. DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
  1058. dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
  1059. BUG_ON(skb == NULL);
  1060. lp->current_rx_skb = NULL;
  1061. PRINT_PKT(skb->data, skb->len);
  1062. skb->protocol = eth_type_trans(skb, dev);
  1063. dev->stats.rx_packets++;
  1064. dev->stats.rx_bytes += skb->len;
  1065. netif_rx(skb);
  1066. spin_lock_irqsave(&lp->lock, flags);
  1067. pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
  1068. if (pkts != 0) {
  1069. smc911x_rcv(dev);
  1070. }else {
  1071. lp->rxdma_active = 0;
  1072. }
  1073. spin_unlock_irqrestore(&lp->lock, flags);
  1074. DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
  1075. "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
  1076. pkts);
  1077. }
  1078. #endif /* SMC_USE_DMA */
  1079. #ifdef CONFIG_NET_POLL_CONTROLLER
  1080. /*
  1081. * Polling receive - used by netconsole and other diagnostic tools
  1082. * to allow network i/o with interrupts disabled.
  1083. */
  1084. static void smc911x_poll_controller(struct net_device *dev)
  1085. {
  1086. disable_irq(dev->irq);
  1087. smc911x_interrupt(dev->irq, dev);
  1088. enable_irq(dev->irq);
  1089. }
  1090. #endif
  1091. /* Our watchdog timed out. Called by the networking layer */
  1092. static void smc911x_timeout(struct net_device *dev, unsigned int txqueue)
  1093. {
  1094. struct smc911x_local *lp = netdev_priv(dev);
  1095. int status, mask;
  1096. unsigned long flags;
  1097. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1098. spin_lock_irqsave(&lp->lock, flags);
  1099. status = SMC_GET_INT(lp);
  1100. mask = SMC_GET_INT_EN(lp);
  1101. spin_unlock_irqrestore(&lp->lock, flags);
  1102. DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
  1103. status, mask);
  1104. /* Dump the current TX FIFO contents and restart */
  1105. mask = SMC_GET_TX_CFG(lp);
  1106. SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
  1107. /*
  1108. * Reconfiguring the PHY doesn't seem like a bad idea here, but
  1109. * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
  1110. * which calls schedule(). Hence we use a work queue.
  1111. */
  1112. if (lp->phy_type != 0)
  1113. schedule_work(&lp->phy_configure);
  1114. /* We can accept TX packets again */
  1115. netif_trans_update(dev); /* prevent tx timeout */
  1116. netif_wake_queue(dev);
  1117. }
  1118. /*
  1119. * This routine will, depending on the values passed to it,
  1120. * either make it accept multicast packets, go into
  1121. * promiscuous mode (for TCPDUMP and cousins) or accept
  1122. * a select set of multicast packets
  1123. */
  1124. static void smc911x_set_multicast_list(struct net_device *dev)
  1125. {
  1126. struct smc911x_local *lp = netdev_priv(dev);
  1127. unsigned int multicast_table[2];
  1128. unsigned int mcr, update_multicast = 0;
  1129. unsigned long flags;
  1130. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1131. spin_lock_irqsave(&lp->lock, flags);
  1132. SMC_GET_MAC_CR(lp, mcr);
  1133. spin_unlock_irqrestore(&lp->lock, flags);
  1134. if (dev->flags & IFF_PROMISC) {
  1135. DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
  1136. mcr |= MAC_CR_PRMS_;
  1137. }
  1138. /*
  1139. * Here, I am setting this to accept all multicast packets.
  1140. * I don't need to zero the multicast table, because the flag is
  1141. * checked before the table is used.
  1142. */
  1143. else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
  1144. DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
  1145. mcr |= MAC_CR_MCPAS_;
  1146. }
  1147. /*
  1148. * This sets the internal hardware table to filter out unwanted
  1149. * multicast packets before they take up memory.
  1150. *
  1151. * The SMC chip uses a hash table where the high 6 bits of the CRC of
  1152. * the address are the offset into the table. If that bit is 1, then the
  1153. * multicast packet is accepted. Otherwise, it's dropped silently.
  1154. *
  1155. * To use the 6 bits as an offset into the table, the highest bit selects
  1156. * one of the two 32-bit registers, while the low 5 bits select the bit
  1157. * within that register.
  1158. */
  1159. else if (!netdev_mc_empty(dev)) {
  1160. struct netdev_hw_addr *ha;
  1161. /* Set the Hash/Perfect filtering mode */
  1162. mcr |= MAC_CR_HPFILT_;
  1163. /* start with a table of all zeros: reject all */
  1164. memset(multicast_table, 0, sizeof(multicast_table));
  1165. netdev_for_each_mc_addr(ha, dev) {
  1166. u32 position;
  1167. /* upper 6 bits are used as hash index */
  1168. position = ether_crc(ETH_ALEN, ha->addr)>>26;
  1169. multicast_table[position>>5] |= 1 << (position&0x1f);
  1170. }
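/* Example: if the top 6 bits of the CRC are 35 (0x23), position >> 5
 * selects multicast_table[1] (written to HASHH below) and bit
 * 35 & 0x1f = 3 is set within it; values 0-31 land in HASHL. */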
  1171. /* be sure I get rid of flags I might have set */
  1172. mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
  1173. /* now, the table can be loaded into the chipset */
  1174. update_multicast = 1;
  1175. } else {
  1176. DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
  1177. mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
  1178. /*
  1179. * since I'm disabling all multicast entirely, I need to
  1180. * clear the multicast list
  1181. */
  1182. memset(multicast_table, 0, sizeof(multicast_table));
  1183. update_multicast = 1;
  1184. }
  1185. spin_lock_irqsave(&lp->lock, flags);
  1186. SMC_SET_MAC_CR(lp, mcr);
  1187. if (update_multicast) {
  1188. DBG(SMC_DEBUG_MISC, dev,
  1189. "update mcast hash table 0x%08x 0x%08x\n",
  1190. multicast_table[0], multicast_table[1]);
  1191. SMC_SET_HASHL(lp, multicast_table[0]);
  1192. SMC_SET_HASHH(lp, multicast_table[1]);
  1193. }
  1194. spin_unlock_irqrestore(&lp->lock, flags);
  1195. }
  1196. /*
  1197. * Open and Initialize the board
  1198. *
  1199. * Set up everything, reset the card, etc..
  1200. */
  1201. static int
  1202. smc911x_open(struct net_device *dev)
  1203. {
  1204. struct smc911x_local *lp = netdev_priv(dev);
  1205. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1206. /* reset the hardware */
  1207. smc911x_reset(dev);
  1208. /* Configure the PHY, initialize the link state */
  1209. smc911x_phy_configure(&lp->phy_configure);
  1210. /* Turn on Tx + Rx */
  1211. smc911x_enable(dev);
  1212. netif_start_queue(dev);
  1213. return 0;
  1214. }
  1215. /*
  1216. * smc911x_close
  1217. *
  1218. * this makes the board clean up everything that it can
  1219. * and not talk to the outside world. Caused by
  1220. * an 'ifconfig ethX down'
  1221. */
  1222. static int smc911x_close(struct net_device *dev)
  1223. {
  1224. struct smc911x_local *lp = netdev_priv(dev);
  1225. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1226. netif_stop_queue(dev);
  1227. netif_carrier_off(dev);
  1228. /* clear everything */
  1229. smc911x_shutdown(dev);
  1230. if (lp->phy_type != 0) {
  1231. /* We need to ensure that no calls to
  1232. * smc911x_phy_configure are pending.
  1233. */
  1234. cancel_work_sync(&lp->phy_configure);
  1235. smc911x_phy_powerdown(dev, lp->mii.phy_id);
  1236. }
  1237. if (lp->pending_tx_skb) {
  1238. dev_kfree_skb(lp->pending_tx_skb);
  1239. lp->pending_tx_skb = NULL;
  1240. }
  1241. return 0;
  1242. }
  1243. /*
  1244. * Ethtool support
  1245. */
  1246. static int
  1247. smc911x_ethtool_get_link_ksettings(struct net_device *dev,
  1248. struct ethtool_link_ksettings *cmd)
  1249. {
  1250. struct smc911x_local *lp = netdev_priv(dev);
  1251. int status;
  1252. unsigned long flags;
  1253. u32 supported;
  1254. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1255. if (lp->phy_type != 0) {
  1256. spin_lock_irqsave(&lp->lock, flags);
  1257. mii_ethtool_get_link_ksettings(&lp->mii, cmd);
  1258. spin_unlock_irqrestore(&lp->lock, flags);
  1259. } else {
  1260. supported = SUPPORTED_10baseT_Half |
  1261. SUPPORTED_10baseT_Full |
  1262. SUPPORTED_TP | SUPPORTED_AUI;
  1263. if (lp->ctl_rspeed == 10)
  1264. cmd->base.speed = SPEED_10;
  1265. else if (lp->ctl_rspeed == 100)
  1266. cmd->base.speed = SPEED_100;
  1267. cmd->base.autoneg = AUTONEG_DISABLE;
  1268. cmd->base.port = 0;
  1269. SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
  1270. cmd->base.duplex =
  1271. (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
  1272. DUPLEX_FULL : DUPLEX_HALF;
  1273. ethtool_convert_legacy_u32_to_link_mode(
  1274. cmd->link_modes.supported, supported);
  1275. }
  1276. return 0;
  1277. }
  1278. static int
  1279. smc911x_ethtool_set_link_ksettings(struct net_device *dev,
  1280. const struct ethtool_link_ksettings *cmd)
  1281. {
  1282. struct smc911x_local *lp = netdev_priv(dev);
  1283. int ret;
  1284. unsigned long flags;
  1285. if (lp->phy_type != 0) {
  1286. spin_lock_irqsave(&lp->lock, flags);
  1287. ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
  1288. spin_unlock_irqrestore(&lp->lock, flags);
  1289. } else {
  1290. if (cmd->base.autoneg != AUTONEG_DISABLE ||
  1291. cmd->base.speed != SPEED_10 ||
  1292. (cmd->base.duplex != DUPLEX_HALF &&
  1293. cmd->base.duplex != DUPLEX_FULL) ||
  1294. (cmd->base.port != PORT_TP &&
  1295. cmd->base.port != PORT_AUI))
  1296. return -EINVAL;
  1297. lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
  1298. ret = 0;
  1299. }
  1300. return ret;
  1301. }
  1302. static void
  1303. smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  1304. {
  1305. strscpy(info->driver, CARDNAME, sizeof(info->driver));
  1306. strscpy(info->version, version, sizeof(info->version));
  1307. strscpy(info->bus_info, dev_name(dev->dev.parent),
  1308. sizeof(info->bus_info));
  1309. }
  1310. static int smc911x_ethtool_nwayreset(struct net_device *dev)
  1311. {
  1312. struct smc911x_local *lp = netdev_priv(dev);
  1313. int ret = -EINVAL;
  1314. unsigned long flags;
  1315. if (lp->phy_type != 0) {
  1316. spin_lock_irqsave(&lp->lock, flags);
  1317. ret = mii_nway_restart(&lp->mii);
  1318. spin_unlock_irqrestore(&lp->lock, flags);
  1319. }
  1320. return ret;
  1321. }
  1322. static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
  1323. {
  1324. struct smc911x_local *lp = netdev_priv(dev);
  1325. return lp->msg_enable;
  1326. }
  1327. static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
  1328. {
  1329. struct smc911x_local *lp = netdev_priv(dev);
  1330. lp->msg_enable = level;
  1331. }
  1332. static int smc911x_ethtool_getregslen(struct net_device *dev)
  1333. {
  1334. /* System regs + MAC regs + PHY regs */
  1335. return (((E2P_CMD - ID_REV)/4 + 1) +
  1336. (WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
  1337. }
static void smc911x_ethtool_getregs(struct net_device *dev,
                                    struct ethtool_regs *regs, void *buf)
{
        struct smc911x_local *lp = netdev_priv(dev);
        unsigned long flags;
        u32 reg, i, j = 0;
        u32 *data = (u32 *)buf;

        regs->version = lp->version;
        for (i = ID_REV; i <= E2P_CMD; i += 4) {
                data[j++] = SMC_inl(lp, i);
        }
        for (i = MAC_CR; i <= WUCSR; i++) {
                spin_lock_irqsave(&lp->lock, flags);
                SMC_GET_MAC_CSR(lp, i, reg);
                spin_unlock_irqrestore(&lp->lock, flags);
                data[j++] = reg;
        }
        for (i = 0; i <= 31; i++) {
                spin_lock_irqsave(&lp->lock, flags);
                SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
                spin_unlock_irqrestore(&lp->lock, flags);
                data[j++] = reg & 0xFFFF;
        }
}

static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
{
        struct smc911x_local *lp = netdev_priv(dev);
        unsigned int timeout;
        int e2p_cmd;

        e2p_cmd = SMC_GET_E2P_CMD(lp);
        for (timeout = 10; (e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
                if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
                        PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
                               __func__);
                        return -EFAULT;
                }
                mdelay(1);
                e2p_cmd = SMC_GET_E2P_CMD(lp);
        }
        if (timeout == 0) {
                PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
                       __func__);
                return -ETIMEDOUT;
        }
        return 0;
}
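
/*
 * E2P_CMD encoding as used below: E2P_CMD_EPC_BUSY_ doubles as the
 * start/busy bit, the command opcode sits in bits 30:28 (hence the
 * 0x7 << 28 mask) and the EEPROM byte address goes in the low byte.
 * Each helper first waits for any previous command to complete.
 */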
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
                                                   int cmd, int addr)
{
        struct smc911x_local *lp = netdev_priv(dev);
        int ret;

        if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
                return ret;
        SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
                ((cmd) & (0x7 << 28)) |
                ((addr) & 0xFF));
        return 0;
}

static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
                                                   u8 *data)
{
        struct smc911x_local *lp = netdev_priv(dev);
        int ret;

        if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
                return ret;
        *data = SMC_GET_E2P_DATA(lp);
        return 0;
}

static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
                                                    u8 data)
{
        struct smc911x_local *lp = netdev_priv(dev);
        int ret;

        if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
                return ret;
        SMC_SET_E2P_DATA(lp, data);
        return 0;
}

static int smc911x_ethtool_geteeprom(struct net_device *dev,
                                     struct ethtool_eeprom *eeprom, u8 *data)
{
        u8 eebuf[SMC911X_EEPROM_LEN];
        int i, ret;

        for (i = 0; i < SMC911X_EEPROM_LEN; i++) {
                if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i)) != 0)
                        return ret;
                if ((ret = smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i])) != 0)
                        return ret;
        }
        memcpy(data, eebuf + eeprom->offset, eeprom->len);
        return 0;
}

static int smc911x_ethtool_seteeprom(struct net_device *dev,
                                     struct ethtool_eeprom *eeprom, u8 *data)
{
        int i, ret;

        /* Enable erase */
        if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0)) != 0)
                return ret;
        for (i = eeprom->offset; i < (eeprom->offset + eeprom->len); i++) {
                /* erase byte */
                if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i)) != 0)
                        return ret;
                /* write byte, advancing through the caller's buffer */
                if ((ret = smc911x_ethtool_write_eeprom_byte(dev, *data++)) != 0)
                        return ret;
                if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i)) != 0)
                        return ret;
        }
        return 0;
}
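
/*
 * Note on the programming sequence above: writes are enabled once with
 * EWEN, then for each byte the location is erased, the new value is loaded
 * into E2P_DATA, and a WRITE command commits it.  Every step polls E2P_CMD
 * via smc911x_ethtool_wait_eeprom_ready() before issuing the next command.
 */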
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
{
        return SMC911X_EEPROM_LEN;
}

static const struct ethtool_ops smc911x_ethtool_ops = {
        .get_drvinfo        = smc911x_ethtool_getdrvinfo,
        .get_msglevel       = smc911x_ethtool_getmsglevel,
        .set_msglevel       = smc911x_ethtool_setmsglevel,
        .nway_reset         = smc911x_ethtool_nwayreset,
        .get_link           = ethtool_op_get_link,
        .get_regs_len       = smc911x_ethtool_getregslen,
        .get_regs           = smc911x_ethtool_getregs,
        .get_eeprom_len     = smc911x_ethtool_geteeprom_len,
        .get_eeprom         = smc911x_ethtool_geteeprom,
        .set_eeprom         = smc911x_ethtool_seteeprom,
        .get_link_ksettings = smc911x_ethtool_get_link_ksettings,
        .set_link_ksettings = smc911x_ethtool_set_link_ksettings,
};
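
/*
 * These ops back the usual ethtool commands: "ethtool -i ethX" for driver
 * info, "ethtool -d ethX" for the register dump laid out above,
 * "ethtool -e ethX" / "ethtool -E ethX ..." for the serial EEPROM, and
 * "ethtool -r ethX" to restart autonegotiation.
 */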
/*
 * smc911x_findirq
 *
 * This routine has a simple purpose -- make the SMC chip generate an
 * interrupt, so an auto-detect routine can detect it and find the IRQ.
 */
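/*
 * The detection relies on the generic probe_irq_on()/probe_irq_off()
 * helpers: probe_irq_on() returns a cookie describing the currently
 * unclaimed IRQ lines, and probe_irq_off(cookie) reports which one fired
 * in the meantime (0 if none did, which is why a timeout below can simply
 * fall through to the same cleanup path).
 */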
static int smc911x_findirq(struct net_device *dev)
{
        struct smc911x_local *lp = netdev_priv(dev);
        int timeout = 20;
        unsigned long cookie;

        DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

        cookie = probe_irq_on();

        /*
         * Force a SW interrupt
         */
        SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);

        /*
         * Wait until positive that the interrupt has been generated
         */
        do {
                int int_status;
                udelay(10);
                int_status = SMC_GET_INT_EN(lp);
                if (int_status & INT_EN_SW_INT_EN_)
                        break;          /* got the interrupt */
        } while (--timeout);

        /*
         * There is really nothing that I can do here if timeout fails,
         * as autoirq_report will return a 0 anyway, which is what I
         * want in this case.  Plus, the clean up is needed in both
         * cases.
         */

        /* and disable all interrupts again */
        SMC_SET_INT_EN(lp, 0);

        /* and return what I found */
        return probe_irq_off(cookie);
}

static const struct net_device_ops smc911x_netdev_ops = {
        .ndo_open            = smc911x_open,
        .ndo_stop            = smc911x_close,
        .ndo_start_xmit      = smc911x_hard_start_xmit,
        .ndo_tx_timeout      = smc911x_timeout,
        .ndo_set_rx_mode     = smc911x_set_multicast_list,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = smc911x_poll_controller,
#endif
};
/*
 * Function: smc911x_probe(unsigned long ioaddr)
 *
 * Purpose:
 *      Tests to see if a given ioaddr points to an SMC911x chip.
 *      Returns 0 on success.
 *
 * Algorithm:
 *      (1) see if the endian word is OK
 *      (2) see if I recognize the chip ID in the appropriate register
 *
 * Here I do typical initialization tasks.
 *
 * o  Initialize the structure if needed
 * o  print out my vanity message if not done so already
 * o  print out what type of hardware is detected
 * o  print out the ethernet address
 * o  find the IRQ
 * o  set up my private data
 * o  configure the dev structure with my subroutines
 * o  actually GRAB the irq.
 * o  GRAB the region
 */
static int smc911x_probe(struct net_device *dev)
{
        struct smc911x_local *lp = netdev_priv(dev);
        int i, retval;
        unsigned int val, chip_id, revision;
        const char *version_string;
        unsigned long irq_flags;
#ifdef SMC_USE_DMA
        struct dma_slave_config config;
        dma_cap_mask_t mask;
#endif
        u8 addr[ETH_ALEN];

        DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

        /* First, see if the endian word is recognized */
        val = SMC_GET_BYTE_TEST(lp);
        DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
            CARDNAME, val);
        if (val != 0x87654321) {
                netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
                retval = -ENODEV;
                goto err_out;
        }

        /*
         * check if the revision register is something that I
         * recognize.  These might need to be added to later,
         * as future revisions could be added.
         */
        chip_id = SMC_GET_PN(lp);
        DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
            CARDNAME, chip_id);
        for (i = 0; chip_ids[i].id != 0; i++) {
                if (chip_ids[i].id == chip_id)
                        break;
        }
        if (!chip_ids[i].id) {
                netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
                retval = -ENODEV;
                goto err_out;
        }
        version_string = chip_ids[i].name;

        revision = SMC_GET_REV(lp);
        DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);

        /* At this point I'll assume that the chip is an SMC911x. */
        DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
            CARDNAME, chip_ids[i].name);

        /* Validate the TX FIFO size requested */
        if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
                netdev_err(dev, "Invalid TX FIFO size requested %d\n",
                           tx_fifo_kb);
                retval = -EINVAL;
                goto err_out;
        }

        /* fill in some of the fields */
        lp->version = chip_ids[i].id;
        lp->revision = revision;
        lp->tx_fifo_kb = tx_fifo_kb;
        /* Reverse calculate the RX FIFO size from the TX */
        lp->tx_fifo_size = (lp->tx_fifo_kb << 10) - 512;
        lp->rx_fifo_size = ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
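
        /*
         * Worked example: for tx_fifo_kb = 2 this gives
         * tx_fifo_size = 2048 - 512 = 1536 bytes and
         * rx_fifo_size = ((16384 - 512 - 1536) / 16) * 15 = 13440 bytes,
         * matching the "13440 Rx Data Fifo Size" entry in the AFC table
         * below.
         */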

        /* Set the automatic flow control values */
        switch (lp->tx_fifo_kb) {
        /*
         * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
         * AFC_LO is AFC_HI/2
         * BACK_DUR is about 5uS*(AFC_LO) rounded down
         */
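        /*
         * For instance, in the 2 KB case the RX data FIFO is 13440 bytes,
         * so AFC_HI = (13440 * 2/3) / 64 = 140 = 0x8C and
         * AFC_LO = 140 / 2 = 70 = 0x46 -- the 0x8C and 0x46 bytes of the
         * 0x008C46AF value written to AFC_CFG; the low byte presumably
         * carries BACK_DUR and the flow-control enable flags.
         */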
        case 2: /* 13440 Rx Data Fifo Size */
                lp->afc_cfg = 0x008C46AF; break;
        case 3: /* 12480 Rx Data Fifo Size */
                lp->afc_cfg = 0x0082419F; break;
        case 4: /* 11520 Rx Data Fifo Size */
                lp->afc_cfg = 0x00783C9F; break;
        case 5: /* 10560 Rx Data Fifo Size */
                lp->afc_cfg = 0x006E374F; break;
        case 6: /* 9600 Rx Data Fifo Size */
                lp->afc_cfg = 0x0064328F; break;
        case 7: /* 8640 Rx Data Fifo Size */
                lp->afc_cfg = 0x005A2D7F; break;
        case 8: /* 7680 Rx Data Fifo Size */
                lp->afc_cfg = 0x0050287F; break;
        case 9: /* 6720 Rx Data Fifo Size */
                lp->afc_cfg = 0x0046236F; break;
        case 10: /* 5760 Rx Data Fifo Size */
                lp->afc_cfg = 0x003C1E6F; break;
        case 11: /* 4800 Rx Data Fifo Size */
                lp->afc_cfg = 0x0032195F; break;
        /*
         * AFC_HI is ~1520 bytes less than RX Data Fifo Size
         * AFC_LO is AFC_HI/2
         * BACK_DUR is about 5uS*(AFC_LO) rounded down
         */
        case 12: /* 3840 Rx Data Fifo Size */
                lp->afc_cfg = 0x0024124F; break;
        case 13: /* 2880 Rx Data Fifo Size */
                lp->afc_cfg = 0x0015073F; break;
        case 14: /* 1920 Rx Data Fifo Size */
                lp->afc_cfg = 0x0006032F; break;
        default:
                PRINTK(dev, "ERROR -- no AFC_CFG setting found");
                break;
        }

        DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
            "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
            lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);

        spin_lock_init(&lp->lock);

        /* Get the MAC address */
        SMC_GET_MAC_ADDR(lp, addr);
        eth_hw_addr_set(dev, addr);

        /* now, reset the chip, and put it into a known state */
        smc911x_reset(dev);

        /*
         * If dev->irq is 0, then the device has to be banged on to see
         * what the IRQ is.
         *
         * Specifying an IRQ is done with the assumption that the user knows
         * what (s)he is doing.  No checking is done!!!!
         */
        if (dev->irq < 1) {
                int trials;

                trials = 3;
                while (trials--) {
                        dev->irq = smc911x_findirq(dev);
                        if (dev->irq)
                                break;
                        /* kick the card and try again */
                        smc911x_reset(dev);
                }
        }
        if (dev->irq == 0) {
                netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
                retval = -ENODEV;
                goto err_out;
        }
        dev->irq = irq_canonicalize(dev->irq);

        dev->netdev_ops = &smc911x_netdev_ops;
        dev->watchdog_timeo = msecs_to_jiffies(watchdog);
        dev->ethtool_ops = &smc911x_ethtool_ops;

        INIT_WORK(&lp->phy_configure, smc911x_phy_configure);

        lp->mii.phy_id_mask = 0x1f;
        lp->mii.reg_num_mask = 0x1f;
        lp->mii.force_media = 0;
        lp->mii.full_duplex = 0;
        lp->mii.dev = dev;
        lp->mii.mdio_read = smc911x_phy_read;
        lp->mii.mdio_write = smc911x_phy_write;

        /*
         * Locate the phy, if any.
         */
        smc911x_phy_detect(dev);

        /* Set default parameters */
        lp->msg_enable = NETIF_MSG_LINK;
        lp->ctl_rfduplx = 1;
        lp->ctl_rspeed = 100;

#ifdef SMC_DYNAMIC_BUS_CONFIG
        irq_flags = lp->cfg.irq_flags;
#else
        irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
#endif

        /* Grab the IRQ */
        retval = request_irq(dev->irq, smc911x_interrupt,
                             irq_flags, dev->name, dev);
        if (retval)
                goto err_out;

#ifdef SMC_USE_DMA
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        lp->rxdma = dma_request_channel(mask, NULL, NULL);
        lp->txdma = dma_request_channel(mask, NULL, NULL);
        lp->rxdma_active = 0;
        lp->txdma_active = 0;
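
        /*
         * Both channels are configured from the same dma_slave_config:
         * only the src_addr/src_addr_width half applies to the RX
         * (device-to-memory) channel and only the dst_addr/dst_addr_width
         * half to the TX (memory-to-device) channel, so programming both
         * FIFO addresses here is harmless.
         */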
        memset(&config, 0, sizeof(config));
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.src_addr = lp->physaddr + RX_DATA_FIFO;
        config.dst_addr = lp->physaddr + TX_DATA_FIFO;
        config.src_maxburst = 32;
        config.dst_maxburst = 32;

        retval = dmaengine_slave_config(lp->rxdma, &config);
        if (retval) {
                dev_err(lp->dev, "dma rx channel configuration failed: %d\n",
                        retval);
                goto err_out;
        }

        retval = dmaengine_slave_config(lp->txdma, &config);
        if (retval) {
                dev_err(lp->dev, "dma tx channel configuration failed: %d\n",
                        retval);
                goto err_out;
        }
#endif

        retval = register_netdev(dev);
        if (retval == 0) {
                /* now, print out the card info, in a short format.. */
                netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
                            version_string, lp->revision,
                            dev->base_addr, dev->irq);

#ifdef SMC_USE_DMA
                if (lp->rxdma)
                        pr_cont(" RXDMA %p", lp->rxdma);
                if (lp->txdma)
                        pr_cont(" TXDMA %p", lp->txdma);
#endif
                pr_cont("\n");
                if (!is_valid_ether_addr(dev->dev_addr)) {
                        netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
                } else {
                        /* Print the Ethernet address */
                        netdev_info(dev, "Ethernet addr: %pM\n",
                                    dev->dev_addr);
                }

                if (lp->phy_type == 0) {
                        PRINTK(dev, "No PHY found\n");
                } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
                        PRINTK(dev, "LAN911x Internal PHY\n");
                } else {
                        PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
                }
        }

err_out:
#ifdef SMC_USE_DMA
        if (retval) {
                if (lp->rxdma)
                        dma_release_channel(lp->rxdma);
                if (lp->txdma)
                        dma_release_channel(lp->txdma);
        }
#endif
        return retval;
}

/*
 * smc911x_drv_probe(void)
 *
 * Output:
 *      0 --> there is a device
 *      anything else, error
 */
static int smc911x_drv_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct resource *res;
        struct smc911x_local *lp;
        void __iomem *addr;
        int ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                ret = -ENODEV;
                goto out;
        }

        /*
         * Request the regions.
         */
        if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
                ret = -EBUSY;
                goto out;
        }

        ndev = alloc_etherdev(sizeof(struct smc911x_local));
        if (!ndev) {
                ret = -ENOMEM;
                goto release_1;
        }
        SET_NETDEV_DEV(ndev, &pdev->dev);

        ndev->dma = (unsigned char)-1;
        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq < 0) {
                ret = ndev->irq;
                goto release_both;
        }

        lp = netdev_priv(ndev);
        lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
        {
                struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);

                if (!pd) {
                        ret = -EINVAL;
                        goto release_both;
                }
                memcpy(&lp->cfg, pd, sizeof(lp->cfg));
        }
#endif

        addr = ioremap(res->start, SMC911X_IO_EXTENT);
        if (!addr) {
                ret = -ENOMEM;
                goto release_both;
        }

        platform_set_drvdata(pdev, ndev);
        lp->base = addr;
        ndev->base_addr = res->start;
        ret = smc911x_probe(ndev);
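        /*
         * The unwind labels below are nested inside the failure branch:
         * a successful probe skips past them, while the gotos above jump
         * into the appropriate point of the unwind sequence.
         */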
        if (ret != 0) {
                iounmap(addr);
release_both:
                free_netdev(ndev);
release_1:
                release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
                pr_info("%s: not found (%d).\n", CARDNAME, ret);
        }
#ifdef SMC_USE_DMA
        else {
                lp->physaddr = res->start;
                lp->dev = &pdev->dev;
        }
#endif

        return ret;
}

static int smc911x_drv_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct smc911x_local *lp = netdev_priv(ndev);
        struct resource *res;

        DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);

        unregister_netdev(ndev);
        free_irq(ndev->irq, ndev);

#ifdef SMC_USE_DMA
        {
                if (lp->rxdma)
                        dma_release_channel(lp->rxdma);
                if (lp->txdma)
                        dma_release_channel(lp->txdma);
        }
#endif
        iounmap(lp->base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, SMC911X_IO_EXTENT);

        free_netdev(ndev);
        return 0;
}

static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
{
        struct net_device *ndev = platform_get_drvdata(dev);
        struct smc911x_local *lp = netdev_priv(ndev);

        DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
        if (ndev) {
                if (netif_running(ndev)) {
                        netif_device_detach(ndev);
                        smc911x_shutdown(ndev);
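                        /*
                         * With POWER_DOWN enabled, the write below programs
                         * value 2 into what is presumably the PMT_CTRL
                         * power-mode field (bits 13:12), i.e. the D2
                         * "energy detect only" state named in the comment.
                         */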
#if POWER_DOWN
                        /* Set D2 - Energy detect only setting */
                        SMC_SET_PMT_CTRL(lp, 2<<12);
#endif
                }
        }
        return 0;
}

static int smc911x_drv_resume(struct platform_device *dev)
{
        struct net_device *ndev = platform_get_drvdata(dev);

        DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
        if (ndev) {
                struct smc911x_local *lp = netdev_priv(ndev);

                if (netif_running(ndev)) {
                        smc911x_reset(ndev);
                        if (lp->phy_type != 0)
                                smc911x_phy_configure(&lp->phy_configure);
                        smc911x_enable(ndev);
                        netif_device_attach(ndev);
                }
        }
        return 0;
}

static struct platform_driver smc911x_driver = {
        .probe   = smc911x_drv_probe,
        .remove  = smc911x_drv_remove,
        .suspend = smc911x_drv_suspend,
        .resume  = smc911x_drv_resume,
        .driver  = {
                .name = CARDNAME,
        },
};

module_platform_driver(smc911x_driver);