// SPDX-License-Identifier: GPL-2.0-only
/*
 * sonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer ([email protected])
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse ([email protected])
 *
 * Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <[email protected]>
 *
 */

/*
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */

static unsigned int version_printed;

static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");

static void sonic_msg_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	lp->msg_enable = netif_msg_init(sonic_debug, 0);

	if (version_printed++ == 0)
		netif_dbg(lp, drv, dev, "%s", version);
}

static int sonic_alloc_descriptors(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* Allocate a chunk of memory for the descriptors. Note that this
	 * must not cross a 64K boundary. It is smaller than one page which
	 * means that page alignment is a sufficient condition.
	 */
	lp->descriptors =
		dma_alloc_coherent(lp->device,
				   SIZEOF_SONIC_DESC *
				   SONIC_BUS_SCALE(lp->dma_bitmode),
				   &lp->descriptors_laddr, GFP_KERNEL);

	if (!lp->descriptors)
		return -ENOMEM;

	lp->cda = lp->descriptors;
	lp->tda = lp->cda + SIZEOF_SONIC_CDA *
			    SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
			    SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
			    SONIC_BUS_SCALE(lp->dma_bitmode);

	lp->cda_laddr = lp->descriptors_laddr;
	lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
					SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
					SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
					SONIC_BUS_SCALE(lp->dma_bitmode);

	return 0;
}

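/* Editor's illustrative sketch, not part of the original driver and not
 * called anywhere: the comment in sonic_alloc_descriptors() argues that page
 * alignment is enough to keep the descriptor block inside one 64K window,
 * because the block is smaller than a page and dma_alloc_coherent() returns
 * at least page-aligned memory for sub-page sizes. The helper name is
 * hypothetical; it only uses macros and fields already used in this file.
 */
static bool __maybe_unused sonic_descriptors_fit_one_page(struct sonic_local *lp)
{
	/* A page-aligned region no larger than one page cannot straddle a
	 * 64K boundary, since PAGE_SIZE is a power of two no larger than 64K
	 * on the platforms this core serves.
	 */
	return SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode) <= PAGE_SIZE;
}
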
/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while (i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
						  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			while (i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev, true);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}

/* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep)
{
	struct sonic_local * __maybe_unused lp = netdev_priv(dev);
	int i;
	u16 bits;

	for (i = 0; i < 1000; ++i) {
		bits = SONIC_READ(SONIC_CMD) & mask;
		if (!bits)
			return;
		if (!may_sleep)
			udelay(20);
		else
			usleep_range(100, 200);
	}
	WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
}

/*
 * Close the SONIC device
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL, true);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if (lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}

static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL, false);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* Try to restart the adaptor. */
	sonic_init(dev, false);
	lp->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * transmit packet
 *
 * Appends new TD during transmission thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may,
 *   set tx_skb[i]
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 *   stop the tx queue
 * The ISR interacts with this routine in various ways. It may,
 *   reset tx_skb[i]
 *   test the EOL and status flags of the TDs
 *   wake the tx queue
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 */
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry;
	unsigned long flags;

	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */
	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&lp->lock, flags);

	entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		      sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));

	netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	lp->eol_tx = entry;

	entry = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[entry]) {
		/* The ring is full, the ISR has yet to process the next TD. */
		netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

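/* Editor's illustrative sketch, not part of the original driver and not
 * called anywhere: per the comment before sonic_send_packet(), a TD is free
 * exactly when its tx_skb[] slot is NULL, and the next slot to be filled is
 * the one after eol_tx. With lp->lock held, the number of packets that could
 * still be queued works out to the count below. The helper name is
 * hypothetical; it relies only on fields and macros used in this file.
 */
static unsigned int __maybe_unused sonic_tx_slots_free(struct sonic_local *lp)
{
	unsigned int entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
	unsigned int free = 0;

	/* Walk forward until we reach a descriptor the ISR has not yet
	 * reclaimed (its skb pointer is still set), or the whole ring.
	 */
	while (free < SONIC_NUM_TDS && !lp->tx_skb[entry]) {
		free++;
		entry = (entry + 1) & SONIC_TDS_MASK;
	}
	return free;
}
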
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	unsigned long flags;

	/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
	 * with sonic_send_packet() so that the two functions can share state.
	 * Secondly, it makes sonic_interrupt() re-entrant, as that is required
	 * by macsonic which must use two IRQs with different priority levels.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	if (!status) {
		spin_unlock_irqrestore(&lp->lock, flags);

		return IRQ_NONE;
	}

	do {
		SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);	/* got packet(s) */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* The state of a Transmit Descriptor may be inferred
			 * from { tx_skb[entry], td_status } as follows.
			 * { clear, clear } => the TD has never been used
			 * { set,   clear } => the TD was handed to SONIC
			 * { set,   set   } => the TD was handed back
			 * { clear, set   } => the TD is available for re-use
			 */

			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & SONIC_TCR_PTX) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					if (td_status & (SONIC_TCR_EXD |
					    SONIC_TCR_EXC | SONIC_TCR_BCM))
						lp->stats.tx_aborted_errors++;
					if (td_status &
					    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
						lp->stats.tx_carrier_errors++;
					if (td_status & SONIC_TCR_OWC)
						lp->stats.tx_window_errors++;
					if (td_status & SONIC_TCR_FU)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_consume_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);	/* The ring is no longer full */
			lp->cur_tx = entry;
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
		}

		/* counter overruns; all counters are 16bit wide */
		if (status & SONIC_INT_FAE)
			lp->stats.rx_frame_errors += 65536;
		if (status & SONIC_INT_CRC)
			lp->stats.rx_crc_errors += 65536;
		if (status & SONIC_INT_MP)
			lp->stats.rx_missed_errors += 65536;

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			u16 tcr = SONIC_READ(SONIC_TCR);

			netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
				  __func__, tcr);

			if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
				   SONIC_TCR_FU | SONIC_TCR_BCM)) {
				/* Aborted transmission. Try again. */
				netif_stop_queue(dev);
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
			}
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
			       dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
		}

		status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	} while (status);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}

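/* Editor's illustrative sketch, not part of the original driver and not
 * called anywhere: the { tx_skb[entry], td_status } state table documented
 * inside sonic_interrupt() can be spelled out as a small classifier over the
 * same fields and accessors used above. The enum and helper names are
 * hypothetical.
 */
enum sonic_td_state {
	SONIC_TD_NEVER_USED,		/* { clear, clear } */
	SONIC_TD_OWNED_BY_SONIC,	/* { set,   clear } */
	SONIC_TD_HANDED_BACK,		/* { set,   set   } */
	SONIC_TD_REUSABLE,		/* { clear, set   } */
};

static enum sonic_td_state __maybe_unused
sonic_td_classify(struct net_device *dev, int entry)
{
	struct sonic_local *lp = netdev_priv(dev);
	bool has_skb = lp->tx_skb[entry] != NULL;
	bool has_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS) != 0;

	if (has_skb)
		return has_status ? SONIC_TD_HANDED_BACK : SONIC_TD_OWNED_BY_SONIC;
	return has_status ? SONIC_TD_REUSABLE : SONIC_TD_NEVER_USED;
}
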
/* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
			   unsigned int last)
{
	unsigned int i = last;

	do {
		i = (i + 1) & SONIC_RRS_MASK;
		if (addr == lp->rx_laddr[i])
			return i;
	} while (i != last);

	return -ENOENT;
}

/* Allocate and map a new skb to be used as a receive buffer. */
static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
			   struct sk_buff **new_skb, dma_addr_t *new_addr)
{
	*new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
	if (!*new_skb)
		return false;

	if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
		skb_reserve(*new_skb, 2);

	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
				   SONIC_RBSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(lp->device, *new_addr)) {
		dev_kfree_skb(*new_skb);
		*new_skb = NULL;
		return false;
	}

	return true;
}

/* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
			     dma_addr_t old_addr, dma_addr_t new_addr)
{
	unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
	unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	u32 buf;

	/* The resources in the range [RRP, RWP) belong to the SONIC. This loop
	 * scans the other resources in the RRA, those in the range [RWP, RRP).
	 */
	do {
		buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
		      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

		if (buf == old_addr)
			break;

		entry = (entry + 1) & SONIC_RRS_MASK;
	} while (entry != end);

	WARN_ONCE(buf != old_addr, "failed to find resource!\n");

	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

	entry = (entry + 1) & SONIC_RRS_MASK;

	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}

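/* Editor's illustrative sketch, not part of the original driver and not
 * called anywhere: per the comment in sonic_update_rra(), the chip owns the
 * receive resources in [RRP, RWP) and the driver owns those in [RWP, RRP).
 * The number of resources still available to the chip is therefore the
 * modular distance from RRP to RWP. The helper name is hypothetical; it only
 * uses accessors and macros used elsewhere in this file.
 */
static unsigned int __maybe_unused sonic_rra_device_entries(struct net_device *dev)
{
	struct sonic_local * __maybe_unused lp = netdev_priv(dev);
	unsigned int rrp = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	unsigned int rwp = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));

	return (rwp - rrp) & SONIC_RRS_MASK;
}
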
/*
 * We have a good packet(s), pass it/them up the network stack.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;
	int prev_entry = lp->eol_rx;
	bool rbe = false;

	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

		/* If the RD has LPKT set, the chip has finished with the RB */
		if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
			struct sk_buff *new_skb;
			dma_addr_t new_laddr;
			u32 addr = (sonic_rda_get(dev, entry,
						  SONIC_RD_PKTPTR_H) << 16) |
				   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
			int i = index_from_addr(lp, addr, entry);

			if (i < 0) {
				WARN_ONCE(1, "failed to find buffer!\n");
				break;
			}

			if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
				struct sk_buff *used_skb = lp->rx_skb[i];
				int pkt_len;

				/* Pass the used buffer up the stack */
				dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
						 DMA_FROM_DEVICE);

				pkt_len = sonic_rda_get(dev, entry,
							SONIC_RD_PKTLEN);
				skb_trim(used_skb, pkt_len);
				used_skb->protocol = eth_type_trans(used_skb,
								    dev);
				netif_rx(used_skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				lp->rx_skb[i] = new_skb;
				lp->rx_laddr[i] = new_laddr;
			} else {
				/* Failed to obtain a new buffer so re-use it */
				new_laddr = addr;
				lp->stats.rx_dropped++;
			}

			/* If RBE is already asserted when RWP advances then
			 * it's safe to clear RBE after processing this packet.
			 */
			rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;

			sonic_update_rra(dev, lp, addr, new_laddr);
		}

		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

		prev_entry = entry;
		entry = (entry + 1) & SONIC_RDS_MASK;
	}

	lp->cur_rx = entry;

	if (prev_entry != lp->eol_rx) {
		/* Advance the EOL flag to put descriptors back into service */
		sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
			      sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
		lp->eol_rx = prev_entry;
	}

	if (rbe)
		SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
}

/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;
		} else {
			unsigned long flags;

			netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
				  netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);  /* always enable our own address */
			i = 1;
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

			/* LCAM and TXP commands can't be used simultaneously */
			spin_lock_irqsave(&lp->lock, flags);
			sonic_quiesce(dev, SONIC_CR_TXP, false);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
			sonic_quiesce(dev, SONIC_CR_LCAM, false);
			spin_unlock_irqrestore(&lp->lock, flags);
		}
	}

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}

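/* Editor's illustrative sketch, not part of the original driver and not
 * called anywhere: both sonic_multicast_list() above and sonic_init() below
 * load a MAC address into a CAM entry as three 16-bit words, with the
 * lower-numbered byte in the low half of each word. That packing reduces to
 * this hypothetical helper (word is 0, 1 or 2):
 */
static u16 __maybe_unused sonic_cam_word(const unsigned char *addr, int word)
{
	return addr[word * 2 + 1] << 8 | addr[word * 2];
}
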
/*
 * Initialize the SONIC ethernet controller.
 */
static int sonic_init(struct net_device *dev, bool may_sleep)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* While in reset mode, clear CAM Enable register */
	SONIC_WRITE(SONIC_CE, 0);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
	sonic_quiesce(dev, SONIC_CR_ALL, may_sleep);

	/*
	 * initialize the receive resource area
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* initialize all RRA registers */
	SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
	SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep);

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			      lp->rda_laddr +
			      ((i + 1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}
	/* fix last descriptor */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		      (lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/*
	 * initialize transmit descriptors
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			      (lp->tda_laddr & 0xffff) +
			      (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}
	/* fix last descriptor */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		      (lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/*
	 * put our own address to CAM desc[0]
	 */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/*
	 * initialize CAM registers
	 */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/*
	 * load the CAM
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep);

	/*
	 * enable receiver, disable loopback
	 * and enable all interrupts
	 */
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}

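/* Editor's illustrative sketch, not part of the original driver and not
 * called anywhere: the receive descriptor ring built in sonic_init() is
 * circular because each RD_LINK field holds the low 16 bits of the next
 * descriptor's bus address (the high bits come from URDA), with the last
 * entry wrapping back to the first and additionally carrying SONIC_EOL.
 * Ignoring the EOL bit, the link value for entry i reduces to this
 * hypothetical helper:
 */
static u16 __maybe_unused sonic_rd_link_value(struct sonic_local *lp, int i)
{
	int next = (i + 1) % SONIC_NUM_RDS;

	return (lp->rda_laddr +
		next * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
}
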
MODULE_LICENSE("GPL");