// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <[email protected]>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <[email protected]> or <[email protected]>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <[email protected]>,
 *      David Gibson <[email protected]>,
 *
 *      Armin Kuster <[email protected]>
 *      Copyright 2002 MontaVista Software Inc.
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

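/* Register a commac (an EMAC instance's channel user) with this MAL.
 * Channel ownership is tracked through the TX/RX channel masks so that
 * no two commacs can ever claim the same channel; the first registration
 * also enables the shared NAPI context.
 */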
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

void mal_unregister_commac(struct mal_instance *mal,
			   struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

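/* Program the RX channel buffer size. The RCBS register holds the size
 * in 16-byte units, hence the 16-byte alignment check and the shift by 4.
 */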
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

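/* All buffer descriptor rings live in one contiguous DMA block: the TX
 * rings (NUM_TX_BUFF descriptors each) come first, followed by the RX
 * rings. These helpers return a channel's offset into that block,
 * counted in descriptors.
 */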
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but the MAL_RXCASR bitmask wants the value
	 * divided by 8.
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but the MAL_RXCARR bitmask wants the value
	 * divided by 8.
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

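/* System error interrupt: latch and clear MAL_ESR, then report PLB or
 * OPB bus errors. Descriptor errors are deliberately ignored here since
 * the dedicated TXDE/RXDE handlers deal with them.
 */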
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore descriptor errors here; a TXDE or RXDE
			 * interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, most likely buggy hardware or an
			 * incorrect physical address in a BD (i.e. a bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, most likely buggy hardware or an incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

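/* Standard NAPI hand-off: mask the end-of-buffer interrupt and defer the
 * work to the softirq poller; mal_poll() re-enables the interrupt once
 * it runs out of work.
 */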
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		spin_lock(&mal->lock);
		mal_disable_eob_irq(mal);
		spin_unlock(&mal->lock);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
		       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
		       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

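/* RX descriptor error: mark every affected channel as stopped so that
 * its owner can rearm it from the poll path, then kick NAPI.
 */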
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

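/* Take a commac out of the poll loop. The POLL_DISABLED bit acts as a
 * sleeping lock, and napi_synchronize() waits out any poll that is
 * already running.
 */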
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disables poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

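/* NAPI poll callback shared by all commacs on this MAL: reap TX
 * completions, poll RX against the budget, re-enable the EOB interrupt
 * when done, and reschedule if a late packet or a stopped RX channel
 * shows up in the meantime.
 */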
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);

	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something smarter here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget - received);
		if (n) {
			received += n;
			if (received >= budget)
				return budget;
		}
	}

	if (napi_complete_done(napi, received)) {
		/* We need to disable IRQs to protect from RXDE IRQ here */
		spin_lock_irqsave(&mal->lock, flags);
		mal_enable_eob_irq(mal);
		spin_unlock_irqrestore(&mal->lock, flags);
	}

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (!napi_reschedule(napi))
				goto more_work;

			spin_lock_irqsave(&mal->lock, flags);
			mal_disable_eob_irq(mal);
			spin_unlock_irqrestore(&mal->lock, flags);
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

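/* Soft-reset the MAL. The SR bit self-clears after about one system
 * clock, so a short busy-wait is enough.
 */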
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

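/* Probe sequence: read the channel counts from the device tree, map the
 * DCRs and interrupts, reset and configure the MAL, carve out a single
 * coherent DMA block for all BD rings, and finally hook up the interrupt
 * handlers (with MAL_FTR_COMMON_ERR_INT the three error sources share
 * one line).
 */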
static int mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal)
		return -ENOMEM;

	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				  MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
		       ofdev->dev.of_node);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
	    !mal->txde_irq || !mal->rxde_irq) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
			      CONFIG_IBM_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
					  GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %pOF, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	platform_set_drvdata(ofdev, mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}

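/* Teardown mirrors probe: quiesce NAPI, free the IRQs, reset the
 * hardware and release the BD ring block. A non-empty commac list here
 * means a user was never unregistered, hence the WARN.
 */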
static int mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = platform_get_drvdata(ofdev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list))
		/* This is *very* bad */
		WARN(1, KERN_EMERG
		     "mal%d: commac list is not empty on remove!\n",
		     mal->index);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);

	kfree(mal);

	return 0;
}

static const struct of_device_id mal_platform_match[] =
{
	{
		.compatible = "ibm,mcmal",
	},
	{
		.compatible = "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal",
	},
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal2",
	},
	{},
};

static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}