/* mscan.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * CAN bus driver for the alone generic (as possible as) MSCAN controller.
  4. *
  5. * Copyright (C) 2005-2006 Andrey Volkov <[email protected]>,
  6. * Varma Electronics Oy
  7. * Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
  8. * Copyright (C) 2008-2009 Pengutronix <[email protected]>
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/delay.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/if_arp.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/list.h>
  18. #include <linux/can/dev.h>
  19. #include <linux/can/error.h>
  20. #include <linux/io.h>
  21. #include "mscan.h"
/*
 * Bit timing limits of the MSCAN core; the chosen values are programmed
 * into the BTR0/BTR1 registers by mscan_do_set_bittiming().
 */
static const struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
/*
 * Snapshot of controller mode and interrupt-enable registers.
 * NOTE(review): not referenced in this part of the file; presumably
 * used for suspend/resume elsewhere in the driver — confirm.
 */
struct mscan_state {
	u8 mode;	/* saved mode bits — assumption, verify against PM code */
	u8 canrier;	/* saved RX interrupt enable register */
	u8 cantier;	/* saved TX interrupt enable register */
};
/*
 * Maps the 2-bit MSCAN_STATE_RX()/MSCAN_STATE_TX() status fields of
 * CANRFLG to generic CAN states; callers index it with the worse
 * (max) of the RX and TX values.
 */
static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};
  44. static int mscan_set_mode(struct net_device *dev, u8 mode)
  45. {
  46. struct mscan_priv *priv = netdev_priv(dev);
  47. struct mscan_regs __iomem *regs = priv->reg_base;
  48. int ret = 0;
  49. int i;
  50. u8 canctl1;
  51. if (mode != MSCAN_NORMAL_MODE) {
  52. if (priv->tx_active) {
  53. /* Abort transfers before going to sleep */#
  54. out_8(&regs->cantarq, priv->tx_active);
  55. /* Suppress TX done interrupts */
  56. out_8(&regs->cantier, 0);
  57. }
  58. canctl1 = in_8(&regs->canctl1);
  59. if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
  60. setbits8(&regs->canctl0, MSCAN_SLPRQ);
  61. for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
  62. if (in_8(&regs->canctl1) & MSCAN_SLPAK)
  63. break;
  64. udelay(100);
  65. }
  66. /*
  67. * The mscan controller will fail to enter sleep mode,
  68. * while there are irregular activities on bus, like
  69. * somebody keeps retransmitting. This behavior is
  70. * undocumented and seems to differ between mscan built
  71. * in mpc5200b and mpc5200. We proceed in that case,
  72. * since otherwise the slprq will be kept set and the
  73. * controller will get stuck. NOTE: INITRQ or CSWAI
  74. * will abort all active transmit actions, if still
  75. * any, at once.
  76. */
  77. if (i >= MSCAN_SET_MODE_RETRIES)
  78. netdev_dbg(dev,
  79. "device failed to enter sleep mode. "
  80. "We proceed anyhow.\n");
  81. else
  82. priv->can.state = CAN_STATE_SLEEPING;
  83. }
  84. if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
  85. setbits8(&regs->canctl0, MSCAN_INITRQ);
  86. for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
  87. if (in_8(&regs->canctl1) & MSCAN_INITAK)
  88. break;
  89. }
  90. if (i >= MSCAN_SET_MODE_RETRIES)
  91. ret = -ENODEV;
  92. }
  93. if (!ret)
  94. priv->can.state = CAN_STATE_STOPPED;
  95. if (mode & MSCAN_CSWAI)
  96. setbits8(&regs->canctl0, MSCAN_CSWAI);
  97. } else {
  98. canctl1 = in_8(&regs->canctl1);
  99. if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
  100. clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
  101. for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
  102. canctl1 = in_8(&regs->canctl1);
  103. if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
  104. break;
  105. }
  106. if (i >= MSCAN_SET_MODE_RETRIES)
  107. ret = -ENODEV;
  108. else
  109. priv->can.state = CAN_STATE_ERROR_ACTIVE;
  110. }
  111. }
  112. return ret;
  113. }
/*
 * Reset the driver's TX bookkeeping, switch the controller into normal
 * mode and enable receive interrupts.  Returns 0 or a negative error
 * from mscan_set_mode().
 */
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 canrflg;
	int err;

	/* Mask RX/error interrupts while driver state is reinitialized. */
	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Clear pending bus-off condition */
		if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
			out_8(&regs->canmisc, MSCAN_BOHOLD);
	}

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	/* Derive the initial CAN state from the current RX/TX status bits. */
	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
				    MSCAN_STATE_TX(canrflg))];

	/* No TX buffers in flight yet, so keep TX interrupts off. */
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

	return 0;
}
  144. static int mscan_restart(struct net_device *dev)
  145. {
  146. struct mscan_priv *priv = netdev_priv(dev);
  147. if (priv->type == MSCAN_TYPE_MPC5121) {
  148. struct mscan_regs __iomem *regs = priv->reg_base;
  149. priv->can.state = CAN_STATE_ERROR_ACTIVE;
  150. WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
  151. "bus-off state expected\n");
  152. out_8(&regs->canmisc, MSCAN_BOHOLD);
  153. /* Re-enable receive interrupts. */
  154. out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
  155. } else {
  156. if (priv->can.state <= CAN_STATE_BUS_OFF)
  157. mscan_set_mode(dev, MSCAN_INIT_MODE);
  158. return mscan_start(dev);
  159. }
  160. return 0;
  161. }
/*
 * Queue one CAN frame into a free hardware TX buffer.
 *
 * Buffers with a lower id win bus arbitration first, so whenever a
 * lower-numbered buffer is reused the software priority counter
 * (cur_pri) is bumped to keep frames leaving in submission order; when
 * the counter is about to wrap, the queue is stopped until all pending
 * buffers drain (F_TX_WAIT_ALL).
 */
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	/* Mask TX-done interrupts while the bookkeeping is updated. */
	out_8(&regs->cantier, 0);

	/* i = bitmask of currently free TX buffers */
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then current frame will be send out of order,
		 * since buffer with lower id have higher priority (hell..)
		 */
		netif_stop_queue(dev);
		fallthrough;
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				/* Priority space exhausted: drain everything first. */
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	/* Select the buffer (cantbsel) before writing the tx.* registers. */
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* Data registers are 16 bit wide with reserved gaps between. */
		for (i = 0; i < frame->len / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* write remaining byte if necessary */
		if (frame->len & 1)
			out_8(data, frame->data[frame->len - 1]);
	}
	out_8(&regs->tx.dlr, frame->len);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		netif_trans_update(dev);

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id, 0);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}
  242. static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
  243. {
  244. struct mscan_priv *priv = netdev_priv(dev);
  245. if (unlikely(canrflg & MSCAN_CSCIF))
  246. return state_map[max(MSCAN_STATE_RX(canrflg),
  247. MSCAN_STATE_TX(canrflg))];
  248. return priv->can.state;
  249. }
/*
 * Copy the frame currently in the RX buffer window into @frame, then
 * acknowledge MSCAN_RXF to release the hardware buffer.
 */
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	if (can_id & (1 << 3)) {
		/* Bit 3 set: extended (29-bit) frame; the id is split
		 * across idr1_0/idr3_2 with flag bits wedged in between. */
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		/* Standard (11-bit) frame. */
		can_id >>= 4;
		frame->can_id = 0;
	}

	/* Lowest bit is RTR; the id proper starts one bit above it. */
	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;

	frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf);

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* Data registers are 16 bit wide with reserved gaps between. */
		for (i = 0; i < frame->len / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* read remaining byte if necessary */
		if (frame->len & 1)
			frame->data[frame->len - 1] = in_8(data);
	}

	/* Acknowledge reception; frees the hardware RX buffer. */
	out_8(&regs->canrflg, MSCAN_RXF);
}
/*
 * Build an error frame from the CANRFLG status in @canrflg, update the
 * CAN state machine, and acknowledge the error interrupt sources.
 */
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state new_state;

	netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		/* Hardware RX overrun */
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	new_state = get_new_state(dev, canrflg);
	if (new_state != priv->can.state) {
		can_change_state(dev, frame,
				 state_map[MSCAN_STATE_TX(canrflg)],
				 state_map[MSCAN_STATE_RX(canrflg)]);

		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/*
			 * The MSCAN on the MPC5200 does recover from bus-off
			 * automatically. To avoid that we stop the chip doing
			 * a light-weight stop (we are in irq-context).
			 */
			if (priv->type != MSCAN_TYPE_MPC5121) {
				out_8(&regs->cantier, 0);
				out_8(&regs->canrier, 0);
				setbits8(&regs->canctl0,
					 MSCAN_SLPRQ | MSCAN_INITRQ);
			}
			can_bus_off(dev);
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->len = CAN_ERR_DLC;
	/* Acknowledge all handled error interrupt flags. */
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}
/*
 * NAPI poll handler: drain received frames and error events from the
 * controller, up to @quota.  Once nothing is pending, NAPI is completed
 * and the interrupt mask saved by the ISR (shadow_canrier) is restored.
 */
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int work_done = 0;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (work_done < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				netdev_notice(dev, "packet dropped\n");
			stats->rx_dropped++;
			/* Ack the event anyway so we do not spin on it. */
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF) {
			mscan_get_rx_frame(dev, frame);
			stats->rx_packets++;
			if (!(frame->can_id & CAN_RTR_FLAG))
				stats->rx_bytes += frame->len;
		} else if (canrflg & MSCAN_ERR_IF) {
			mscan_get_err_frame(dev, frame, canrflg);
		}

		work_done++;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		if (likely(napi_complete_done(&priv->napi, work_done))) {
			clear_bit(F_RX_PROGRESS, &priv->flags);
			/* Keep interrupts masked once we went bus-off. */
			if (priv->can.state < CAN_STATE_BUS_OFF)
				out_8(&regs->canrier, priv->shadow_canrier);
		}
	}
	return work_done;
}
/*
 * Controller interrupt handler.
 *
 * TX part: every buffer whose done flag is set in cantflg gets its echo
 * skb released and is returned to the free pool; the queue is woken
 * unless F_TX_WAIT_ALL asks for a full drain first.  RX/error part:
 * interrupts are masked (old mask saved in shadow_canrier) and further
 * processing is deferred to NAPI.
 */
static irqreturn_t mscan_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	u8 cantier, cantflg, canrflg;
	irqreturn_t ret = IRQ_NONE;

	cantier = in_8(&regs->cantier) & MSCAN_TXE;
	cantflg = in_8(&regs->cantflg) & cantier;

	if (cantier && cantflg) {
		struct list_head *tmp, *pos;

		/* Complete all buffers whose transmission has finished. */
		list_for_each_safe(pos, tmp, &priv->tx_head) {
			struct tx_queue_entry *entry =
			    list_entry(pos, struct tx_queue_entry, list);
			u8 mask = entry->mask;

			if (!(cantflg & mask))
				continue;

			out_8(&regs->cantbsel, mask);
			stats->tx_bytes += can_get_echo_skb(dev, entry->id,
							    NULL);
			stats->tx_packets++;
			priv->tx_active &= ~mask;
			list_del(pos);
		}

		if (list_empty(&priv->tx_head)) {
			clear_bit(F_TX_WAIT_ALL, &priv->flags);
			clear_bit(F_TX_PROGRESS, &priv->flags);
			priv->cur_pri = 0;
		} else {
			netif_trans_update(dev);
		}

		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
			netif_wake_queue(dev);

		out_8(&regs->cantier, priv->tx_active);
		ret = IRQ_HANDLED;
	}

	canrflg = in_8(&regs->canrflg);
	if ((canrflg & ~MSCAN_STAT_MSK) &&
	    !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
		/*
		 * NOTE(review): canrflg is not re-read between the outer and
		 * this inner test, so the inner condition repeats the outer
		 * one and the else branch looks unreachable; kept as-is to
		 * preserve behavior.
		 */
		if (canrflg & ~MSCAN_STAT_MSK) {
			priv->shadow_canrier = in_8(&regs->canrier);
			out_8(&regs->canrier, 0);
			napi_schedule(&priv->napi);
			ret = IRQ_HANDLED;
		} else {
			clear_bit(F_RX_PROGRESS, &priv->flags);
		}
	}
	return ret;
}
  417. static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
  418. {
  419. int ret = 0;
  420. switch (mode) {
  421. case CAN_MODE_START:
  422. ret = mscan_restart(dev);
  423. if (ret)
  424. break;
  425. if (netif_queue_stopped(dev))
  426. netif_wake_queue(dev);
  427. break;
  428. default:
  429. ret = -EOPNOTSUPP;
  430. break;
  431. }
  432. return ret;
  433. }
  434. static int mscan_do_set_bittiming(struct net_device *dev)
  435. {
  436. struct mscan_priv *priv = netdev_priv(dev);
  437. struct mscan_regs __iomem *regs = priv->reg_base;
  438. struct can_bittiming *bt = &priv->can.bittiming;
  439. u8 btr0, btr1;
  440. btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
  441. btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
  442. BTR1_SET_TSEG2(bt->phase_seg2) |
  443. BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
  444. netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
  445. out_8(&regs->canbtr0, btr0);
  446. out_8(&regs->canbtr1, btr1);
  447. return 0;
  448. }
  449. static int mscan_get_berr_counter(const struct net_device *dev,
  450. struct can_berr_counter *bec)
  451. {
  452. struct mscan_priv *priv = netdev_priv(dev);
  453. struct mscan_regs __iomem *regs = priv->reg_base;
  454. bec->txerr = in_8(&regs->cantxerr);
  455. bec->rxerr = in_8(&regs->canrxerr);
  456. return 0;
  457. }
/*
 * ndo_open: enable the clocks, open the CAN device, attach the IRQ
 * handler, apply the listen-only control mode and start the controller.
 * Uses goto-based unwinding so every acquired resource is released on
 * failure, in reverse order.
 */
static int mscan_open(struct net_device *dev)
{
	int ret;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	ret = clk_prepare_enable(priv->clk_ipg);
	if (ret)
		goto exit_retcode;
	ret = clk_prepare_enable(priv->clk_can);
	if (ret)
		goto exit_dis_ipg_clock;

	/* common open */
	ret = open_candev(dev);
	if (ret)
		goto exit_dis_can_clock;

	napi_enable(&priv->napi);

	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
	if (ret < 0) {
		netdev_err(dev, "failed to attach interrupt\n");
		goto exit_napi_disable;
	}

	/* Honor the listen-only (monitor) control mode flag. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		setbits8(&regs->canctl1, MSCAN_LISTEN);
	else
		clrbits8(&regs->canctl1, MSCAN_LISTEN);

	ret = mscan_start(dev);
	if (ret)
		goto exit_free_irq;

	netif_start_queue(dev);

	return 0;

exit_free_irq:
	free_irq(dev->irq, dev);
exit_napi_disable:
	napi_disable(&priv->napi);
	close_candev(dev);
exit_dis_can_clock:
	clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
	clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
	return ret;
}
/*
 * ndo_stop: quiesce the queue and NAPI, mask all controller interrupts,
 * put the controller into init mode, then release IRQ and clocks in
 * reverse order of mscan_open().
 */
static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Mask TX and RX/error interrupts before stopping the controller. */
	out_8(&regs->cantier, 0);
	out_8(&regs->canrier, 0);
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);

	clk_disable_unprepare(priv->clk_can);
	clk_disable_unprepare(priv->clk_ipg);
	return 0;
}
/* net_device callbacks for the MSCAN interface. */
static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open = mscan_open,
	.ndo_stop = mscan_close,
	.ndo_start_xmit = mscan_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
/* Minimal ethtool support: timestamping capability reporting only. */
static const struct ethtool_ops mscan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
/*
 * Final hardware setup and registration with the CAN core.
 *
 * @mscan_clksrc: non-zero selects the MSCAN_CLKSRC clock input bit in
 * canctl1.  Programs a fully open acceptance filter (accept every id)
 * and leaves the controller in init mode; it is started on ndo_open.
 */
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (mscan_clksrc)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Only the MPC5121 variant exposes error counters. */
		priv->can.do_get_berr_counter = mscan_get_berr_counter;
		ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
	}

	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}
/*
 * Tear-down counterpart of register_mscandev(): stop the controller,
 * disable it (clear MSCAN_CANE) and unregister from the CAN core.
 */
void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}
  563. struct net_device *alloc_mscandev(void)
  564. {
  565. struct net_device *dev;
  566. struct mscan_priv *priv;
  567. int i;
  568. dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
  569. if (!dev)
  570. return NULL;
  571. priv = netdev_priv(dev);
  572. dev->netdev_ops = &mscan_netdev_ops;
  573. dev->ethtool_ops = &mscan_ethtool_ops;
  574. dev->flags |= IFF_ECHO; /* we support local echo */
  575. netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);
  576. priv->can.bittiming_const = &mscan_bittiming_const;
  577. priv->can.do_set_bittiming = mscan_do_set_bittiming;
  578. priv->can.do_set_mode = mscan_do_set_mode;
  579. priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
  580. CAN_CTRLMODE_LISTENONLY;
  581. for (i = 0; i < TX_QUEUE_SIZE; i++) {
  582. priv->tx_queue[i].id = i;
  583. priv->tx_queue[i].mask = 1 << i;
  584. }
  585. return dev;
  586. }
/* Module metadata */
MODULE_AUTHOR("Andrey Volkov <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips");