  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (C) 2021 in-tech smart charging GmbH
  3. *
  4. * driver is based on micrel/ks8851_spi.c
  5. */
  6. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/if_vlan.h>
#include <linux/seq_file.h>
#include <linux/spi/spi.h>
#include <linux/of_net.h>
/* default message enable bits (see netif_msg_init() in probe) */
#define MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER)

#define DRV_NAME	"mse102x"

/* 16-bit markers used by the SPI framing protocol */
#define DET_CMD		0x0001	/* command header */
#define DET_SOF		0x0002	/* start of frame */
#define DET_DFT		0x55AA	/* data frame tail */

/* command word layout: top 4 bits opcode, low 12 bits frame length */
#define CMD_SHIFT	12
#define CMD_RTS		(0x1 << CMD_SHIFT)	/* request to send */
#define CMD_CTR		(0x2 << CMD_SHIFT)	/* clear to receive */

#define CMD_MASK	GENMASK(15, CMD_SHIFT)
#define LEN_MASK	GENMASK(CMD_SHIFT - 1, 0)

/* on-wire sizes of the framing elements, in bytes */
#define DET_CMD_LEN	4
#define DET_SOF_LEN	2
#define DET_DFT_LEN	2

/* supported SPI clock range for this device */
#define MIN_FREQ_HZ	6000000
#define MAX_FREQ_HZ	7142857
/* Driver-private error counters, exported via ethtool.
 * Member order must match mse102x_gstrings_stats below.
 */
struct mse102x_stats {
	u64 xfer_err;		/* spi_sync() failures */
	u64 invalid_cmd;	/* response header was not DET_CMD */
	u64 invalid_ctr;	/* unexpected answer to a request-to-send */
	u64 invalid_dft;	/* data frame tail marker mismatch */
	u64 invalid_len;	/* device reported no frame length */
	u64 invalid_rts;	/* CMD_RTS expected but not received */
	u64 invalid_sof;	/* start-of-frame marker mismatch */
	u64 tx_timeout;		/* TX worker gave up after its deadline */
};
/* ethtool stat names; order must match struct mse102x_stats above */
static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
	"SPI transfer errors",
	"Invalid command",
	"Invalid CTR",
	"Invalid DFT",
	"Invalid frame length",
	"Invalid RTS",
	"Invalid SOF",
	"TX timeout",
};
/* Bus-independent per-device state */
struct mse102x_net {
	struct net_device	*ndev;

	u8			rxd[8];	/* scratch buffer for command responses */
	u8			txd[8];	/* scratch buffer for command transmit */

	u32			msg_enable ____cacheline_aligned;

	struct sk_buff_head	txq;	/* frames queued for the TX worker */
	struct mse102x_stats	stats;
};
/* SPI-bus specific wrapper around the generic device state */
struct mse102x_net_spi {
	struct mse102x_net	mse102x;
	struct mutex		lock;		/* Protect SPI frame transfer */
	struct work_struct	tx_work;	/* TX deferred to process context */
	struct spi_device	*spidev;
	struct spi_message	spi_msg;	/* pre-made single-transfer message */
	struct spi_transfer	spi_xfer;	/* reused for every transaction */
#ifdef CONFIG_DEBUG_FS
	struct dentry		*device_root;	/* per-device debugfs directory */
#endif
};

#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)
#ifdef CONFIG_DEBUG_FS

/* debugfs "info" file: dump TX queue depth, IRQ and SPI parameters */
static int mse102x_info_show(struct seq_file *s, void *what)
{
	struct mse102x_net_spi *mses = s->private;

	seq_printf(s, "TX ring size : %u\n",
		   skb_queue_len(&mses->mse102x.txq));

	seq_printf(s, "IRQ : %d\n",
		   mses->spidev->irq);

	/* effective_speed_hz reflects the speed of the last transfer */
	seq_printf(s, "SPI effective speed : %lu\n",
		   (unsigned long)mses->spi_xfer.effective_speed_hz);
	seq_printf(s, "SPI mode : %x\n",
		   mses->spidev->mode);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(mse102x_info);

/* Create <debugfs>/<netdev name>/info for this device */
static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
{
	mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev),
					       NULL);

	debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses,
			    &mse102x_info_fops);
}

static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
	debugfs_remove_recursive(mses->device_root);
}

#else /* CONFIG_DEBUG_FS */

/* debugfs disabled: empty stubs so callers need no #ifdefs */
static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
{
}

static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
}

#endif
  108. /* SPI register read/write calls.
  109. *
  110. * All these calls issue SPI transactions to access the chip's registers. They
  111. * all require that the necessary lock is held to prevent accesses when the
  112. * chip is busy transferring packet data.
  113. */
  114. static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd)
  115. {
  116. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  117. struct spi_transfer *xfer = &mses->spi_xfer;
  118. struct spi_message *msg = &mses->spi_msg;
  119. __be16 txb[2];
  120. int ret;
  121. txb[0] = cpu_to_be16(DET_CMD);
  122. txb[1] = cpu_to_be16(cmd);
  123. xfer->tx_buf = txb;
  124. xfer->rx_buf = NULL;
  125. xfer->len = DET_CMD_LEN;
  126. ret = spi_sync(mses->spidev, msg);
  127. if (ret < 0) {
  128. netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
  129. __func__, ret);
  130. mse->stats.xfer_err++;
  131. }
  132. }
  133. static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
  134. {
  135. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  136. struct spi_transfer *xfer = &mses->spi_xfer;
  137. struct spi_message *msg = &mses->spi_msg;
  138. __be16 *txb = (__be16 *)mse->txd;
  139. __be16 *cmd = (__be16 *)mse->rxd;
  140. u8 *trx = mse->rxd;
  141. int ret;
  142. txb[0] = 0;
  143. txb[1] = 0;
  144. xfer->tx_buf = txb;
  145. xfer->rx_buf = trx;
  146. xfer->len = DET_CMD_LEN;
  147. ret = spi_sync(mses->spidev, msg);
  148. if (ret < 0) {
  149. netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
  150. __func__, ret);
  151. mse->stats.xfer_err++;
  152. } else if (*cmd != cpu_to_be16(DET_CMD)) {
  153. net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
  154. __func__, *cmd);
  155. mse->stats.invalid_cmd++;
  156. ret = -EIO;
  157. } else {
  158. memcpy(rxb, trx + 2, 2);
  159. }
  160. return ret;
  161. }
  162. static inline void mse102x_push_header(struct sk_buff *skb)
  163. {
  164. __be16 *header = skb_push(skb, DET_SOF_LEN);
  165. *header = cpu_to_be16(DET_SOF);
  166. }
  167. static inline void mse102x_put_footer(struct sk_buff *skb)
  168. {
  169. __be16 *footer = skb_put(skb, DET_DFT_LEN);
  170. *footer = cpu_to_be16(DET_DFT);
  171. }
  172. static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
  173. unsigned int pad)
  174. {
  175. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  176. struct spi_transfer *xfer = &mses->spi_xfer;
  177. struct spi_message *msg = &mses->spi_msg;
  178. struct sk_buff *tskb;
  179. int ret;
  180. netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
  181. __func__, txp, txp->len, txp->data);
  182. if ((skb_headroom(txp) < DET_SOF_LEN) ||
  183. (skb_tailroom(txp) < DET_DFT_LEN + pad)) {
  184. tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad,
  185. GFP_KERNEL);
  186. if (!tskb)
  187. return -ENOMEM;
  188. dev_kfree_skb(txp);
  189. txp = tskb;
  190. }
  191. mse102x_push_header(txp);
  192. if (pad)
  193. skb_put_zero(txp, pad);
  194. mse102x_put_footer(txp);
  195. xfer->tx_buf = txp->data;
  196. xfer->rx_buf = NULL;
  197. xfer->len = txp->len;
  198. ret = spi_sync(mses->spidev, msg);
  199. if (ret < 0) {
  200. netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
  201. __func__, ret);
  202. mse->stats.xfer_err++;
  203. }
  204. return ret;
  205. }
  206. static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
  207. unsigned int frame_len)
  208. {
  209. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  210. struct spi_transfer *xfer = &mses->spi_xfer;
  211. struct spi_message *msg = &mses->spi_msg;
  212. __be16 *sof = (__be16 *)buff;
  213. __be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len);
  214. int ret;
  215. xfer->rx_buf = buff;
  216. xfer->tx_buf = NULL;
  217. xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN;
  218. ret = spi_sync(mses->spidev, msg);
  219. if (ret < 0) {
  220. netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
  221. __func__, ret);
  222. mse->stats.xfer_err++;
  223. } else if (*sof != cpu_to_be16(DET_SOF)) {
  224. netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
  225. __func__, *sof);
  226. mse->stats.invalid_sof++;
  227. ret = -EIO;
  228. } else if (*dft != cpu_to_be16(DET_DFT)) {
  229. netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n",
  230. __func__, *dft);
  231. mse->stats.invalid_dft++;
  232. ret = -EIO;
  233. }
  234. return ret;
  235. }
/* Hex-dump a packet to the kernel log; callers gate this behind
 * netif_msg_pktdata().
 */
static void mse102x_dump_packet(const char *msg, int len, const char *data)
{
	printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len);
	print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       data, len, true);
}
  242. static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
  243. {
  244. struct sk_buff *skb;
  245. unsigned int rxalign;
  246. unsigned int rxlen;
  247. __be16 rx = 0;
  248. u16 cmd_resp;
  249. u8 *rxpkt;
  250. int ret;
  251. mse102x_tx_cmd_spi(mse, CMD_CTR);
  252. ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
  253. cmd_resp = be16_to_cpu(rx);
  254. if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
  255. usleep_range(50, 100);
  256. mse102x_tx_cmd_spi(mse, CMD_CTR);
  257. ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
  258. if (ret)
  259. return;
  260. cmd_resp = be16_to_cpu(rx);
  261. if ((cmd_resp & CMD_MASK) != CMD_RTS) {
  262. net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
  263. __func__, cmd_resp);
  264. mse->stats.invalid_rts++;
  265. return;
  266. }
  267. net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
  268. __func__);
  269. }
  270. rxlen = cmd_resp & LEN_MASK;
  271. if (!rxlen) {
  272. net_dbg_ratelimited("%s: No frame length defined\n", __func__);
  273. mse->stats.invalid_len++;
  274. return;
  275. }
  276. rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
  277. skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
  278. if (!skb)
  279. return;
  280. /* 2 bytes Start of frame (before ethernet header)
  281. * 2 bytes Data frame tail (after ethernet frame)
  282. * They are copied, but ignored.
  283. */
  284. rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
  285. if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
  286. mse->ndev->stats.rx_errors++;
  287. dev_kfree_skb(skb);
  288. return;
  289. }
  290. if (netif_msg_pktdata(mse))
  291. mse102x_dump_packet(__func__, skb->len, skb->data);
  292. skb->protocol = eth_type_trans(skb, mse->ndev);
  293. netif_rx(skb);
  294. mse->ndev->stats.rx_packets++;
  295. mse->ndev->stats.rx_bytes += rxlen;
  296. }
/* Negotiate the RTS/CTR handshake and transmit one frame.
 *
 * @mse: device private data
 * @txb: frame to send
 * @work_timeout: jiffies deadline after which the attempt is abandoned
 *
 * Returns 0 on success, -ETIMEDOUT when the device never answered CMD_RTS
 * with CMD_CTR before @work_timeout, or a negative SPI error.
 *
 * NOTE(review): mse102x_tx_frame_spi() may free and replace @txb via
 * skb_copy_expand() while the caller frees @txb again afterwards — confirm
 * skb ownership across this call chain.
 */
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
			      unsigned long work_timeout)
{
	unsigned int pad = 0;
	__be16 rx = 0;
	u16 cmd_resp;
	int ret;
	bool first = true;

	/* pad short frames up to the 60-byte minimum */
	if (txb->len < 60)
		pad = 60 - txb->len;

	while (1) {
		/* announce the (padded) frame length with a request to send */
		mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
		ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
		cmd_resp = be16_to_cpu(rx);

		if (!ret) {
			/* ready to send frame ? */
			if (cmd_resp == CMD_CTR)
				break;

			net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
					    __func__, cmd_resp);
			mse->stats.invalid_ctr++;
		}

		/* It's not predictable how long / many retries it takes to
		 * send at least one packet, so TX timeouts are possible.
		 * That's the reason why the netdev watchdog is not used here.
		 */
		if (time_after(jiffies, work_timeout))
			return -ETIMEDOUT;

		if (first) {
			/* throttle at first issue */
			netif_stop_queue(mse->ndev);
			/* fast retry */
			usleep_range(50, 100);
			first = false;
		} else {
			msleep(20);
		}
	}

	ret = mse102x_tx_frame_spi(mse, txb, pad);
	if (ret)
		net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
				    __func__, ret);

	return ret;
}
#define TX_QUEUE_MAX 10

/* Worker: drain the TX queue, sending each frame over SPI under the lock.
 * Per-frame stats are updated here; a final -ETIMEDOUT bumps tx_timeout.
 * The netif queue is woken again at the end regardless of the outcome.
 *
 * NOTE(review): txb->len is read and dev_kfree_skb(txb) is called after
 * mse102x_tx_pkt_spi(); if mse102x_tx_frame_spi() freed the skb on its
 * skb_copy_expand() path this is a use-after-free/double free — verify.
 */
static void mse102x_tx_work(struct work_struct *work)
{
	/* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
	unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
	struct mse102x_net_spi *mses;
	struct mse102x_net *mse;
	struct sk_buff *txb;
	int ret = 0;

	mses = container_of(work, struct mse102x_net_spi, tx_work);
	mse = &mses->mse102x;

	while ((txb = skb_dequeue(&mse->txq))) {
		mutex_lock(&mses->lock);
		ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
		mutex_unlock(&mses->lock);
		if (ret) {
			mse->ndev->stats.tx_dropped++;
		} else {
			mse->ndev->stats.tx_bytes += txb->len;
			mse->ndev->stats.tx_packets++;
		}

		dev_kfree_skb(txb);
	}

	if (ret == -ETIMEDOUT) {
		if (netif_msg_timer(mse))
			netdev_err(mse->ndev, "tx work timeout\n");

		mse->stats.tx_timeout++;
	}

	netif_wake_queue(mse->ndev);
}
  371. static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
  372. struct net_device *ndev)
  373. {
  374. struct mse102x_net *mse = netdev_priv(ndev);
  375. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  376. netif_dbg(mse, tx_queued, ndev,
  377. "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
  378. skb_queue_tail(&mse->txq, skb);
  379. if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
  380. netif_stop_queue(ndev);
  381. schedule_work(&mses->tx_work);
  382. return NETDEV_TX_OK;
  383. }
  384. static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
  385. {
  386. struct net_device *ndev = mse->ndev;
  387. int ret = of_get_ethdev_address(np, ndev);
  388. if (ret) {
  389. eth_hw_addr_random(ndev);
  390. netdev_err(ndev, "Using random MAC address: %pM\n",
  391. ndev->dev_addr);
  392. }
  393. }
  394. /* Assumption: this is called for every incoming packet */
  395. static irqreturn_t mse102x_irq(int irq, void *_mse)
  396. {
  397. struct mse102x_net *mse = _mse;
  398. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  399. mutex_lock(&mses->lock);
  400. mse102x_rx_pkt_spi(mse);
  401. mutex_unlock(&mses->lock);
  402. return IRQ_HANDLED;
  403. }
  404. static int mse102x_net_open(struct net_device *ndev)
  405. {
  406. struct mse102x_net *mse = netdev_priv(ndev);
  407. int ret;
  408. ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
  409. ndev->name, mse);
  410. if (ret < 0) {
  411. netdev_err(ndev, "Failed to get irq: %d\n", ret);
  412. return ret;
  413. }
  414. netif_dbg(mse, ifup, ndev, "opening\n");
  415. netif_start_queue(ndev);
  416. netif_carrier_on(ndev);
  417. netif_dbg(mse, ifup, ndev, "network device up\n");
  418. return 0;
  419. }
/* ndo_stop: drop carrier, flush pending TX work before purging the queue,
 * and release the IRQ last so no handler runs during teardown.
 */
static int mse102x_net_stop(struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	netif_info(mse, ifdown, ndev, "shutting down\n");

	netif_carrier_off(mse->ndev);

	/* stop any outstanding work */
	flush_work(&mses->tx_work);

	netif_stop_queue(ndev);

	skb_queue_purge(&mse->txq);

	free_irq(ndev->irq, mse);

	return 0;
}
/* net_device callbacks; MAC address handling uses the generic eth helpers */
static const struct net_device_ops mse102x_netdev_ops = {
	.ndo_open		= mse102x_net_open,
	.ndo_stop		= mse102x_net_stop,
	.ndo_start_xmit		= mse102x_start_xmit_spi,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
  440. /* ethtool support */
  441. static void mse102x_get_drvinfo(struct net_device *ndev,
  442. struct ethtool_drvinfo *di)
  443. {
  444. strscpy(di->driver, DRV_NAME, sizeof(di->driver));
  445. strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
  446. }
  447. static u32 mse102x_get_msglevel(struct net_device *ndev)
  448. {
  449. struct mse102x_net *mse = netdev_priv(ndev);
  450. return mse->msg_enable;
  451. }
  452. static void mse102x_set_msglevel(struct net_device *ndev, u32 to)
  453. {
  454. struct mse102x_net *mse = netdev_priv(ndev);
  455. mse->msg_enable = to;
  456. }
/* ethtool: copy the raw u64 counters out of struct mse102x_stats.
 * NOTE(review): relies on struct mse102x_stats being exactly
 * ARRAY_SIZE(mse102x_gstrings_stats) consecutive u64 members in the same
 * order as the strings — keep both in sync when adding counters.
 */
static void mse102x_get_ethtool_stats(struct net_device *ndev,
				      struct ethtool_stats *estats, u64 *data)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_stats *st = &mse->stats;

	memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64));
}
  464. static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
  465. {
  466. switch (stringset) {
  467. case ETH_SS_STATS:
  468. memcpy(buf, &mse102x_gstrings_stats,
  469. sizeof(mse102x_gstrings_stats));
  470. break;
  471. default:
  472. WARN_ON(1);
  473. break;
  474. }
  475. }
  476. static int mse102x_get_sset_count(struct net_device *ndev, int sset)
  477. {
  478. switch (sset) {
  479. case ETH_SS_STATS:
  480. return ARRAY_SIZE(mse102x_gstrings_stats);
  481. default:
  482. return -EINVAL;
  483. }
  484. }
/* ethtool callbacks; link state comes from the generic carrier helper */
static const struct ethtool_ops mse102x_ethtool_ops = {
	.get_drvinfo		= mse102x_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= mse102x_get_msglevel,
	.set_msglevel		= mse102x_set_msglevel,
	.get_ethtool_stats	= mse102x_get_ethtool_stats,
	.get_strings		= mse102x_get_strings,
	.get_sset_count		= mse102x_get_sset_count,
};
/* driver bus management functions */
#ifdef CONFIG_PM_SLEEP

/* System suspend: detach from the stack and close the interface (flushes
 * TX work and frees the IRQ) if it is running.
 */
static int mse102x_suspend(struct device *dev)
{
	struct mse102x_net *mse = dev_get_drvdata(dev);
	struct net_device *ndev = mse->ndev;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		mse102x_net_stop(ndev);
	}

	return 0;
}

/* System resume: reopen and reattach the interface if it was running.
 * NOTE(review): the return value of mse102x_net_open() is ignored, so a
 * failed IRQ request on resume would go unnoticed — confirm intended.
 */
static int mse102x_resume(struct device *dev)
{
	struct mse102x_net *mse = dev_get_drvdata(dev);
	struct net_device *ndev = mse->ndev;

	if (netif_running(ndev)) {
		mse102x_net_open(ndev);
		netif_device_attach(ndev);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
/* Probe: validate/configure the SPI link, allocate and initialise the
 * net_device (devm-managed), and register it. Returns 0 or a negative
 * errno; on failure devm releases the allocation.
 */
static int mse102x_probe_spi(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct mse102x_net_spi *mses;
	struct net_device *ndev;
	struct mse102x_net *mse;
	int ret;

	spi->bits_per_word = 8;
	spi->mode |= SPI_MODE_3;
	/* enforce minimum speed to ensure device functionality */
	spi->master->min_speed_hz = MIN_FREQ_HZ;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = MAX_FREQ_HZ;

	if (spi->max_speed_hz < MIN_FREQ_HZ ||
	    spi->max_speed_hz > MAX_FREQ_HZ) {
		dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n",
			MIN_FREQ_HZ, MAX_FREQ_HZ);
		return -EINVAL;
	}

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret);
		return ret;
	}

	ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi));
	if (!ndev)
		return -ENOMEM;

	/* reserve room for the SOF/DFT framing around each TX frame */
	ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4);
	ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4);
	ndev->priv_flags &= ~IFF_TX_SKB_SHARING;
	ndev->tx_queue_len = 100;

	mse = netdev_priv(ndev);
	mses = to_mse102x_spi(mse);

	mses->spidev = spi;
	mutex_init(&mses->lock);
	INIT_WORK(&mses->tx_work, mse102x_tx_work);

	/* initialise pre-made spi transfer messages */
	spi_message_init(&mses->spi_msg);
	spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);

	ndev->irq = spi->irq;
	mse->ndev = ndev;

	/* set the default message enable */
	mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT);

	skb_queue_head_init(&mse->txq);

	SET_NETDEV_DEV(ndev, dev);

	dev_set_drvdata(dev, mse);

	/* no link detection: carrier follows open/stop */
	netif_carrier_off(mse->ndev);
	ndev->netdev_ops = &mse102x_netdev_ops;
	ndev->ethtool_ops = &mse102x_ethtool_ops;

	mse102x_init_mac(mse, dev->of_node);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register network device: %d\n", ret);
		return ret;
	}

	mse102x_init_device_debugfs(mses);

	return 0;
}
  576. static void mse102x_remove_spi(struct spi_device *spi)
  577. {
  578. struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
  579. struct mse102x_net_spi *mses = to_mse102x_spi(mse);
  580. if (netif_msg_drv(mse))
  581. dev_info(&spi->dev, "remove\n");
  582. mse102x_remove_device_debugfs(mses);
  583. unregister_netdev(mse->ndev);
  584. }
/* DT match: MSE1021 and MSE1022 are handled identically */
static const struct of_device_id mse102x_match_table[] = {
	{ .compatible = "vertexcom,mse1021" },
	{ .compatible = "vertexcom,mse1022" },
	{ }
};
MODULE_DEVICE_TABLE(of, mse102x_match_table);

static const struct spi_device_id mse102x_ids[] = {
	{ "mse1021" },
	{ "mse1022" },
	{ }
};
MODULE_DEVICE_TABLE(spi, mse102x_ids);

static struct spi_driver mse102x_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mse102x_match_table,
		.pm = &mse102x_pm_ops,
	},
	.probe = mse102x_probe_spi,
	.remove = mse102x_remove_spi,
	.id_table = mse102x_ids,
};
module_spi_driver(mse102x_driver);

MODULE_DESCRIPTION("MSE102x Network driver");
MODULE_AUTHOR("Stefan Wahren <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);