ax88796c_main.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2010 ASIX Electronics Corporation
  4. * Copyright (c) 2020 Samsung Electronics Co., Ltd.
  5. *
  6. * ASIX AX88796C SPI Fast Ethernet Linux driver
  7. */
  8. #define pr_fmt(fmt) "ax88796c: " fmt
  9. #include "ax88796c_main.h"
  10. #include "ax88796c_ioctl.h"
  11. #include <linux/bitmap.h>
  12. #include <linux/etherdevice.h>
  13. #include <linux/iopoll.h>
  14. #include <linux/lockdep.h>
  15. #include <linux/mdio.h>
  16. #include <linux/minmax.h>
  17. #include <linux/module.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/of.h>
  20. #include <linux/phy.h>
  21. #include <linux/skbuff.h>
  22. #include <linux/spi/spi.h>
/* Enable SPI compression when the Kconfig option selected it (see
 * ax88796c_soft_reset(), which programs SPICR_RCEN/QCEN from this).
 */
static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);
/* Default netif message level, overridable via the msg_enable module param. */
static int msg_enable = NETIF_MSG_PROBE |
NETIF_MSG_LINK |
NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR;
/* Hex-encoded bitmap words of registers excluded from dumps; presumably
 * parsed into ax88796c_no_regs_mask by the ethtool/ioctl code — TODO confirm
 * against ax88796c_ioctl.c.
 */
static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000";
unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)];
module_param(msg_enable, int, 0444);
MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)");
/* Soft-reset the chip, wait for it to report ready, and reprogram the
 * SPI compression mode according to AX_CAP_COMP.
 *
 * Must be called with ax_local->spi_lock held.
 * Returns 0 on success or the read_poll_timeout() error (-ETIMEDOUT).
 */
static int ax88796c_soft_reset(struct ax88796c_device *ax_local)
{
	u16 temp;
	int ret;

	lockdep_assert_held(&ax_local->spi_lock);

	/* Pulse the reset bit in the page-0 status register. */
	AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR);
	AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR);

	/* Poll up to ~160 ms for the device-ready flag. */
	ret = read_poll_timeout(AX_READ, ret,
				(ret & PSR_DEV_READY),
				0, jiffies_to_usecs(160 * HZ / 1000), false,
				&ax_local->ax_spi, P0_PSR);
	if (ret)
		return ret;

	/* Re-apply the RX/queue compression setting lost by the reset. */
	temp = AX_READ(&ax_local->ax_spi, P4_SPICR);
	if (ax_local->priv_flags & AX_CAP_COMP) {
		AX_WRITE(&ax_local->ax_spi,
			 (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR);
		ax_local->ax_spi.comp = 1;
	} else {
		AX_WRITE(&ax_local->ax_spi,
			 (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR);
		ax_local->ax_spi.comp = 0;
	}

	return 0;
}
  57. static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local)
  58. {
  59. int ret;
  60. lockdep_assert_held(&ax_local->spi_lock);
  61. AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR);
  62. ret = read_poll_timeout(AX_READ, ret,
  63. (ret & PSR_DEV_READY),
  64. 0, jiffies_to_usecs(2 * HZ / 1000), false,
  65. &ax_local->ax_spi, P0_PSR);
  66. if (ret) {
  67. dev_err(&ax_local->spi->dev,
  68. "timeout waiting for reload eeprom\n");
  69. return ret;
  70. }
  71. return 0;
  72. }
  73. static void ax88796c_set_hw_multicast(struct net_device *ndev)
  74. {
  75. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  76. int mc_count = netdev_mc_count(ndev);
  77. u16 rx_ctl = RXCR_AB;
  78. lockdep_assert_held(&ax_local->spi_lock);
  79. memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE);
  80. if (ndev->flags & IFF_PROMISC) {
  81. rx_ctl |= RXCR_PRO;
  82. } else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) {
  83. rx_ctl |= RXCR_AMALL;
  84. } else if (mc_count == 0) {
  85. /* just broadcast and directed */
  86. } else {
  87. u32 crc_bits;
  88. int i;
  89. struct netdev_hw_addr *ha;
  90. netdev_for_each_mc_addr(ha, ndev) {
  91. crc_bits = ether_crc(ETH_ALEN, ha->addr);
  92. ax_local->multi_filter[crc_bits >> 29] |=
  93. (1 << ((crc_bits >> 26) & 7));
  94. }
  95. for (i = 0; i < 4; i++) {
  96. AX_WRITE(&ax_local->ax_spi,
  97. ((ax_local->multi_filter[i * 2 + 1] << 8) |
  98. ax_local->multi_filter[i * 2]), P3_MFAR(i));
  99. }
  100. }
  101. AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR);
  102. }
  103. static void ax88796c_set_mac_addr(struct net_device *ndev)
  104. {
  105. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  106. lockdep_assert_held(&ax_local->spi_lock);
  107. AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) |
  108. (u16)ndev->dev_addr[5]), P3_MACASR0);
  109. AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) |
  110. (u16)ndev->dev_addr[3]), P3_MACASR1);
  111. AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) |
  112. (u16)ndev->dev_addr[1]), P3_MACASR2);
  113. }
  114. static void ax88796c_load_mac_addr(struct net_device *ndev)
  115. {
  116. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  117. u8 addr[ETH_ALEN];
  118. u16 temp;
  119. lockdep_assert_held(&ax_local->spi_lock);
  120. /* Try the device tree first */
  121. if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
  122. is_valid_ether_addr(ndev->dev_addr)) {
  123. if (netif_msg_probe(ax_local))
  124. dev_info(&ax_local->spi->dev,
  125. "MAC address read from device tree\n");
  126. return;
  127. }
  128. /* Read the MAC address from AX88796C */
  129. temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
  130. addr[5] = (u8)temp;
  131. addr[4] = (u8)(temp >> 8);
  132. temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
  133. addr[3] = (u8)temp;
  134. addr[2] = (u8)(temp >> 8);
  135. temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
  136. addr[1] = (u8)temp;
  137. addr[0] = (u8)(temp >> 8);
  138. if (is_valid_ether_addr(addr)) {
  139. eth_hw_addr_set(ndev, addr);
  140. if (netif_msg_probe(ax_local))
  141. dev_info(&ax_local->spi->dev,
  142. "MAC address read from ASIX chip\n");
  143. return;
  144. }
  145. /* Use random address if none found */
  146. if (netif_msg_probe(ax_local))
  147. dev_info(&ax_local->spi->dev, "Use random MAC address\n");
  148. eth_hw_addr_random(ndev);
  149. }
/* Fill in @info's SOP, SEG, and EOP TX headers for a packet of
 * info->pkt_len bytes and convert every field to big-endian in place.
 * @ip_summed selects whether the HW checksum engine is bypassed:
 * TX_HDR_SOP_DICF is set when no checksum offload is wanted.
 */
static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed)
{
	/* Ones-complement copy of the length, used by the chip as a
	 * consistency check against the plain length fields below.
	 */
	u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR);

	/* Prepare SOP header */
	info->sop.flags_len = info->pkt_len |
		((ip_summed == CHECKSUM_NONE) ||
		 (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0);
	info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM)
		| pkt_len_bar;
	cpu_to_be16s(&info->sop.flags_len);
	cpu_to_be16s(&info->sop.seq_lenbar);

	/* Prepare Segment header: the whole packet is one segment, so
	 * both first-segment and last-segment flags are set.
	 */
	info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS
		| info->pkt_len;
	info->seg.eo_so_seglenbar = pkt_len_bar;
	cpu_to_be16s(&info->seg.flags_seqnum_seglen);
	cpu_to_be16s(&info->seg.eo_so_seglenbar);

	/* Prepare EOP header: sequence number and length plus their
	 * inverted copies.
	 */
	info->eop.seq_len = ((info->seq_num << 11) &
			     TX_HDR_EOP_SEQNUM) | info->pkt_len;
	info->eop.seqbar_lenbar = ((~info->seq_num << 11) &
				   TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar;
	cpu_to_be16s(&info->eop.seq_len);
	cpu_to_be16s(&info->eop.seqbar_lenbar);
}
  175. static int
  176. ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages)
  177. {
  178. u8 free_pages;
  179. u16 tmp;
  180. lockdep_assert_held(&ax_local->spi_lock);
  181. free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK;
  182. if (free_pages < need_pages) {
  183. /* schedule free page interrupt */
  184. tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR)
  185. & TFBFCR_SCHE_FREE_PAGE;
  186. AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET |
  187. TFBFCR_SET_FREE_PAGE(need_pages),
  188. P0_TFBFCR);
  189. return -ENOMEM;
  190. }
  191. return 0;
  192. }
/* Prepare the next queued packet for transmission over SPI: prepend the
 * SPI TXQ command and SOP/SEG headers, pad the payload to a 32-bit
 * boundary, and append the EOP header.
 *
 * Returns the prepared skb, unlinked from @q, or NULL when @q is empty,
 * the chip lacks free TX pages (the skb stays queued and a free-page
 * interrupt has been scheduled), or expanding the skb fails.
 */
static struct sk_buff *
ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	/* SPI command length: 1 byte in compressed mode, 4 otherwise. */
	u8 spi_len = ax_local->ax_spi.comp ? 1 : 4;
	struct sk_buff *skb;
	struct tx_pkt_info info;
	struct skb_data *entry;
	u16 pkt_len;
	u8 padlen, seq_num;
	u8 need_pages;
	int headroom;
	int tailroom;

	if (skb_queue_empty(q))
		return NULL;

	skb = skb_peek(q);
	pkt_len = skb->len;
	/* TX buffer pages are 128 bytes; round payload + headers up. */
	need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7;
	if (ax88796c_check_free_pages(ax_local, need_pages) != 0)
		return NULL;

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);
	padlen = round_up(pkt_len, 4) - pkt_len;
	/* 5-bit per-packet sequence number, echoed in SOP/EOP headers. */
	seq_num = ++ax_local->seq_num & 0x1F;

	info.pkt_len = pkt_len;

	/* Cloned skbs, or skbs without room for the SPI/SOP headers in
	 * front and pad + EOP behind, must be reallocated first.
	 */
	if (skb_cloned(skb) ||
	    (headroom < (TX_OVERHEAD + spi_len)) ||
	    (tailroom < (padlen + TX_EOP_SIZE))) {
		size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0);
		size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0);

		if (pskb_expand_head(skb, h, t, GFP_KERNEL))
			return NULL;
	}

	info.seq_num = seq_num;
	ax88796c_proc_tx_hdr(&info, skb->ip_summed);

	/* SOP and SEG header */
	memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);

	/* Write SPI TXQ header */
	memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);

	/* Make 32-bit alignment */
	skb_put(skb, padlen);

	/* EOP header */
	skb_put_data(skb, &info.eop, TX_EOP_SIZE);

	skb_unlink(skb, q);

	/* Track the original payload length in the skb control block so
	 * the hard_xmit path can account tx_bytes correctly.
	 */
	entry = (struct skb_data *)skb->cb;
	memset(entry, 0, sizeof(*entry));
	entry->len = pkt_len;

	if (netif_msg_pktdata(ax_local)) {
		char pfx[IFNAMSIZ + 7];

		snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);

		netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n",
			    pkt_len, skb->len, seq_num);

		netdev_info(ndev, " SPI Header:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, 4, 0);

		netdev_info(ndev, " TX SOP:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data + 4, TX_OVERHEAD, 0);

		netdev_info(ndev, " TX packet:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data + 4 + TX_OVERHEAD,
			       skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0);

		netdev_info(ndev, " TX EOP:\n");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data + skb->len - 4, 4, 0);
	}

	return skb;
}
/* Push one packet from the TX wait queue into the chip's TX bridge.
 * Returns 1 when a packet was handed to the hardware (caller keeps
 * draining the queue), 0 when nothing was sent.
 * Must be called with ax_local->spi_lock held.
 */
static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
{
	struct ax88796c_pcpu_stats *stats;
	struct sk_buff *tx_skb;
	struct skb_data *entry;
	unsigned long flags;

	lockdep_assert_held(&ax_local->spi_lock);

	stats = this_cpu_ptr(ax_local->stats);
	tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q);

	if (!tx_skb) {
		/* NOTE(review): tx_fixup() also returns NULL when the chip
		 * merely has no free TX pages yet (the skb stays queued for
		 * retry), so counting every NULL as a drop may overstate
		 * tx_dropped — confirm intended accounting.
		 */
		this_cpu_inc(ax_local->stats->tx_dropped);
		return 0;
	}
	entry = (struct skb_data *)tx_skb->cb;

	/* Announce one packet to the TX bridge, then stream it out. */
	AX_WRITE(&ax_local->ax_spi,
		 (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR);

	axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len);

	/* A non-idle bridge or a latched TXERR means the transfer failed. */
	if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) ||
	    ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) {
		/* Ack tx error int */
		AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR);
		this_cpu_inc(ax_local->stats->tx_dropped);

		if (net_ratelimit())
			netif_err(ax_local, tx_err, ax_local->ndev,
				  "TX FIFO error, re-initialize the TX bridge\n");

		/* Reinitial tx bridge */
		AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT |
			 AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR);
		ax_local->seq_num = 0;
	} else {
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, entry->len);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}

	entry->state = tx_done;
	dev_kfree_skb(tx_skb);
	return 1;
}
  300. static netdev_tx_t
  301. ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  302. {
  303. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  304. skb_queue_tail(&ax_local->tx_wait_q, skb);
  305. if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER)
  306. netif_stop_queue(ndev);
  307. set_bit(EVENT_TX, &ax_local->flags);
  308. schedule_work(&ax_local->ax_work);
  309. return NETDEV_TX_OK;
  310. }
  311. static void
  312. ax88796c_skb_return(struct ax88796c_device *ax_local,
  313. struct sk_buff *skb, struct rx_header *rxhdr)
  314. {
  315. struct net_device *ndev = ax_local->ndev;
  316. struct ax88796c_pcpu_stats *stats;
  317. unsigned long flags;
  318. int status;
  319. stats = this_cpu_ptr(ax_local->stats);
  320. do {
  321. if (!(ndev->features & NETIF_F_RXCSUM))
  322. break;
  323. /* checksum error bit is set */
  324. if ((rxhdr->flags & RX_HDR3_L3_ERR) ||
  325. (rxhdr->flags & RX_HDR3_L4_ERR))
  326. break;
  327. /* Other types may be indicated by more than one bit. */
  328. if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) ||
  329. (rxhdr->flags & RX_HDR3_L4_TYPE_UDP))
  330. skb->ip_summed = CHECKSUM_UNNECESSARY;
  331. } while (0);
  332. flags = u64_stats_update_begin_irqsave(&stats->syncp);
  333. u64_stats_inc(&stats->rx_packets);
  334. u64_stats_add(&stats->rx_bytes, skb->len);
  335. u64_stats_update_end_irqrestore(&stats->syncp, flags);
  336. skb->dev = ndev;
  337. skb->protocol = eth_type_trans(skb, ax_local->ndev);
  338. netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
  339. skb->len + sizeof(struct ethhdr), skb->protocol);
  340. status = netif_rx(skb);
  341. if (status != NET_RX_SUCCESS && net_ratelimit())
  342. netif_info(ax_local, rx_err, ndev,
  343. "netif_rx status %d\n", status);
  344. }
/* Validate the hardware RX header on @rx_skb, strip it, trim any pad
 * bytes, and pass the frame up via ax88796c_skb_return().
 * Consumes @rx_skb on all paths (freed on header/CRC errors).
 */
static void
ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb)
{
	struct rx_header *rxhdr = (struct rx_header *)rx_skb->data;
	struct net_device *ndev = ax_local->ndev;
	u16 len;

	/* Header fields arrive big-endian; convert them in place. */
	be16_to_cpus(&rxhdr->flags_len);
	be16_to_cpus(&rxhdr->seq_lenbar);
	be16_to_cpus(&rxhdr->flags);

	/* The inverted 11-bit length copy must match the length field. */
	if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) !=
	    (~rxhdr->seq_lenbar & 0x7FF)) {
		netif_err(ax_local, rx_err, ndev, "Header error\n");

		this_cpu_inc(ax_local->stats->rx_frame_errors);
		kfree_skb(rx_skb);
		return;
	}

	if ((rxhdr->flags_len & RX_HDR1_MII_ERR) ||
	    (rxhdr->flags_len & RX_HDR1_CRC_ERR)) {
		netif_err(ax_local, rx_err, ndev, "CRC or MII error\n");

		this_cpu_inc(ax_local->stats->rx_crc_errors);
		kfree_skb(rx_skb);
		return;
	}

	len = rxhdr->flags_len & RX_HDR1_PKT_LEN;
	if (netif_msg_pktdata(ax_local)) {
		char pfx[IFNAMSIZ + 7];

		snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);

		netdev_info(ndev, "RX data, total len %d, packet len %d\n",
			    rx_skb->len, len);

		netdev_info(ndev, " Dump RX packet header:");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       rx_skb->data, sizeof(*rxhdr), 0);

		netdev_info(ndev, " Dump RX packet:");
		print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
			       rx_skb->data + sizeof(*rxhdr), len, 0);
	}

	/* Drop the header and any trailing dword padding before handoff. */
	skb_pull(rx_skb, sizeof(*rxhdr));
	pskb_trim(rx_skb, len);

	ax88796c_skb_return(ax_local, rx_skb, rxhdr);
}
/* Read one pending packet from the chip's RX bridge into a fresh skb and
 * feed it to ax88796c_rx_fixup().
 * Returns 1 when a packet was consumed (caller should poll again),
 * 0 when no packet was pending or allocation failed.
 * Must be called with ax_local->spi_lock held.
 */
static int ax88796c_receive(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	struct skb_data *entry;
	u16 w_count, pkt_len;
	struct sk_buff *skb;
	u8 pkt_cnt;

	lockdep_assert_held(&ax_local->spi_lock);

	/* check rx packet and total word count */
	AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR)
		 | RTWCR_RX_LATCH, P0_RTWCR);

	pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK;
	if (!pkt_cnt)
		return 0;

	/* 11-bit packet length, plus 6 header bytes, rounded up to a
	 * dword boundary and expressed in 16-bit words for the bridge.
	 */
	pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF;

	w_count = round_up(pkt_len + 6, 4) >> 1;

	skb = netdev_alloc_skb(ndev, w_count * 2);
	if (!skb) {
		/* No memory: ask the bridge to discard this packet. */
		AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1);
		this_cpu_inc(ax_local->stats->rx_dropped);
		return 0;
	}
	entry = (struct skb_data *)skb->cb;

	AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1);

	axspi_read_rxq(&ax_local->ax_spi,
		       skb_put(skb, w_count * 2), skb->len);

	/* Check if rx bridge is idle */
	if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) {
		if (net_ratelimit())
			netif_err(ax_local, rx_err, ndev,
				  "Rx Bridge is not idle\n");
		AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2);

		entry->state = rx_err;
	} else {
		entry->state = rx_done;
	}

	/* Ack the RX-packet interrupt before handing the frame up. */
	AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR);

	ax88796c_rx_fixup(ax_local, skb);

	return 1;
}
  425. static int ax88796c_process_isr(struct ax88796c_device *ax_local)
  426. {
  427. struct net_device *ndev = ax_local->ndev;
  428. int todo = 0;
  429. u16 isr;
  430. lockdep_assert_held(&ax_local->spi_lock);
  431. isr = AX_READ(&ax_local->ax_spi, P0_ISR);
  432. AX_WRITE(&ax_local->ax_spi, isr, P0_ISR);
  433. netif_dbg(ax_local, intr, ndev, " ISR 0x%04x\n", isr);
  434. if (isr & ISR_TXERR) {
  435. netif_dbg(ax_local, intr, ndev, " TXERR interrupt\n");
  436. AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR);
  437. ax_local->seq_num = 0x1f;
  438. }
  439. if (isr & ISR_TXPAGES) {
  440. netif_dbg(ax_local, intr, ndev, " TXPAGES interrupt\n");
  441. set_bit(EVENT_TX, &ax_local->flags);
  442. }
  443. if (isr & ISR_LINK) {
  444. netif_dbg(ax_local, intr, ndev, " Link change interrupt\n");
  445. phy_mac_interrupt(ax_local->ndev->phydev);
  446. }
  447. if (isr & ISR_RXPKT) {
  448. netif_dbg(ax_local, intr, ndev, " RX interrupt\n");
  449. todo = ax88796c_receive(ax_local->ndev);
  450. }
  451. return todo;
  452. }
  453. static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance)
  454. {
  455. struct ax88796c_device *ax_local;
  456. struct net_device *ndev;
  457. ndev = dev_instance;
  458. if (!ndev) {
  459. pr_err("irq %d for unknown device.\n", irq);
  460. return IRQ_RETVAL(0);
  461. }
  462. ax_local = to_ax88796c_device(ndev);
  463. disable_irq_nosync(irq);
  464. netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n");
  465. set_bit(EVENT_INTR, &ax_local->flags);
  466. schedule_work(&ax_local->ax_work);
  467. return IRQ_HANDLED;
  468. }
/* Deferred worker: performs all sleeping SPI work signalled through
 * ax_local->flags — multicast filter updates, interrupt servicing, and
 * TX queue draining — under the spi_lock mutex.
 */
static void ax88796c_work(struct work_struct *work)
{
	struct ax88796c_device *ax_local =
		container_of(work, struct ax88796c_device, ax_work);

	mutex_lock(&ax_local->spi_lock);

	if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) {
		ax88796c_set_hw_multicast(ax_local->ndev);
		clear_bit(EVENT_SET_MULTI, &ax_local->flags);
	}

	if (test_bit(EVENT_INTR, &ax_local->flags)) {
		/* Mask chip interrupts while draining all pending causes. */
		AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);

		while (ax88796c_process_isr(ax_local))
			/* nothing */;

		clear_bit(EVENT_INTR, &ax_local->flags);

		AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
		/* Re-arm the IRQ line masked in ax88796c_interrupt(). */
		enable_irq(ax_local->ndev->irq);
	}

	if (test_bit(EVENT_TX, &ax_local->flags)) {
		/* Drain until empty or the chip runs out of free pages. */
		while (skb_queue_len(&ax_local->tx_wait_q)) {
			if (!ax88796c_hard_xmit(ax_local))
				break;
		}

		clear_bit(EVENT_TX, &ax_local->flags);

		if (netif_queue_stopped(ax_local->ndev) &&
		    (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER))
			netif_wake_queue(ax_local->ndev);
	}

	mutex_unlock(&ax_local->spi_lock);
}
  498. static void ax88796c_get_stats64(struct net_device *ndev,
  499. struct rtnl_link_stats64 *stats)
  500. {
  501. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  502. u32 rx_frame_errors = 0, rx_crc_errors = 0;
  503. u32 rx_dropped = 0, tx_dropped = 0;
  504. unsigned int start;
  505. int cpu;
  506. for_each_possible_cpu(cpu) {
  507. struct ax88796c_pcpu_stats *s;
  508. u64 rx_packets, rx_bytes;
  509. u64 tx_packets, tx_bytes;
  510. s = per_cpu_ptr(ax_local->stats, cpu);
  511. do {
  512. start = u64_stats_fetch_begin_irq(&s->syncp);
  513. rx_packets = u64_stats_read(&s->rx_packets);
  514. rx_bytes = u64_stats_read(&s->rx_bytes);
  515. tx_packets = u64_stats_read(&s->tx_packets);
  516. tx_bytes = u64_stats_read(&s->tx_bytes);
  517. } while (u64_stats_fetch_retry_irq(&s->syncp, start));
  518. stats->rx_packets += rx_packets;
  519. stats->rx_bytes += rx_bytes;
  520. stats->tx_packets += tx_packets;
  521. stats->tx_bytes += tx_bytes;
  522. rx_dropped += s->rx_dropped;
  523. tx_dropped += s->tx_dropped;
  524. rx_frame_errors += s->rx_frame_errors;
  525. rx_crc_errors += s->rx_crc_errors;
  526. }
  527. stats->rx_dropped = rx_dropped;
  528. stats->tx_dropped = tx_dropped;
  529. stats->rx_frame_errors = rx_frame_errors;
  530. stats->rx_crc_errors = rx_crc_errors;
  531. }
  532. static void ax88796c_set_mac(struct ax88796c_device *ax_local)
  533. {
  534. u16 maccr;
  535. maccr = (ax_local->link) ? MACCR_RXEN : 0;
  536. switch (ax_local->speed) {
  537. case SPEED_100:
  538. maccr |= MACCR_SPEED_100;
  539. break;
  540. case SPEED_10:
  541. case SPEED_UNKNOWN:
  542. break;
  543. default:
  544. return;
  545. }
  546. switch (ax_local->duplex) {
  547. case DUPLEX_FULL:
  548. maccr |= MACCR_SPEED_100;
  549. break;
  550. case DUPLEX_HALF:
  551. case DUPLEX_UNKNOWN:
  552. break;
  553. default:
  554. return;
  555. }
  556. if (ax_local->flowctrl & AX_FC_ANEG &&
  557. ax_local->phydev->autoneg) {
  558. maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0;
  559. maccr |= !ax_local->pause != !ax_local->asym_pause ?
  560. MACCR_TXFC_ENABLE : 0;
  561. } else {
  562. maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
  563. maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
  564. }
  565. mutex_lock(&ax_local->spi_lock);
  566. maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
  567. ~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 |
  568. MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
  569. AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
  570. mutex_unlock(&ax_local->spi_lock);
  571. }
  572. static void ax88796c_handle_link_change(struct net_device *ndev)
  573. {
  574. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  575. struct phy_device *phydev = ndev->phydev;
  576. bool update = false;
  577. if (phydev->link && (ax_local->speed != phydev->speed ||
  578. ax_local->duplex != phydev->duplex ||
  579. ax_local->pause != phydev->pause ||
  580. ax_local->asym_pause != phydev->asym_pause)) {
  581. ax_local->speed = phydev->speed;
  582. ax_local->duplex = phydev->duplex;
  583. ax_local->pause = phydev->pause;
  584. ax_local->asym_pause = phydev->asym_pause;
  585. update = true;
  586. }
  587. if (phydev->link != ax_local->link) {
  588. if (!phydev->link) {
  589. ax_local->speed = SPEED_UNKNOWN;
  590. ax_local->duplex = DUPLEX_UNKNOWN;
  591. }
  592. ax_local->link = phydev->link;
  593. update = true;
  594. }
  595. if (update)
  596. ax88796c_set_mac(ax_local);
  597. if (net_ratelimit())
  598. phy_print_status(ndev->phydev);
  599. }
  600. static void ax88796c_set_csums(struct ax88796c_device *ax_local)
  601. {
  602. struct net_device *ndev = ax_local->ndev;
  603. lockdep_assert_held(&ax_local->spi_lock);
  604. if (ndev->features & NETIF_F_RXCSUM) {
  605. AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0);
  606. AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1);
  607. } else {
  608. AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0);
  609. AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1);
  610. }
  611. if (ndev->features & NETIF_F_HW_CSUM) {
  612. AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0);
  613. AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1);
  614. } else {
  615. AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0);
  616. AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1);
  617. }
  618. }
/* ndo_open: claim the IRQ, reset and configure the chip (MAC address,
 * checksum offload, RX processing, LEDs, interrupt mask), derive the
 * flow-control mode from the PHY's advertised pause bits, and start the
 * PHY and TX queue.
 */
static int
ax88796c_open(struct net_device *ndev)
{
	struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
	unsigned long irq_flag = 0;
	int fc = AX_FC_NONE;
	int ret;
	u16 t;

	ret = request_irq(ndev->irq, ax88796c_interrupt,
			  irq_flag, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n",
			   ndev->irq, ret);
		return ret;
	}

	mutex_lock(&ax_local->spi_lock);

	ret = ax88796c_soft_reset(ax_local);
	if (ret < 0) {
		free_irq(ndev->irq, ndev);
		mutex_unlock(&ax_local->spi_lock);
		return ret;
	}
	/* Start the 5-bit TX sequence counter so the first packet wraps
	 * to sequence 0 (tx_fixup pre-increments).
	 */
	ax_local->seq_num = 0x1f;

	ax88796c_set_mac_addr(ndev);
	ax88796c_set_csums(ax_local);

	/* Disable stuffing packet */
	t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR);
	t &= ~RXBSPCR_STUF_ENABLE;
	AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR);

	/* Enable RX packet process */
	AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER);

	/* Enable RX/TX paths, byte swap, and the IRQ line function. */
	t = AX_READ(&ax_local->ax_spi, P0_FER);
	t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL;
	AX_WRITE(&ax_local->ax_spi, t, P0_FER);

	/* Setup LED mode */
	AX_WRITE(&ax_local->ax_spi,
		 (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN |
		  LCR_LED1_100MODE), P2_LCR0);
	AX_WRITE(&ax_local->ax_spi,
		 (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) |
		 LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1);

	/* Disable PHY auto-polling */
	AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR);

	/* Enable MAC interrupts */
	AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);

	mutex_unlock(&ax_local->spi_lock);

	/* Setup flow-control configuration from the advertised pause bits:
	 * any pause capability enables autoneg flow control; symmetric
	 * pause enables RX pause, mismatched pause bits enable TX pause.
	 */
	phy_support_asym_pause(ax_local->phydev);

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			      ax_local->phydev->advertising) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			      ax_local->phydev->advertising))
		fc |= AX_FC_ANEG;

	fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				ax_local->phydev->advertising) ? AX_FC_RX : 0;
	fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 ax_local->phydev->advertising) !=
	       linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 ax_local->phydev->advertising)) ? AX_FC_TX : 0;
	ax_local->flowctrl = fc;

	phy_start(ax_local->ndev->phydev);

	netif_start_queue(ndev);

	spi_message_init(&ax_local->ax_spi.rx_msg);

	return 0;
}
  684. static int
  685. ax88796c_close(struct net_device *ndev)
  686. {
  687. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  688. phy_stop(ndev->phydev);
  689. /* We lock the mutex early not only to protect the device
  690. * against concurrent access, but also avoid waking up the
  691. * queue in ax88796c_work(). phy_stop() needs to be called
  692. * before because it locks the mutex to access SPI.
  693. */
  694. mutex_lock(&ax_local->spi_lock);
  695. netif_stop_queue(ndev);
  696. /* No more work can be scheduled now. Make any pending work,
  697. * including one already waiting for the mutex to be unlocked,
  698. * NOP.
  699. */
  700. netif_dbg(ax_local, ifdown, ndev, "clearing bits\n");
  701. clear_bit(EVENT_SET_MULTI, &ax_local->flags);
  702. clear_bit(EVENT_INTR, &ax_local->flags);
  703. clear_bit(EVENT_TX, &ax_local->flags);
  704. /* Disable MAC interrupts */
  705. AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
  706. __skb_queue_purge(&ax_local->tx_wait_q);
  707. ax88796c_soft_reset(ax_local);
  708. mutex_unlock(&ax_local->spi_lock);
  709. cancel_work_sync(&ax_local->ax_work);
  710. free_irq(ndev->irq, ndev);
  711. return 0;
  712. }
  713. static int
  714. ax88796c_set_features(struct net_device *ndev, netdev_features_t features)
  715. {
  716. struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
  717. netdev_features_t changed = features ^ ndev->features;
  718. if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)))
  719. return 0;
  720. ndev->features = features;
  721. if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM))
  722. ax88796c_set_csums(ax_local);
  723. return 0;
  724. }
/* Net device callbacks wired into the core networking stack. */
static const struct net_device_ops ax88796c_netdev_ops = {
	.ndo_open = ax88796c_open,
	.ndo_stop = ax88796c_close,
	.ndo_start_xmit = ax88796c_start_xmit,
	.ndo_get_stats64 = ax88796c_get_stats64,
	.ndo_eth_ioctl = ax88796c_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_features = ax88796c_set_features,
};
  734. static int ax88796c_hard_reset(struct ax88796c_device *ax_local)
  735. {
  736. struct device *dev = (struct device *)&ax_local->spi->dev;
  737. struct gpio_desc *reset_gpio;
  738. /* reset info */
  739. reset_gpio = gpiod_get(dev, "reset", 0);
  740. if (IS_ERR(reset_gpio)) {
  741. dev_err(dev, "Could not get 'reset' GPIO: %ld", PTR_ERR(reset_gpio));
  742. return PTR_ERR(reset_gpio);
  743. }
  744. /* set reset */
  745. gpiod_direction_output(reset_gpio, 1);
  746. msleep(100);
  747. gpiod_direction_output(reset_gpio, 0);
  748. gpiod_put(reset_gpio);
  749. msleep(20);
  750. return 0;
  751. }
  752. static int ax88796c_probe(struct spi_device *spi)
  753. {
  754. char phy_id[MII_BUS_ID_SIZE + 3];
  755. struct ax88796c_device *ax_local;
  756. struct net_device *ndev;
  757. u16 temp;
  758. int ret;
  759. ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local));
  760. if (!ndev)
  761. return -ENOMEM;
  762. SET_NETDEV_DEV(ndev, &spi->dev);
  763. ax_local = to_ax88796c_device(ndev);
  764. dev_set_drvdata(&spi->dev, ax_local);
  765. ax_local->spi = spi;
  766. ax_local->ax_spi.spi = spi;
  767. ax_local->stats =
  768. devm_netdev_alloc_pcpu_stats(&spi->dev,
  769. struct ax88796c_pcpu_stats);
  770. if (!ax_local->stats)
  771. return -ENOMEM;
  772. ax_local->ndev = ndev;
  773. ax_local->priv_flags |= comp ? AX_CAP_COMP : 0;
  774. ax_local->msg_enable = msg_enable;
  775. mutex_init(&ax_local->spi_lock);
  776. ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev);
  777. if (!ax_local->mdiobus)
  778. return -ENOMEM;
  779. ax_local->mdiobus->priv = ax_local;
  780. ax_local->mdiobus->read = ax88796c_mdio_read;
  781. ax_local->mdiobus->write = ax88796c_mdio_write;
  782. ax_local->mdiobus->name = "ax88976c-mdiobus";
  783. ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID);
  784. ax_local->mdiobus->parent = &spi->dev;
  785. snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
  786. "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
  787. ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
  788. if (ret < 0) {
  789. dev_err(&spi->dev, "Could not register MDIO bus\n");
  790. return ret;
  791. }
  792. if (netif_msg_probe(ax_local)) {
  793. dev_info(&spi->dev, "AX88796C-SPI Configuration:\n");
  794. dev_info(&spi->dev, " Compression : %s\n",
  795. ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF");
  796. }
  797. ndev->irq = spi->irq;
  798. ndev->netdev_ops = &ax88796c_netdev_ops;
  799. ndev->ethtool_ops = &ax88796c_ethtool_ops;
  800. ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
  801. ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
  802. ndev->needed_headroom = TX_OVERHEAD;
  803. ndev->needed_tailroom = TX_EOP_SIZE;
  804. mutex_lock(&ax_local->spi_lock);
  805. /* ax88796c gpio reset */
  806. ax88796c_hard_reset(ax_local);
  807. /* Reset AX88796C */
  808. ret = ax88796c_soft_reset(ax_local);
  809. if (ret < 0) {
  810. ret = -ENODEV;
  811. mutex_unlock(&ax_local->spi_lock);
  812. goto err;
  813. }
  814. /* Check board revision */
  815. temp = AX_READ(&ax_local->ax_spi, P2_CRIR);
  816. if ((temp & 0xF) != 0x0) {
  817. dev_err(&spi->dev, "spi read failed: %d\n", temp);
  818. ret = -ENODEV;
  819. mutex_unlock(&ax_local->spi_lock);
  820. goto err;
  821. }
  822. /*Reload EEPROM*/
  823. ax88796c_reload_eeprom(ax_local);
  824. ax88796c_load_mac_addr(ndev);
  825. if (netif_msg_probe(ax_local))
  826. dev_info(&spi->dev,
  827. "irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
  828. ndev->irq,
  829. ndev->dev_addr[0], ndev->dev_addr[1],
  830. ndev->dev_addr[2], ndev->dev_addr[3],
  831. ndev->dev_addr[4], ndev->dev_addr[5]);
  832. /* Disable power saving */
  833. AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR)
  834. & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR);
  835. mutex_unlock(&ax_local->spi_lock);
  836. INIT_WORK(&ax_local->ax_work, ax88796c_work);
  837. skb_queue_head_init(&ax_local->tx_wait_q);
  838. snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
  839. ax_local->mdiobus->id, AX88796C_PHY_ID);
  840. ax_local->phydev = phy_connect(ax_local->ndev, phy_id,
  841. ax88796c_handle_link_change,
  842. PHY_INTERFACE_MODE_MII);
  843. if (IS_ERR(ax_local->phydev)) {
  844. ret = PTR_ERR(ax_local->phydev);
  845. goto err;
  846. }
  847. ax_local->phydev->irq = PHY_POLL;
  848. ret = devm_register_netdev(&spi->dev, ndev);
  849. if (ret) {
  850. dev_err(&spi->dev, "failed to register a network device\n");
  851. goto err_phy_dis;
  852. }
  853. netif_info(ax_local, probe, ndev, "%s %s registered\n",
  854. dev_driver_string(&spi->dev),
  855. dev_name(&spi->dev));
  856. phy_attached_info(ax_local->phydev);
  857. return 0;
  858. err_phy_dis:
  859. phy_disconnect(ax_local->phydev);
  860. err:
  861. return ret;
  862. }
/* SPI remove: tear down the non-devm resources.
 *
 * Only the PHY connection needs explicit cleanup here; the netdev,
 * MDIO bus and statistics were all devm-allocated in probe and are
 * released automatically after this returns.
 */
static void ax88796c_remove(struct spi_device *spi)
{
	struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
	struct net_device *ndev = ax_local->ndev;

	/* Detach the PHY before the devm-managed netdev goes away. */
	phy_disconnect(ndev->phydev);

	netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
		   dev_driver_string(&spi->dev),
		   dev_name(&spi->dev));
}
#ifdef CONFIG_OF
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id ax88796c_dt_ids[] = {
	{ .compatible = "asix,ax88796c" },
	{},
};
MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
#endif
/* Legacy (non-DT) SPI device id table; also used for autoloading. */
static const struct spi_device_id asix_id[] = {
	{ "ax88796c", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, asix_id);
/* SPI driver glue: of_match_ptr() compiles the DT table away when
 * CONFIG_OF is not set (matching the #ifdef around ax88796c_dt_ids).
 */
static struct spi_driver ax88796c_spi_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ax88796c_dt_ids),
	},
	.probe = ax88796c_probe,
	.remove = ax88796c_remove,
	.id_table = asix_id,
};
/* Module init: parse the register-dump exclusion list, then register
 * the SPI driver.
 */
static __init int ax88796c_spi_init(void)
{
	int ret;

	bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
	/* NOTE(review): 35 is presumably the character length of the
	 * no_regs_list string defined elsewhere in this file — confirm
	 * it stays in sync if that list is ever edited.
	 */
	ret = bitmap_parse(no_regs_list, 35,
			   ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
	if (ret) {
		/* Fail safe: mask every register rather than dump with a
		 * bogus exclusion list.
		 */
		bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
		pr_err("Invalid bitmap description, masking all registers\n");
	}

	return spi_register_driver(&ax88796c_spi_driver);
}
/* Module exit: unregister the SPI driver (remove() runs per device). */
static __exit void ax88796c_spi_exit(void)
{
	spi_unregister_driver(&ax88796c_spi_driver);
}
/* Module entry/exit hooks and metadata. */
module_init(ax88796c_spi_init);
module_exit(ax88796c_spi_exit);

MODULE_AUTHOR("Łukasz Stelmach <[email protected]>");
MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver");
MODULE_LICENSE("GPL");