hip04_eth.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define SC_PPE_RESET_DREQ		0x026C

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210

#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR		0x6D0
#define PPE_CFG_MAX_FRAME_LEN_REG	0x500
#define PPE_CFG_RX_PKT_MODE_REG		0x504
#define PPE_CFG_QOS_VMID_GEN		0x520
#define PPE_CFG_RX_PKT_INT		0x740
#define PPE_INTEN			0x700
#define PPE_INTSTS			0x708
#define PPE_RINT			0x704
#define PPE_CFG_STS_MODE		0x880
#else
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_HIS_RX_PKT_CNT		0x804

#define RESET_DREQ_ALL			0xffffffff

/* REG_INTERRUPT */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#if defined(CONFIG_HI13X1_GMAC)
#define TX_CLEAR_WB			BIT(7)
#define TX_RELEASE_TO_PPE		BIT(4)
#define TX_FINISH_CACHE_INV		BIT(6)
#define TX_POOL_SHIFT			16
#else
#define TX_CLEAR_WB			BIT(4)
#define TX_FINISH_CACHE_INV		BIT(2)
#endif
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

/* RX error */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	4
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	7
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(0)
#define PPE_CFG_QOS_VMID_MODE		BIT(15)
#define PPE_CFG_BUS_LOCAL_REL		(BIT(9) | BIT(15) | BIT(19) | BIT(23))

/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT		6
#define PPE_TX_BUF_HOLD			BIT(31)
#define SOC_CACHE_LINE_MASK		0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
/* buf unit size is 1, so the shift is 0 */
#define PPE_BUF_SIZE_SHIFT		0
#define PPE_TX_BUF_HOLD			0
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0

#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100

#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100

struct tx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[2];
	u32 send_addr;
	u16 send_size;
	u16 data_offset;
	u32 reserved2[7];
	u32 cfg;
	u32 wb_addr;
	u32 reserved3[3];
#else
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
#endif
} __aligned(64);

struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[3];
	u16 pkt_len;
	u16 reserved_16;
	u32 reserved2[6];
	u32 pkt_err;
	u32 reserved3[5];
#else
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
#endif
};

struct hip04_priv {
	void __iomem *base;
#if defined(CONFIG_HI13X1_GMAC)
	void __iomem *sysctrl_base;
#endif
	phy_interface_t phy_mode;
	int chan;
	unsigned int port;
	unsigned int group;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct device *dev;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;
	unsigned int rx_cnt_remaining;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};

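/*
 * tx_head is advanced only by the xmit path and tx_tail only by
 * reclaim, so the TX ring is single-producer/single-consumer and
 * this distance is the number of descriptors in flight.
 */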
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % TX_DESC_NUM;
}

static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "unsupported phy mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
	writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}

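/* Drain the port's PPE buffer FIFO: read back queued RX buffer
 * addresses until the current buffer count drops to zero, giving
 * up after RESET_TIMEOUT iterations.
 */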
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}

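/* One-time datapath setup: RX pool group, queue depth and start
 * channel, packet alignment, frame length limits, and CRC/padding
 * handling for both directions.
 */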
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->group);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}

static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}

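/* Buffer addresses are passed to the PPE in buffer units: 64-byte
 * cache lines on HI13X1 (PPE_BUF_SIZE_SHIFT == 6), plain bytes
 * otherwise (shift of 0).
 */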
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
	writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

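/* The station MAC address spans two registers: bytes 0-1 in the
 * first, bytes 2-5 in the second.
 */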
static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}

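/* Reclaim completed TX descriptors. The hardware clears send_addr
 * through the descriptor's write-back address once transmission is
 * done, so a non-zero send_addr means the slot is still in flight
 * unless @force is set (interface teardown).
 */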
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned int tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}

static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}

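/* Queue one skb. TX completion is not signalled by interrupt;
 * descriptors are reclaimed from the NAPI poll loop, driven either
 * by RX traffic or by the TX coalesce hrtimer.
 */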
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;

	desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
	desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
	desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = (__force u32)cpu_to_be32(phys +
		offsetof(struct tx_desc, send_addr));
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	count++;
	netdev_sent_queue(ndev, skb->len);
	priv->tx_head = TX_NEXT(tx_head);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}

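/* NAPI poll: reclaim TX descriptors first, then receive up to
 * @budget packets. The RX packet counter is configured read-clear
 * (PPE_CFG_STS_RX_PKT_CNT_RC), so any packets not processed this
 * round are carried over in rx_cnt_remaining.
 */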
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	/* clean up tx descriptors */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	while (priv->rx_cnt_remaining && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu((__force __be16)desc->pkt_len);
		err = be32_to_cpu((__force __be32)desc->pkt_err);

		if (len == 0) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(priv->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget) {
			--priv->rx_cnt_remaining;
			goto done;
		}

		if (--priv->rx_cnt_remaining == 0)
			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);

done:
	/* start a new timer if necessary */
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}

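/* Count drop errors and, on RCV_INT, mask the RX interrupt and
 * hand the rest of the work to NAPI.
 */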
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

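/* TX coalesce timer expired: schedule NAPI so hip04_rx_poll() can
 * reclaim the pending TX descriptors.
 */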
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

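/* Bring the interface up: reset the PPE, map and post all RX
 * buffers, then start the PHY, the TX queue and NAPI.
 */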
static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->rx_cnt_remaining = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(priv->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(priv->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}

static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}

static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}

static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static const struct ethtool_ops hip04_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};

static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};

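/* TX descriptors live in one coherent DMA allocation; RX buffers
 * are page fragments sized for build_skb(), i.e. RX_BUF_SIZE plus
 * the skb_shared_info overhead.
 */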
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}

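/* The PPE is a syscon shared between ports; the per-port
 * parameters (port index, RX channel base, pool group) come from
 * the three cells of the "port-handle" phandle.
 */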
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = d;
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

#if defined(CONFIG_HI13X1_GMAC)
	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->sysctrl_base)) {
		ret = PTR_ERR(priv->sysctrl_base);
		goto init_fail;
	}
#endif

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;
	priv->group = arg.args[2];

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	ret = of_get_phy_mode(node, &priv->phy_mode);
	if (ret) {
		dev_warn(d, "failed to get phy-mode\n");
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll);

	hip04_reset_dreq(priv);
	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	eth_hw_addr_random(ndev);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring failed\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");