dwmac-intel.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;
	bool is_pse;
};

/* This struct is used to associate PCI Function of MAC controller on a board,
 * discovered via DMI, with the address of PHY connected to the MAC. The
 * negative value of the address means that MAC controller is not connected
 * with PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}
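
/* SerDes bring-up sequence used by intel_serdes_powerup() below: program
 * the SerDes rate and PCLK for the target link speed, assert clk_req and
 * wait for clk_ack, assert the lane reset and wait for it to be reflected
 * in GSR0, then move the power state to P0. Each step is confirmed with
 * serdes_status_poll() on the ad-hoc MDIO address.
 */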
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Set the serdes rate and the PCLK rate */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);

	data &= ~SERDES_RATE_MASK;
	data &= ~SERDES_PCLK_MASK;

	if (priv->plat->max_speed == 2500)
		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
	else
		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}
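
/* intel_serdes_powerdown() walks the sequence above in reverse: gate the
 * PSE SGMII Rx clock where applicable, drop the power state to P3, release
 * clk_req and wait for clk_ack to de-assert, then release the lane reset.
 */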
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}
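
/* The negotiated link speed mode is read back from the SERDES_GCR link
 * mode field. When it reports 2.5G, the platform data is switched to
 * 2500BASE-X with a 2500 Mbps max speed and in-band AN on the XPCS is
 * disabled; otherwise the 1G setup is kept.
 */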
static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Determine the link speed mode: 2.5Gbps/1Gbps */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR);

	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
	    SERDES_LINK_MODE_2G5) {
		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
		priv->plat->max_speed = 2500;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
		priv->plat->mdio_bus_data->xpcs_an_inband = false;
	} else {
		priv->plat->max_speed = 1000;
	}
}

/* Program PTP Clock Frequency for different variant of
 * Intel mGBE that has slightly different GPO mapping
 */
static void intel_mgbe_ptp_clk_freq_config(void *npriv)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}
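
/* The latched ART timestamp is exposed as four words on the ad-hoc MDIO
 * address (PMC_ART_VALUE3..0). get_arttime() reassembles them
 * most-significant word first, shifting by GMAC4_ART_TIME_SHIFT between
 * reads.
 */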
static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
			u64 *art_time)
{
	u64 ns;

	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);

	*art_time = ns;
}

static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
{
	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
}
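
/* Cross-timestamping flow: enable the internal auxiliary snapshot
 * (ATSENx) selected by int_snapshot_num, flush the snapshot FIFO, then
 * toggle GPO1 to generate the trigger edge. Once the timestamp interrupt
 * fires, the FIFO is drained so that the last PTP/ART pair is kept, and
 * the ART cycle count is scaled by crossts_adj before being returned.
 */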
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;
	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Internal cross-timestamping and externally triggered event
	 * timestamping cannot run concurrently.
	 */
	if (priv->plat->ext_snapshot_en)
		return -EBUSY;

	priv->plat->int_snapshot_en = 1;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		priv->plat->int_snapshot_en = 0;
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger Internal snapshot signal
	 * Create a rising edge by toggling GPO1 low
	 * and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Time sync done Indication - Interrupt method */
	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
					      stmmac_cross_ts_isr(priv),
					      HZ / 100)) {
		priv->plat->int_snapshot_en = 0;
		return -ETIMEDOUT;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the FIFO last segment */
	for (i = 0; i < num_snapshot; i++) {
		read_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		read_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		*system = convert_art_to_tsc(art_time);
	}

	system->cycles *= intel_priv->crossts_adj;
	priv->plat->int_snapshot_en = 0;

	return 0;
}

static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* On systems that support ART, ART frequency can be obtained
		 * from ECX register of CPUID leaf (0x15).
		 */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}
static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}
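
/* Defaults shared by all Intel mGbE variants (EHL/TGL/ADL): GMAC4 with
 * TSO, DCB queues with TBS enabled on every TX queue except queue 0, WRR
 * TX scheduling, a fixed-rate "stmmac" clock registered at clk_ptp_rate,
 * XPCS-based SGMII/1000BASE-X handling, and the fixed MSI vector map
 * (RX/TX base vectors 0/1, safety 26/27, LPI 28, MAC 29).
 */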
static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	struct fwnode_handle *fwnode;
	char clk_name[20];
	int ret;
	int i;

	plat->pdev = pdev;
	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->tso_en = 1;
	plat->sph_disable = 1;

	/* Multiplying factor to the clk_eee_i clock time
	 * period to make it closer to 100 ns. This value
	 * should be programmed such that the clk_eee_time_period *
	 * (MULT_FACT_100NS + 1) should be within 80 ns to 120 ns
	 * clk_eee frequency is 19.2MHz
	 * clk_eee_time_period is 52ns
	 * 52ns * (1 + 1) = 104ns
	 * MULT_FACT_100NS = 1
	 */
	plat->mult_fact_100ns = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
		/* Default TX Q0 to use TSO and rest TXQ for TBS */
		if (i > 0)
			plat->tx_queues_cfg[i].tbs_en = 1;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;
	plat->dma_cfg->dche = true;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->vlan_fail_q_en = true;
	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	/* For fixed-link setup, we allow phy-mode setting */
	fwnode = dev_fwnode(&pdev->dev);
	if (fwnode) {
		int phy_mode;

		/* "phy-mode" setting is optional. If it is set,
		 * we allow either sgmii or 1000base-x for now.
		 */
		phy_mode = fwnode_get_phy_mode(fwnode);
		if (phy_mode >= 0) {
			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
				plat->phy_interface = phy_mode;
			else
				dev_warn(&pdev->dev, "Invalid phy-mode\n");
		}
	}

	/* Intel mgbe SGMII interface uses pcs-xpcs */
	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
		plat->mdio_bus_data->has_xpcs = true;
		plat->mdio_bus_data->xpcs_an_inband = true;
	}

	/* For fixed-link setup, we clear xpcs_an_inband */
	if (fwnode) {
		struct fwnode_handle *fixed_node;

		fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
		if (fixed_node)
			plat->mdio_bus_data->xpcs_an_inband = false;

		fwnode_handle_put(fixed_node);
	}

	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;

	plat->int_snapshot_num = AUX_SNAPSHOT1;
	plat->ext_snapshot_num = AUX_SNAPSHOT0;

	plat->crosststamp = intel_crosststamp;
	plat->int_snapshot_en = 0;

	/* Setup MSI vector offset specific to Intel mGbE controller */
	plat->msi_mac_vec = 29;
	plat->msi_lpi_vec = 28;
	plat->msi_sfty_ce_vec = 27;
	plat->msi_sfty_ue_vec = 26;
	plat->msi_rx_base_vec = 0;
	plat->msi_tx_base_vec = 1;

	return 0;
}
static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->use_phy_wol = 1;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 1;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;

	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	plat->clk_ptp_rate = 204800000;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	plat->clk_ptp_rate = 204800000;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->host_dma_width = 32;

	plat->clk_ptp_rate = 200000000;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->host_dma_width = 32;

	plat->clk_ptp_rate = 200000000;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 204800000;
	plat->speed_mode_2500 = intel_speed_mode_2500;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}
static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};

static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};
static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};
static int stmmac_config_single_msi(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
			 __func__);
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);
	res->wol_irq = res->irq;
	plat->multi_msi_en = 0;
	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
		 __func__);

	return 0;
}
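
/* Multi-vector MSI layout: RX queue i maps to vector msi_rx_base_vec +
 * 2 * i and TX queue i to msi_tx_base_vec + 2 * i, so RX and TX vectors
 * interleave on even/odd offsets. MAC, WoL, LPI and safety interrupts get
 * their own vectors only when the corresponding msi_*_vec field is below
 * STMMAC_MSI_VEC_MAX.
 */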
static int stmmac_config_multi_msi(struct pci_dev *pdev,
				   struct plat_stmmacenet_data *plat,
				   struct stmmac_resources *res)
{
	int ret;
	int i;

	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
			 __func__);
		return -1;
	}

	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
			 __func__);
		return ret;
	}

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
	}

	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);

	plat->multi_msi_en = 1;
	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);

	return 0;
}
/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
					     sizeof(*plat->safety_feat_cfg),
					     GFP_KERNEL);
	if (!plat->safety_feat_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
	intel_priv->crossts_adj = 1;

	/* Initialize all MSI vectors to invalid so that they can be set
	 * according to the platform data settings below.
	 * Note: MSI vector takes value from 0 up to 31 (STMMAC_MSI_VEC_MAX)
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret)
		goto err_alloc_irq;

	return 0;

err_alloc_irq:
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	return ret;
}
/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 *
 * Description: this function calls the main driver-removal routine to free
 * the net resources and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	clk_disable_unprepare(priv->plat->stmmac_clk);
	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_wake_from_d3(pdev, true);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}
static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK		0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver = {
		.pm = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <[email protected]>");
MODULE_LICENSE("GPL v2");