nxp-c45-tja11xx.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* NXP C45 PHY driver
  3. * Copyright (C) 2021 NXP
  4. * Author: Radu Pirea <[email protected]>
  5. */
  6. #include <linux/delay.h>
  7. #include <linux/ethtool.h>
  8. #include <linux/ethtool_netlink.h>
  9. #include <linux/kernel.h>
  10. #include <linux/mii.h>
  11. #include <linux/module.h>
  12. #include <linux/phy.h>
  13. #include <linux/processor.h>
  14. #include <linux/property.h>
  15. #include <linux/ptp_classify.h>
  16. #include <linux/ptp_clock_kernel.h>
  17. #include <linux/net_tstamp.h>
  18. #define PHY_ID_TJA_1103 0x001BB010
  19. #define PMAPMD_B100T1_PMAPMD_CTL 0x0834
  20. #define B100T1_PMAPMD_CONFIG_EN BIT(15)
  21. #define B100T1_PMAPMD_MASTER BIT(14)
  22. #define MASTER_MODE (B100T1_PMAPMD_CONFIG_EN | \
  23. B100T1_PMAPMD_MASTER)
  24. #define SLAVE_MODE (B100T1_PMAPMD_CONFIG_EN)
  25. #define VEND1_DEVICE_CONTROL 0x0040
  26. #define DEVICE_CONTROL_RESET BIT(15)
  27. #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
  28. #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
  29. #define VEND1_PHY_IRQ_ACK 0x80A0
  30. #define VEND1_PHY_IRQ_EN 0x80A1
  31. #define VEND1_PHY_IRQ_STATUS 0x80A2
  32. #define PHY_IRQ_LINK_EVENT BIT(1)
  33. #define VEND1_PHY_CONTROL 0x8100
  34. #define PHY_CONFIG_EN BIT(14)
  35. #define PHY_START_OP BIT(0)
  36. #define VEND1_PHY_CONFIG 0x8108
  37. #define PHY_CONFIG_AUTO BIT(0)
  38. #define VEND1_SIGNAL_QUALITY 0x8320
  39. #define SQI_VALID BIT(14)
  40. #define SQI_MASK GENMASK(2, 0)
  41. #define MAX_SQI SQI_MASK
  42. #define VEND1_CABLE_TEST 0x8330
  43. #define CABLE_TEST_ENABLE BIT(15)
  44. #define CABLE_TEST_START BIT(14)
  45. #define CABLE_TEST_VALID BIT(13)
  46. #define CABLE_TEST_OK 0x00
  47. #define CABLE_TEST_SHORTED 0x01
  48. #define CABLE_TEST_OPEN 0x02
  49. #define CABLE_TEST_UNKNOWN 0x07
  50. #define VEND1_PORT_CONTROL 0x8040
  51. #define PORT_CONTROL_EN BIT(14)
  52. #define VEND1_PORT_ABILITIES 0x8046
  53. #define PTP_ABILITY BIT(3)
  54. #define VEND1_PORT_INFRA_CONTROL 0xAC00
  55. #define PORT_INFRA_CONTROL_EN BIT(14)
  56. #define VEND1_RXID 0xAFCC
  57. #define VEND1_TXID 0xAFCD
  58. #define ID_ENABLE BIT(15)
  59. #define VEND1_ABILITIES 0xAFC4
  60. #define RGMII_ID_ABILITY BIT(15)
  61. #define RGMII_ABILITY BIT(14)
  62. #define RMII_ABILITY BIT(10)
  63. #define REVMII_ABILITY BIT(9)
  64. #define MII_ABILITY BIT(8)
  65. #define SGMII_ABILITY BIT(0)
  66. #define VEND1_MII_BASIC_CONFIG 0xAFC6
  67. #define MII_BASIC_CONFIG_REV BIT(4)
  68. #define MII_BASIC_CONFIG_SGMII 0x9
  69. #define MII_BASIC_CONFIG_RGMII 0x7
  70. #define MII_BASIC_CONFIG_RMII 0x5
  71. #define MII_BASIC_CONFIG_MII 0x4
  72. #define VEND1_SYMBOL_ERROR_COUNTER 0x8350
  73. #define VEND1_LINK_DROP_COUNTER 0x8352
  74. #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
  75. #define VEND1_R_GOOD_FRAME_CNT 0xA950
  76. #define VEND1_R_BAD_FRAME_CNT 0xA952
  77. #define VEND1_R_RXER_FRAME_CNT 0xA954
  78. #define VEND1_RX_PREAMBLE_COUNT 0xAFCE
  79. #define VEND1_TX_PREAMBLE_COUNT 0xAFCF
  80. #define VEND1_RX_IPG_LENGTH 0xAFD0
  81. #define VEND1_TX_IPG_LENGTH 0xAFD1
  82. #define COUNTER_EN BIT(15)
  83. #define VEND1_PTP_CONFIG 0x1102
  84. #define EXT_TRG_EDGE BIT(1)
  85. #define PPS_OUT_POL BIT(2)
  86. #define PPS_OUT_EN BIT(3)
  87. #define VEND1_LTC_LOAD_CTRL 0x1105
  88. #define READ_LTC BIT(2)
  89. #define LOAD_LTC BIT(0)
  90. #define VEND1_LTC_WR_NSEC_0 0x1106
  91. #define VEND1_LTC_WR_NSEC_1 0x1107
  92. #define VEND1_LTC_WR_SEC_0 0x1108
  93. #define VEND1_LTC_WR_SEC_1 0x1109
  94. #define VEND1_LTC_RD_NSEC_0 0x110A
  95. #define VEND1_LTC_RD_NSEC_1 0x110B
  96. #define VEND1_LTC_RD_SEC_0 0x110C
  97. #define VEND1_LTC_RD_SEC_1 0x110D
  98. #define VEND1_RATE_ADJ_SUBNS_0 0x110F
  99. #define VEND1_RATE_ADJ_SUBNS_1 0x1110
  100. #define CLK_RATE_ADJ_LD BIT(15)
  101. #define CLK_RATE_ADJ_DIR BIT(14)
  102. #define VEND1_HW_LTC_LOCK_CTRL 0x1115
  103. #define HW_LTC_LOCK_EN BIT(0)
  104. #define VEND1_PTP_IRQ_EN 0x1131
  105. #define VEND1_PTP_IRQ_STATUS 0x1132
  106. #define PTP_IRQ_EGR_TS BIT(0)
  107. #define VEND1_RX_TS_INSRT_CTRL 0x114D
  108. #define RX_TS_INSRT_MODE2 0x02
  109. #define VEND1_EGR_RING_DATA_0 0x114E
  110. #define VEND1_EGR_RING_DATA_1_SEQ_ID 0x114F
  111. #define VEND1_EGR_RING_DATA_2_NSEC_15_0 0x1150
  112. #define VEND1_EGR_RING_DATA_3 0x1151
  113. #define VEND1_EGR_RING_CTRL 0x1154
  114. #define VEND1_EXT_TRG_TS_DATA_0 0x1121
  115. #define VEND1_EXT_TRG_TS_DATA_1 0x1122
  116. #define VEND1_EXT_TRG_TS_DATA_2 0x1123
  117. #define VEND1_EXT_TRG_TS_DATA_3 0x1124
  118. #define VEND1_EXT_TRG_TS_DATA_4 0x1125
  119. #define VEND1_EXT_TRG_TS_CTRL 0x1126
  120. #define RING_DATA_0_DOMAIN_NUMBER GENMASK(7, 0)
  121. #define RING_DATA_0_MSG_TYPE GENMASK(11, 8)
  122. #define RING_DATA_0_SEC_4_2 GENMASK(14, 2)
  123. #define RING_DATA_0_TS_VALID BIT(15)
  124. #define RING_DATA_3_NSEC_29_16 GENMASK(13, 0)
  125. #define RING_DATA_3_SEC_1_0 GENMASK(15, 14)
  126. #define RING_DATA_5_SEC_16_5 GENMASK(15, 4)
  127. #define RING_DONE BIT(0)
  128. #define TS_SEC_MASK GENMASK(1, 0)
  129. #define VEND1_PORT_FUNC_ENABLES 0x8048
  130. #define PTP_ENABLE BIT(3)
  131. #define VEND1_PORT_PTP_CONTROL 0x9000
  132. #define PORT_PTP_CONTROL_BYPASS BIT(11)
  133. #define VEND1_PTP_CLK_PERIOD 0x1104
  134. #define PTP_CLK_PERIOD_100BT1 15ULL
  135. #define VEND1_EVENT_MSG_FILT 0x1148
  136. #define EVENT_MSG_FILT_ALL 0x0F
  137. #define EVENT_MSG_FILT_NONE 0x00
  138. #define VEND1_TX_PIPE_DLY_NS 0x1149
  139. #define VEND1_TX_PIPEDLY_SUBNS 0x114A
  140. #define VEND1_RX_PIPE_DLY_NS 0x114B
  141. #define VEND1_RX_PIPEDLY_SUBNS 0x114C
  142. #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
  143. #define GPIO_FUNC_EN BIT(15)
  144. #define GPIO_FUNC_PTP BIT(6)
  145. #define GPIO_SIGNAL_PTP_TRIGGER 0x01
  146. #define GPIO_SIGNAL_PPS_OUT 0x12
  147. #define GPIO_DISABLE 0
  148. #define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
  149. GPIO_SIGNAL_PPS_OUT)
  150. #define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
  151. GPIO_SIGNAL_PTP_TRIGGER)
  152. #define RGMII_PERIOD_PS 8000U
  153. #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
  154. #define MIN_ID_PS 1644U
  155. #define MAX_ID_PS 2260U
  156. #define DEFAULT_ID_PS 2000U
  157. #define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK_ULL(31, 0) * (ppb) * \
  158. PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
  159. #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
/* Per-skb private data stored in skb->cb while a PTP packet sits in
 * tx_queue/rx_queue waiting for its hardware timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* classification type from ptp_classify */
};
/* A hardware timestamp popped from the egress timestamp FIFO, together
 * with the PTP header fields used to match it to a queued skb.
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds (30 valid bits) */
	u32 sec;		/* seconds; HW provides only the low bits */
	u8 domain_number;
	u16 sequence_id;
	u8 msg_type;
};
/* Driver private state, attached to the phy_device. */
struct nxp_c45_phy {
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;	/* timestamping hooks registered with phylib */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;	/* PHC capabilities; container_of anchor */
	struct sk_buff_head tx_queue;	/* skbs awaiting an egress timestamp */
	struct sk_buff_head rx_queue;	/* skbs awaiting rx timestamp reconstruction */
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;			/* HWTSTAMP_TX_* mode from the hwtstamp ioctl */
	int hwts_rx;			/* non-zero when rx timestamping is enabled */
	u32 tx_delay;
	u32 rx_delay;
	struct timespec64 extts_ts;	/* last external-trigger timestamp reported */
	int extts_index;		/* channel index for PTP_CLOCK_EXTTS events */
	bool extts;			/* external timestamping currently armed */
};
/* Descriptor for one ethtool hardware statistic: the MMD register that
 * holds it plus the mask/shift extracting the counter field.
 */
struct nxp_c45_phy_stats {
	const char *name;	/* ethtool string, at most ETH_GSTRING_LEN bytes */
	u8 mmd;			/* MDIO MMD the register lives in */
	u16 reg;		/* register address */
	u8 off;			/* right-shift applied after masking */
	u16 mask;		/* bits of the register holding the counter */
};
  195. static bool nxp_c45_poll_txts(struct phy_device *phydev)
  196. {
  197. return phydev->irq <= 0;
  198. }
/* Read the PHY's local time counter (LTC).  Caller must hold
 * priv->ptp_lock.  Writing READ_LTC latches the running counter into the
 * RD registers, which are then read out 16 bits at a time — presumably
 * the latched snapshot is stable until the next READ_LTC request (TODO:
 * confirm against the datasheet).  @sts is unused.  Always returns 0.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	/* Latch the current LTC value before reading the halves. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      READ_LTC);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_NSEC_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    VEND1_LTC_RD_NSEC_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_LTC_RD_SEC_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_SEC_1) << 16;

	return 0;
}
  216. static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
  217. struct timespec64 *ts,
  218. struct ptp_system_timestamp *sts)
  219. {
  220. struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
  221. mutex_lock(&priv->ptp_lock);
  222. _nxp_c45_ptp_gettimex64(ptp, ts, sts);
  223. mutex_unlock(&priv->ptp_lock);
  224. return 0;
  225. }
/* Set the PHY's local time counter.  Caller must hold priv->ptp_lock.
 * The four WR halves are staged first and only take effect when LOAD_LTC
 * is written last, so the ordering of these writes matters.
 * Always returns 0.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value into the running counter. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      LOAD_LTC);

	return 0;
}
  242. static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
  243. const struct timespec64 *ts)
  244. {
  245. struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
  246. mutex_lock(&priv->ptp_lock);
  247. _nxp_c45_ptp_settime64(ptp, ts);
  248. mutex_unlock(&priv->ptp_lock);
  249. return 0;
  250. }
/* Adjust the LTC frequency.  scaled_ppm is converted to a
 * sub-nanosecond-per-cycle increment (PPM_TO_SUBNS_INC, based on the
 * 100BASE-T1 clock period) and split across the two RATE_ADJ registers.
 * CLK_RATE_ADJ_DIR selects the adjustment direction; CLK_RATE_ADJ_LD in
 * the _1 register presumably commits the value, so that write must come
 * last (TODO: confirm against the datasheet).  Always returns 0.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);
	subns_inc_val = PPM_TO_SUBNS_INC(ppb);
	/* Low 16 bits of the increment. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;
	/* High bits plus load/direction flags — committing write. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
  272. static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  273. {
  274. struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
  275. struct timespec64 now, then;
  276. mutex_lock(&priv->ptp_lock);
  277. then = ns_to_timespec64(delta);
  278. _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
  279. now = timespec64_add(now, then);
  280. _nxp_c45_ptp_settime64(ptp, &now);
  281. mutex_unlock(&priv->ptp_lock);
  282. return 0;
  283. }
/* Rebuild a full timestamp from a hardware timestamp that carries only
 * the low two bits of the seconds counter (TS_SEC_MASK).
 * On entry @ts holds the current LTC time; its low seconds bits are
 * replaced by the hardware ones.  If the current low bits are smaller
 * than the hardware's, the 2-bit counter wrapped between the event and
 * now, so step the base back by one wrap period (TS_SEC_MASK + 1 = 4 s)
 * before splicing in the hardware bits.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
  293. static bool nxp_c45_match_ts(struct ptp_header *header,
  294. struct nxp_c45_hwts *hwts,
  295. unsigned int type)
  296. {
  297. return ntohs(header->sequence_id) == hwts->sequence_id &&
  298. ptp_get_msgtype(header, type) == hwts->msg_type &&
  299. header->domain_number == hwts->domain_number;
  300. }
/* Read the latest external-trigger timestamp from the EXT_TRG data
 * registers (nsec and sec, 16 bits at a time) and write RING_DONE to
 * acknowledge/pop the entry.  Note that VEND1_EXT_TRG_TS_DATA_4 (upper
 * seconds bits) is not read, so only 32 bits of seconds are recovered.
 */
static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      VEND1_EXT_TRG_TS_DATA_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       VEND1_EXT_TRG_TS_DATA_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     VEND1_EXT_TRG_TS_DATA_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      VEND1_EXT_TRG_TS_DATA_3) << 16;
	/* Acknowledge the entry so the next trigger can be latched. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL,
		      RING_DONE);
}
/* Pop one entry from the egress timestamp FIFO into @hwts.
 * Writing RING_DONE advances the ring; the DATA_0 word is then checked
 * for TS_VALID.  Returns false (FIFO empty) when no valid entry exists.
 * The seconds value is assembled from bits scattered across DATA_0
 * (bits 4:2) and DATA_3 (bits 1:0); nanoseconds from DATA_2 (15:0) and
 * DATA_3 (29:16).  domain_number takes the low byte of DATA_0 via u8
 * truncation.  Serialized by ptp_lock.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	hwts->domain_number = reg;	/* u8 field: keeps bits 7:0 only */
	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
					 VEND1_EGR_RING_DATA_1_SEQ_ID);
	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
/* Match a hardware egress timestamp against the queued tx skbs and
 * complete the matching one with skb_complete_tx_timestamp().
 * The queue is walked under its own spinlock (irqsave: this can run
 * from the IRQ thread or the aux worker); the matching skb is unlinked
 * inside the lock and completed outside it.  The current LTC time is
 * read only to reconstruct the full seconds value around the 2-bit
 * hardware seconds field.  An unmatched timestamp is only logged.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
/* PTP auxiliary worker.  Three duties:
 *  1. In polling mode (no PHY IRQ), drain the egress timestamp FIFO and
 *     complete queued tx skbs; reschedule if skbs remain unmatched.
 *  2. Reconstruct rx timestamps: the hardware stored a raw 32-bit
 *     timestamp in the PTP header's reserved2 field (2 bits of seconds,
 *     30 bits of nanoseconds); combine it with the current LTC time,
 *     clear reserved2 and hand the skb to the stack via netif_rx().
 *  3. If external timestamping is armed, poll the trigger registers and
 *     emit a PTP_CLOCK_EXTTS event when a new timestamp appears; keep
 *     rescheduling while armed.
 * Returns the delay (in jiffies) until the next run, or negative to stop.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}
		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		/* HW wrote the raw timestamp into the PTP reserved2 field:
		 * bits 31:30 seconds, bits 29:0 nanoseconds.
		 */
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		nxp_c45_get_extts(priv, &ts);
		/* Only report when the latched timestamp changed. */
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
  418. static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
  419. int pin, u16 pin_cfg)
  420. {
  421. struct phy_device *phydev = priv->phydev;
  422. phy_write_mmd(phydev, MDIO_MMD_VEND1,
  423. VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
  424. }
/* Enable/disable the periodic output (PPS) on the pin assigned to this
 * perout channel.  The hardware only generates a fixed 1 Hz pulse tied
 * to the seconds counter, so the period must be exactly 1 s and the
 * start time 0; the only supported phase values are 0 and 500 ms,
 * implemented by flipping the output polarity (PPS_OUT_POL).
 * Returns 0 on success, -EOPNOTSUPP for unsupported flags, -EINVAL for
 * unsupported period/start/phase, or a negative pin-lookup error.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	struct phy_device *phydev = priv->phydev;
	int pin;

	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_EN);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_POL);
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}
		/* A 500 ms phase is realized by inverting the PPS polarity. */
		if (perout->phase.nsec == 0)
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_PTP_CONFIG, PPS_OUT_POL);
		else
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_PTP_CONFIG, PPS_OUT_POL);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN);

	return 0;
}
/* Enable/disable external timestamping on the pin assigned to this
 * extts channel.  Only one edge (rising or falling) can be sampled, not
 * both.  When enabled, the trigger edge is programmed via EXT_TRG_EDGE
 * and the aux worker is kicked to poll for trigger timestamps (the
 * hardware event is polled, not interrupt driven here).
 * Returns 0 on success, -EOPNOTSUPP for unsupported flag combinations,
 * or a negative pin-lookup error.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			     PTP_RISING_EDGE |
			     PTP_FALLING_EDGE |
			     PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE))
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;
		return 0;
	}

	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start polling for trigger timestamps immediately. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
  506. static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
  507. struct ptp_clock_request *req, int on)
  508. {
  509. struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
  510. switch (req->type) {
  511. case PTP_CLK_REQ_EXTTS:
  512. return nxp_c45_extts_enable(priv, &req->extts, on);
  513. case PTP_CLK_REQ_PEROUT:
  514. return nxp_c45_perout_enable(priv, &req->perout, on);
  515. default:
  516. return -EOPNOTSUPP;
  517. }
  518. }
/* The 12 programmable GPIO pins exposed as PTP pins; all start
 * unassigned (PTP_PF_NONE) and can be claimed for PEROUT or EXTTS.
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
  533. static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
  534. enum ptp_pin_function func, unsigned int chan)
  535. {
  536. if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
  537. return -EINVAL;
  538. switch (func) {
  539. case PTP_PF_NONE:
  540. case PTP_PF_PEROUT:
  541. case PTP_PF_EXTTS:
  542. break;
  543. default:
  544. return -EOPNOTSUPP;
  545. }
  546. return 0;
  547. }
/* Fill in the PHC capabilities and register the PTP clock.
 * Returns 0 on success, a PTR_ERR from ptp_clock_register(), or -ENOMEM
 * when registration returns NULL (PTP support compiled out).
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	/* ptp_clock_register() returns NULL when PTP is disabled. */
	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
/* mii_ts .txtstamp hook: called with a clone of an outgoing PTP packet.
 * When tx timestamping is on, stash the parsed header/type in skb->cb,
 * queue the clone until its hardware timestamp arrives, and kick the
 * aux worker when operating in polling (no IRQ) mode.  Otherwise the
 * clone is dropped.  This function owns the skb in all paths.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
/* mii_ts .rxtstamp hook: returns true when the driver takes ownership
 * of the skb (it is queued for timestamp reconstruction by the aux
 * worker), false to let the stack deliver it immediately.
 * NOTE(review): the header is parsed before the hwts_rx check — the
 * order looks safe (ptp_parse_header has no visible side effects) but
 * checking hwts_rx first would skip the parse; confirm intent.
 */
static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}
/* mii_ts .hwtstamp hook (SIOCSHWTSTAMP): apply a hwtstamp_config from
 * userspace.  TX supports OFF/ON; RX supports none or the PTPv2-over-L2
 * event filters (reported back as the broader L2_EVENT filter).  When
 * any timestamping is active, event-message filtering is enabled and
 * the PTP bypass is cleared; otherwise everything is bypassed.  In IRQ
 * mode the egress-timestamp interrupt follows the tx setting; in
 * polling mode it is left untouched.  The possibly-updated config is
 * copied back to userspace.
 * Returns 0, -EFAULT on copy failures, or -ERANGE for bad settings.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_ALL);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_NONE);
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	}

	/* The egress-timestamp IRQ only matters when not polling. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
/* mii_ts .ts_info hook: report timestamping capabilities to ethtool —
 * hardware tx/rx/raw stamping, the PHC index, tx OFF/ON, and the
 * PTPv2-over-L2 rx filters accepted by nxp_c45_hwtstamp().
 */
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *ts_info)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;
	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	return 0;
}
/* Hardware counters exposed through ethtool -S: register address plus
 * the shift/mask isolating each counter field.
 */
static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "r_good_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_bad_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_rxer_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};
  699. static int nxp_c45_get_sset_count(struct phy_device *phydev)
  700. {
  701. return ARRAY_SIZE(nxp_c45_hw_stats);
  702. }
  703. static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
  704. {
  705. size_t i;
  706. for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
  707. strncpy(data + i * ETH_GSTRING_LEN,
  708. nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
  709. }
  710. }
  711. static void nxp_c45_get_stats(struct phy_device *phydev,
  712. struct ethtool_stats *stats, u64 *data)
  713. {
  714. size_t i;
  715. int ret;
  716. for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
  717. ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
  718. nxp_c45_hw_stats[i].reg);
  719. if (ret < 0) {
  720. data[i] = U64_MAX;
  721. } else {
  722. data[i] = ret & nxp_c45_hw_stats[i].mask;
  723. data[i] >>= nxp_c45_hw_stats[i].off;
  724. }
  725. }
  726. }
  727. static int nxp_c45_config_enable(struct phy_device *phydev)
  728. {
  729. phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
  730. DEVICE_CONTROL_CONFIG_GLOBAL_EN |
  731. DEVICE_CONTROL_CONFIG_ALL_EN);
  732. usleep_range(400, 450);
  733. phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
  734. PORT_CONTROL_EN);
  735. phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
  736. PHY_CONFIG_EN);
  737. phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
  738. PORT_INFRA_CONTROL_EN);
  739. return 0;
  740. }
/* Leave configuration state and start normal PHY operation by setting
 * PHY_START_OP in VEND1_PHY_CONTROL.
 */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
  746. static int nxp_c45_config_intr(struct phy_device *phydev)
  747. {
  748. if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
  749. return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
  750. VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
  751. else
  752. return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
  753. VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
  754. }
/* Threaded IRQ handler. Two sources are serviced:
 *  - link events: acknowledged explicitly, then the phylib state
 *    machine is kicked;
 *  - egress timestamp FIFO: drained by reading until empty, which
 *    implicitly deasserts the interrupt (no explicit ACK register).
 * Returns IRQ_HANDLED if either source was active, IRQ_NONE otherwise.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	/* NOTE(review): a failed phy_read_mmd() returns a negative errno
	 * which is then tested as a bit mask here — presumably benign
	 * since the handlers tolerate spurious invocation; confirm.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* There is no need for ACK.
	 * The irq signal will be asserted until the EGR TS FIFO will be
	 * emptied.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
	if (irq & PTP_IRQ_EGR_TS) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);
		ret = IRQ_HANDLED;
	}

	return ret;
}
  780. static int nxp_c45_soft_reset(struct phy_device *phydev)
  781. {
  782. int ret;
  783. ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
  784. DEVICE_CONTROL_RESET);
  785. if (ret)
  786. return ret;
  787. return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
  788. VEND1_DEVICE_CONTROL, ret,
  789. !(ret & DEVICE_CONTROL_RESET), 20000,
  790. 240000, false);
  791. }
/* ethtool cable-test hook: arm and start the cable diagnostic. */
static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			     CABLE_TEST_ENABLE | CABLE_TEST_START);
}
  797. static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
  798. bool *finished)
  799. {
  800. int ret;
  801. u8 cable_test_result;
  802. ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
  803. if (!(ret & CABLE_TEST_VALID)) {
  804. *finished = false;
  805. return 0;
  806. }
  807. *finished = true;
  808. cable_test_result = ret & GENMASK(2, 0);
  809. switch (cable_test_result) {
  810. case CABLE_TEST_OK:
  811. ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
  812. ETHTOOL_A_CABLE_RESULT_CODE_OK);
  813. break;
  814. case CABLE_TEST_SHORTED:
  815. ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
  816. ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
  817. break;
  818. case CABLE_TEST_OPEN:
  819. ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
  820. ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
  821. break;
  822. default:
  823. ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
  824. ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
  825. }
  826. phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
  827. CABLE_TEST_ENABLE);
  828. return nxp_c45_start_op(phydev);
  829. }
  830. static int nxp_c45_setup_master_slave(struct phy_device *phydev)
  831. {
  832. switch (phydev->master_slave_set) {
  833. case MASTER_SLAVE_CFG_MASTER_FORCE:
  834. case MASTER_SLAVE_CFG_MASTER_PREFERRED:
  835. phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
  836. MASTER_MODE);
  837. break;
  838. case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
  839. case MASTER_SLAVE_CFG_SLAVE_FORCE:
  840. phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
  841. SLAVE_MODE);
  842. break;
  843. case MASTER_SLAVE_CFG_UNKNOWN:
  844. case MASTER_SLAVE_CFG_UNSUPPORTED:
  845. return 0;
  846. default:
  847. phydev_warn(phydev, "Unsupported Master/Slave mode\n");
  848. return -EOPNOTSUPP;
  849. }
  850. return 0;
  851. }
  852. static int nxp_c45_read_master_slave(struct phy_device *phydev)
  853. {
  854. int reg;
  855. phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
  856. phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
  857. reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
  858. if (reg < 0)
  859. return reg;
  860. if (reg & B100T1_PMAPMD_MASTER) {
  861. phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
  862. phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
  863. } else {
  864. phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
  865. phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
  866. }
  867. return 0;
  868. }
/* phylib config_aneg hook. 100BASE-T1 has no autonegotiation, so the
 * only configurable item is the forced master/slave role.
 */
static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}
  873. static int nxp_c45_read_status(struct phy_device *phydev)
  874. {
  875. int ret;
  876. ret = genphy_c45_read_status(phydev);
  877. if (ret)
  878. return ret;
  879. ret = nxp_c45_read_master_slave(phydev);
  880. if (ret)
  881. return ret;
  882. return 0;
  883. }
  884. static int nxp_c45_get_sqi(struct phy_device *phydev)
  885. {
  886. int reg;
  887. reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
  888. if (!(reg & SQI_VALID))
  889. return -EINVAL;
  890. reg &= SQI_MASK;
  891. return reg;
  892. }
/* ethtool get_sqi_max hook: highest SQI value this PHY can report. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
  897. static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
  898. {
  899. if (delay < MIN_ID_PS) {
  900. phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
  901. return -EINVAL;
  902. }
  903. if (delay > MAX_ID_PS) {
  904. phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
  905. return -EINVAL;
  906. }
  907. return 0;
  908. }
/* Convert a delay expressed in degrees into the raw register phase
 * value, inverting "degrees = 73.8 + raw * 0.9", i.e.
 * raw = (degrees * 10 - 738) / 9.
 */
static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
	 * To avoid floating point operations we'll multiply by 10
	 * and get 1 decimal point precision.
	 */
	phase_offset_raw *= 10;
	/* NOTE(review): if the input * 10 is below 738 this u64
	 * subtraction wraps; callers appear to guarantee a large enough
	 * value via nxp_c45_check_delay()/MIN_ID_PS — confirm the bound.
	 */
	phase_offset_raw -= 738;
	return div_u64(phase_offset_raw, 9);
}
/* Disable both the TX and RX internal (RGMII) clock delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
  924. static void nxp_c45_set_delays(struct phy_device *phydev)
  925. {
  926. struct nxp_c45_phy *priv = phydev->priv;
  927. u64 tx_delay = priv->tx_delay;
  928. u64 rx_delay = priv->rx_delay;
  929. u64 degree;
  930. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
  931. phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  932. degree = div_u64(tx_delay, PS_PER_DEGREE);
  933. phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
  934. ID_ENABLE | nxp_c45_get_phase_shift(degree));
  935. } else {
  936. phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
  937. ID_ENABLE);
  938. }
  939. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
  940. phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
  941. degree = div_u64(rx_delay, PS_PER_DEGREE);
  942. phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
  943. ID_ENABLE | nxp_c45_get_phase_shift(degree));
  944. } else {
  945. phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
  946. ID_ENABLE);
  947. }
  948. }
  949. static int nxp_c45_get_delays(struct phy_device *phydev)
  950. {
  951. struct nxp_c45_phy *priv = phydev->priv;
  952. int ret;
  953. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
  954. phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  955. ret = device_property_read_u32(&phydev->mdio.dev,
  956. "tx-internal-delay-ps",
  957. &priv->tx_delay);
  958. if (ret)
  959. priv->tx_delay = DEFAULT_ID_PS;
  960. ret = nxp_c45_check_delay(phydev, priv->tx_delay);
  961. if (ret) {
  962. phydev_err(phydev,
  963. "tx-internal-delay-ps invalid value\n");
  964. return ret;
  965. }
  966. }
  967. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
  968. phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
  969. ret = device_property_read_u32(&phydev->mdio.dev,
  970. "rx-internal-delay-ps",
  971. &priv->rx_delay);
  972. if (ret)
  973. priv->rx_delay = DEFAULT_ID_PS;
  974. ret = nxp_c45_check_delay(phydev, priv->rx_delay);
  975. if (ret) {
  976. phydev_err(phydev,
  977. "rx-internal-delay-ps invalid value\n");
  978. return ret;
  979. }
  980. }
  981. return 0;
  982. }
/* Program VEND1_MII_BASIC_CONFIG according to phydev->interface after
 * checking the corresponding ability bit in VEND1_ABILITIES. For the
 * RGMII delayed modes the internal delay lines are configured as well.
 * Returns -EINVAL if the requested mode is unsupported by the device.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: both internal delay lines off. */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Pick up the delay values from firmware, then apply. */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;
		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to configure for an internal connection. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* phylib config_init hook: enable configuration access, enable the
 * hardware statistics counters, select the MII/RGMII/SGMII mode, set
 * up the PTP block and finally start normal operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	/* Enable the counters sampled by nxp_c45_get_stats(). */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	/* PTP setup: clock period, clear the HW LTC lock (so the local
	 * time counter can be adjusted — per register naming, confirm
	 * against the datasheet), RX timestamp insertion mode, and
	 * enable the port's PTP function.
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
		      PTP_CLK_PERIOD_100BT1);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
			   HW_LTC_LOCK_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);

	return nxp_c45_start_op(phydev);
}
  1090. static int nxp_c45_probe(struct phy_device *phydev)
  1091. {
  1092. struct nxp_c45_phy *priv;
  1093. int ptp_ability;
  1094. int ret = 0;
  1095. priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
  1096. if (!priv)
  1097. return -ENOMEM;
  1098. skb_queue_head_init(&priv->tx_queue);
  1099. skb_queue_head_init(&priv->rx_queue);
  1100. priv->phydev = phydev;
  1101. phydev->priv = priv;
  1102. mutex_init(&priv->ptp_lock);
  1103. ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
  1104. VEND1_PORT_ABILITIES);
  1105. ptp_ability = !!(ptp_ability & PTP_ABILITY);
  1106. if (!ptp_ability) {
  1107. phydev_dbg(phydev, "the phy does not support PTP");
  1108. goto no_ptp_support;
  1109. }
  1110. if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
  1111. IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
  1112. priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
  1113. priv->mii_ts.txtstamp = nxp_c45_txtstamp;
  1114. priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
  1115. priv->mii_ts.ts_info = nxp_c45_ts_info;
  1116. phydev->mii_ts = &priv->mii_ts;
  1117. ret = nxp_c45_init_ptp_clock(priv);
  1118. } else {
  1119. phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
  1120. }
  1121. no_ptp_support:
  1122. return ret;
  1123. }
/* phylib remove hook: unregister the PTP clock (if one was created in
 * probe) and drop any timestamping skbs still queued.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
/* phylib driver description for the NXP TJA1103 Clause-45 PHY. */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name = "NXP C45 TJA1103",
		.features = PHY_BASIC_T1_FEATURES,
		.probe = nxp_c45_probe,
		.soft_reset = nxp_c45_soft_reset,
		.config_aneg = nxp_c45_config_aneg,
		.config_init = nxp_c45_config_init,
		.config_intr = nxp_c45_config_intr,
		.handle_interrupt = nxp_c45_handle_interrupt,
		.read_status = nxp_c45_read_status,
		.suspend = genphy_c45_pma_suspend,
		.resume = genphy_c45_pma_resume,
		.get_sset_count = nxp_c45_get_sset_count,
		.get_strings = nxp_c45_get_strings,
		.get_stats = nxp_c45_get_stats,
		.cable_test_start = nxp_c45_cable_test_start,
		.cable_test_get_status = nxp_c45_cable_test_get_status,
		.set_loopback = genphy_c45_loopback,
		.get_sqi = nxp_c45_get_sqi,
		.get_sqi_max = nxp_c45_get_sqi_max,
		.remove = nxp_c45_remove,
	},
};
module_phy_driver(nxp_c45_driver);

/* MDIO device-ID table for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};
MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <[email protected]>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");