// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Schneider-Electric
 *
 * Clément Léger <clement.leger@bootlin.com>
 */

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>

#include "rzn1_a5psw.h"

struct a5psw_stats {
        u16 offset;
        const char name[ETH_GSTRING_LEN];
};

#define STAT_DESC(_offset) { \
        .offset = A5PSW_##_offset, \
        .name = __stringify(_offset), \
}
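
/* For example, STAT_DESC(aFramesTransmittedOK) expands to
 * { .offset = A5PSW_aFramesTransmittedOK, .name = "aFramesTransmittedOK" },
 * so each counter is described by its register offset and its MIB name.
 */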
static const struct a5psw_stats a5psw_stats[] = {
        STAT_DESC(aFramesTransmittedOK),
        STAT_DESC(aFramesReceivedOK),
        STAT_DESC(aFrameCheckSequenceErrors),
        STAT_DESC(aAlignmentErrors),
        STAT_DESC(aOctetsTransmittedOK),
        STAT_DESC(aOctetsReceivedOK),
        STAT_DESC(aTxPAUSEMACCtrlFrames),
        STAT_DESC(aRxPAUSEMACCtrlFrames),
        STAT_DESC(ifInErrors),
        STAT_DESC(ifOutErrors),
        STAT_DESC(ifInUcastPkts),
        STAT_DESC(ifInMulticastPkts),
        STAT_DESC(ifInBroadcastPkts),
        STAT_DESC(ifOutDiscards),
        STAT_DESC(ifOutUcastPkts),
        STAT_DESC(ifOutMulticastPkts),
        STAT_DESC(ifOutBroadcastPkts),
        STAT_DESC(etherStatsDropEvents),
        STAT_DESC(etherStatsOctets),
        STAT_DESC(etherStatsPkts),
        STAT_DESC(etherStatsUndersizePkts),
        STAT_DESC(etherStatsOversizePkts),
        STAT_DESC(etherStatsPkts64Octets),
        STAT_DESC(etherStatsPkts65to127Octets),
        STAT_DESC(etherStatsPkts128to255Octets),
        STAT_DESC(etherStatsPkts256to511Octets),
        STAT_DESC(etherStatsPkts512to1023Octets),
        STAT_DESC(etherStatsPkts1024to1518Octets),
        STAT_DESC(etherStatsPkts1519toXOctets),
        STAT_DESC(etherStatsJabbers),
        STAT_DESC(etherStatsFragments),
        STAT_DESC(VLANReceived),
        STAT_DESC(VLANTransmitted),
        STAT_DESC(aDeferred),
        STAT_DESC(aMultipleCollisions),
        STAT_DESC(aSingleCollisions),
        STAT_DESC(aLateCollisions),
        STAT_DESC(aExcessiveCollisions),
        STAT_DESC(aCarrierSenseErrors),
};

static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
{
        writel(value, a5psw->base + offset);
}

static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
{
        return readl(a5psw->base + offset);
}

static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
{
        u32 reg;

        spin_lock(&a5psw->reg_lock);

        reg = a5psw_reg_readl(a5psw, offset);
        reg &= ~mask;
        reg |= val;
        a5psw_reg_writel(a5psw, offset, reg);

        spin_unlock(&a5psw->reg_lock);
}

static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
                                                    int port,
                                                    enum dsa_tag_protocol mp)
{
        return DSA_TAG_PROTO_RZN1_A5PSW;
}

static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
                                   bool enable)
{
        u32 rx_match = 0;

        if (enable)
                rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);

        a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
                      A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
}

static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
{
        /* Enable "management forward" pattern matching, this will forward
         * packets from this port only towards the management port and thus
         * isolate the port.
         */
        a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}

static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
{
        u32 mask = A5PSW_PORT_ENA_TX(port);
        u32 reg = enable ? mask : 0;

        /* Even though the port TX is disabled through TXENA bit in the
         * PORT_ENA register, it can still send BPDUs. This depends on the tag
         * configuration added when sending packets from the CPU port to the
         * switch port. Indeed, when using forced forwarding without filtering,
         * even disabled ports will be able to send packets that are tagged.
         * This allows to implement STP support when ports are in a state where
         * forwarding traffic should be stopped but BPDUs should still be sent.
         */
        a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
}

static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
{
        u32 port_ena = 0;

        if (enable)
                port_ena |= A5PSW_PORT_ENA_TX_RX(port);

        a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
                      port_ena);
}
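
/* Write a command to LK_ADDR_CTRL and poll until the hardware clears the BUSY
 * bit. On success, *ctrl holds the final register value, which lets callers
 * such as a5psw_lk_execute_lookup() read back the matched entry index.
 */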
static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
{
        int ret;

        a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);

        ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
                                 !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
                                 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
        if (ret)
                dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");

        return ret;
}

static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
{
        u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);

        mutex_lock(&a5psw->lk_lock);
        a5psw_lk_execute_ctrl(a5psw, &ctrl);
        mutex_unlock(&a5psw->lk_lock);
}

static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
                                     bool authorize)
{
        u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));

        if (authorize)
                reg |= A5PSW_AUTH_PORT_AUTHORIZED;
        else
                reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;

        a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
}

static void a5psw_port_disable(struct dsa_switch *ds, int port)
{
        struct a5psw *a5psw = ds->priv;

        a5psw_port_authorize_set(a5psw, port, false);
        a5psw_port_enable_set(a5psw, port, false);
}

static int a5psw_port_enable(struct dsa_switch *ds, int port,
                             struct phy_device *phy)
{
        struct a5psw *a5psw = ds->priv;

        a5psw_port_authorize_set(a5psw, port, true);
        a5psw_port_enable_set(a5psw, port, true);

        return 0;
}

static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
        struct a5psw *a5psw = ds->priv;

        new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
        a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);

        return 0;
}

static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
{
        return A5PSW_MAX_MTU;
}

static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
                                   struct phylink_config *config)
{
        unsigned long *intf = config->supported_interfaces;

        config->mac_capabilities = MAC_1000FD;

        if (dsa_is_cpu_port(ds, port)) {
                /* GMII is used internally and GMAC2 is connected to the switch
                 * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
                 */
                __set_bit(PHY_INTERFACE_MODE_GMII, intf);
        } else {
                config->mac_capabilities |= MAC_100 | MAC_10;
                phy_interface_set_rgmii(intf);
                __set_bit(PHY_INTERFACE_MODE_RMII, intf);
                __set_bit(PHY_INTERFACE_MODE_MII, intf);
        }
}

static struct phylink_pcs *
a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
                             phy_interface_t interface)
{
        struct dsa_port *dp = dsa_to_port(ds, port);
        struct a5psw *a5psw = ds->priv;

        if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
                return a5psw->pcs[port];

        return NULL;
}

static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
                                        unsigned int mode,
                                        phy_interface_t interface)
{
        struct a5psw *a5psw = ds->priv;
        u32 cmd_cfg;

        cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
        cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
        a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}

static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
                                      unsigned int mode,
                                      phy_interface_t interface,
                                      struct phy_device *phydev, int speed,
                                      int duplex, bool tx_pause, bool rx_pause)
{
        u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
                      A5PSW_CMD_CFG_TX_CRC_APPEND;
        struct a5psw *a5psw = ds->priv;

        if (speed == SPEED_1000)
                cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;

        if (duplex == DUPLEX_HALF)
                cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;

        cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;

        if (!rx_pause)
                cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;

        a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
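
/* The AGETIME register appears to count in units of 1024 clock cycles per
 * lookup table entry, so the effective ageing period in seconds is
 * agetime * 1024 * A5PSW_TABLE_ENTRIES / rate. The computation below inverts
 * that relation and rejects values that would overflow the register field.
 */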
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
        struct a5psw *a5psw = ds->priv;
        unsigned long rate;
        u64 max, tmp;
        u32 agetime;

        rate = clk_get_rate(a5psw->clk);
        max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
                       rate) * 1000;
        if (msecs > max)
                return -EINVAL;

        tmp = div_u64(rate, MSEC_PER_SEC);
        agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);

        a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);

        return 0;
}

static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
{
        u32 mask = A5PSW_INPUT_LEARN_DIS(port);
        u32 reg = !learn ? mask : 0;

        a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}

static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
{
        u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
        u32 reg = block ? mask : 0;

        a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}

static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
                                          bool set)
{
        u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
                        A5PSW_MCAST_DEF_MASK};
        int i;

        if (set)
                a5psw->bridged_ports |= BIT(port);
        else
                a5psw->bridged_ports &= ~BIT(port);

        for (i = 0; i < ARRAY_SIZE(offsets); i++)
                a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
}

static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
                                      bool standalone)
{
        a5psw_port_learning_set(a5psw, port, !standalone);
        a5psw_flooding_set_resolution(a5psw, port, !standalone);
        a5psw_port_mgmtfwd_set(a5psw, port, standalone);
}

static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
                                  struct dsa_bridge bridge,
                                  bool *tx_fwd_offload,
                                  struct netlink_ext_ack *extack)
{
        struct a5psw *a5psw = ds->priv;

        /* We only support 1 bridge device */
        if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Forwarding offload supported for a single bridge");
                return -EOPNOTSUPP;
        }

        a5psw->br_dev = bridge.dev;
        a5psw_port_set_standalone(a5psw, port, false);

        return 0;
}

static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
                                    struct dsa_bridge bridge)
{
        struct a5psw *a5psw = ds->priv;

        a5psw_port_set_standalone(a5psw, port, true);

        /* No more ports bridged */
        if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
                a5psw->br_dev = NULL;
}

static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
        bool learning_enabled, rx_enabled, tx_enabled;
        struct a5psw *a5psw = ds->priv;

        switch (state) {
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
        case BR_STATE_LISTENING:
                rx_enabled = false;
                tx_enabled = false;
                learning_enabled = false;
                break;
        case BR_STATE_LEARNING:
                rx_enabled = false;
                tx_enabled = false;
                learning_enabled = true;
                break;
        case BR_STATE_FORWARDING:
                rx_enabled = true;
                tx_enabled = true;
                learning_enabled = true;
                break;
        default:
                dev_err(ds->dev, "invalid STP state: %d\n", state);
                return;
        }

        a5psw_port_learning_set(a5psw, port, learning_enabled);
        a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
        a5psw_port_tx_enable(a5psw, port, tx_enabled);
}

static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
{
        struct a5psw *a5psw = ds->priv;

        a5psw_port_fdb_flush(a5psw, port);
}

static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
                                   u16 *entry)
{
        u32 ctrl;
        int ret;

        a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
        a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);

        ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
        ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
        if (ret)
                return ret;

        *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;

        return 0;
}
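
/* Adding an entry is a lookup/read-modify/write sequence: look the MAC up to
 * get its table index, merge the new port into the returned port mask (or
 * initialize a fresh entry), then write the entry back at that index. The
 * LEARNCOUNT register is only incremented when a previously invalid entry
 * becomes valid.
 */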
static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
                              const unsigned char *addr, u16 vid,
                              struct dsa_db db)
{
        struct a5psw *a5psw = ds->priv;
        union lk_data lk_data = {0};
        bool inc_learncount = false;
        int ret = 0;
        u16 entry;
        u32 reg;

        ether_addr_copy(lk_data.entry.mac, addr);
        lk_data.entry.port_mask = BIT(port);

        mutex_lock(&a5psw->lk_lock);

        /* Set the value to be written in the lookup table */
        ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
        if (ret)
                goto lk_unlock;

        lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
        if (!lk_data.entry.valid) {
                inc_learncount = true;
                /* port_mask set to 0x1f when entry is not valid, clear it */
                lk_data.entry.port_mask = 0;
                lk_data.entry.prio = 0;
        }

        lk_data.entry.port_mask |= BIT(port);
        lk_data.entry.is_static = 1;
        lk_data.entry.valid = 1;

        a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

        reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
        ret = a5psw_lk_execute_ctrl(a5psw, &reg);
        if (ret)
                goto lk_unlock;

        if (inc_learncount) {
                reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
                a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
        }

lk_unlock:
        mutex_unlock(&a5psw->lk_lock);

        return ret;
}

static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
                              const unsigned char *addr, u16 vid,
                              struct dsa_db db)
{
        struct a5psw *a5psw = ds->priv;
        union lk_data lk_data = {0};
        bool clear = false;
        u16 entry;
        u32 reg;
        int ret;

        ether_addr_copy(lk_data.entry.mac, addr);

        mutex_lock(&a5psw->lk_lock);

        ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
        if (ret)
                goto lk_unlock;

        lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);

        /* Our hardware does not associate any VID to the FDB entries so this
         * means that if two entries were added for the same mac but for
         * different VID, then, on the deletion of the first one, we would also
         * delete the second one. Since there is unfortunately nothing we can
         * do about that, do not return an error...
         */
        if (!lk_data.entry.valid)
                goto lk_unlock;

        lk_data.entry.port_mask &= ~BIT(port);
        /* If there is no more port in the mask, clear the entry */
        if (lk_data.entry.port_mask == 0)
                clear = true;

        a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

        reg = entry;
        if (clear)
                reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
        else
                reg |= A5PSW_LK_ADDR_CTRL_WRITE;

        ret = a5psw_lk_execute_ctrl(a5psw, &reg);
        if (ret)
                goto lk_unlock;

        /* Decrement LEARNCOUNT */
        if (clear) {
                reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
                a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
        }

lk_unlock:
        mutex_unlock(&a5psw->lk_lock);

        return ret;
}
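
/* Dumping walks every entry of the lookup table: each READ command latches the
 * entry into LK_DATA_HI/LO, and entries that are invalid or do not include the
 * requested port in their port mask are skipped.
 */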
static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
                               dsa_fdb_dump_cb_t *cb, void *data)
{
        struct a5psw *a5psw = ds->priv;
        union lk_data lk_data;
        int i = 0, ret = 0;
        u32 reg;

        mutex_lock(&a5psw->lk_lock);

        for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
                reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;

                ret = a5psw_lk_execute_ctrl(a5psw, &reg);
                if (ret)
                        goto out_unlock;

                lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
                /* If entry is not valid or does not contain the port, skip */
                if (!lk_data.entry.valid ||
                    !(lk_data.entry.port_mask & BIT(port)))
                        continue;

                lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);

                ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
                if (ret)
                        goto out_unlock;
        }

out_unlock:
        mutex_unlock(&a5psw->lk_lock);

        return ret;
}

static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
{
        u32 reg_lo, reg_hi;

        reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
        /* A5PSW_STATS_HIWORD is latched on stat read */
        reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);

        return ((u64)reg_hi << 32) | reg_lo;
}

static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
                              uint8_t *data)
{
        unsigned int u;

        if (stringset != ETH_SS_STATS)
                return;

        for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
                memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
                       ETH_GSTRING_LEN);
        }
}

static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
                                    uint64_t *data)
{
        struct a5psw *a5psw = ds->priv;
        unsigned int u;

        for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
                data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
}

static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
        if (sset != ETH_SS_STATS)
                return 0;

        return ARRAY_SIZE(a5psw_stats);
}

static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
                                    struct ethtool_eth_mac_stats *mac_stats)
{
        struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
        mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
        mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
        mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
        mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
        mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
        mac_stats->AlignmentErrors = RD(aAlignmentErrors);
        mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
        mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
        mac_stats->LateCollisions = RD(aLateCollisions);
        mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
        mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
        mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
        mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
        mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
        mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
        mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
        mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
        mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
        mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
#undef RD
}
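
/* Bucket boundaries below must stay in sync with the hist[] assignments in
 * a5psw_get_rmon_stats(): hist[0] covers 0-64 octets, hist[1] 65-127, and so
 * on up to the 1519-to-max bucket.
 */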
static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
        { 0, 64 },
        { 65, 127 },
        { 128, 255 },
        { 256, 511 },
        { 512, 1023 },
        { 1024, 1518 },
        { 1519, A5PSW_MAX_MTU },
        {}
};

static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
                                 struct ethtool_rmon_stats *rmon_stats,
                                 const struct ethtool_rmon_hist_range **ranges)
{
        struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
        rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
        rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
        rmon_stats->fragments = RD(etherStatsFragments);
        rmon_stats->jabbers = RD(etherStatsJabbers);
        rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
        rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
        rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
        rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
        rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
        rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
        rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
#undef RD

        *ranges = a5psw_rmon_ranges;
}

static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
                                     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
        struct a5psw *a5psw = ds->priv;
        u64 stat;

        stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
        ctrl_stats->MACControlFramesTransmitted = stat;
        stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
        ctrl_stats->MACControlFramesReceived = stat;
}

static int a5psw_setup(struct dsa_switch *ds)
{
        struct a5psw *a5psw = ds->priv;
        int port, vlan, ret;
        struct dsa_port *dp;
        u32 reg;

        /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
        dsa_switch_for_each_cpu_port(dp, ds) {
                if (dp->index != A5PSW_CPU_PORT) {
                        dev_err(a5psw->dev, "Invalid CPU port\n");
                        return -EINVAL;
                }
        }

        /* Configure management port */
        reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
        a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);

        /* Set pattern 0 to forward all frame to mgmt port */
        a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
                         A5PSW_PATTERN_CTRL_MGMTFWD);

        /* Enable port tagging */
        reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
        reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
        a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);

        /* Enable normal switch operation */
        reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
              A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
              A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
        a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);

        ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
                                 !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
                                 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
        if (ret) {
                dev_err(a5psw->dev, "Failed to clear lookup table\n");
                return ret;
        }

        /* Reset learn count to 0 */
        reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
        a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);

        /* Clear VLAN resource table */
        reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
        for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
                a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);

        /* Reset all ports */
        dsa_switch_for_each_port(dp, ds) {
                port = dp->index;

                /* Reset the port */
                a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
                                 A5PSW_CMD_CFG_SW_RESET);

                /* Enable only CPU port */
                a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));

                if (dsa_port_is_unused(dp))
                        continue;

                /* Enable egress flooding and learning for CPU port */
                if (dsa_port_is_cpu(dp)) {
                        a5psw_flooding_set_resolution(a5psw, port, true);
                        a5psw_port_learning_set(a5psw, port, true);
                }

                /* Enable standalone mode for user ports */
                if (dsa_port_is_user(dp))
                        a5psw_port_set_standalone(a5psw, port, true);
        }

        return 0;
}

static const struct dsa_switch_ops a5psw_switch_ops = {
        .get_tag_protocol = a5psw_get_tag_protocol,
        .setup = a5psw_setup,
        .port_disable = a5psw_port_disable,
        .port_enable = a5psw_port_enable,
        .phylink_get_caps = a5psw_phylink_get_caps,
        .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
        .phylink_mac_link_down = a5psw_phylink_mac_link_down,
        .phylink_mac_link_up = a5psw_phylink_mac_link_up,
        .port_change_mtu = a5psw_port_change_mtu,
        .port_max_mtu = a5psw_port_max_mtu,
        .get_sset_count = a5psw_get_sset_count,
        .get_strings = a5psw_get_strings,
        .get_ethtool_stats = a5psw_get_ethtool_stats,
        .get_eth_mac_stats = a5psw_get_eth_mac_stats,
        .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
        .get_rmon_stats = a5psw_get_rmon_stats,
        .set_ageing_time = a5psw_set_ageing_time,
        .port_bridge_join = a5psw_port_bridge_join,
        .port_bridge_leave = a5psw_port_bridge_leave,
        .port_stp_state_set = a5psw_port_stp_state_set,
        .port_fast_age = a5psw_port_fast_age,
        .port_fdb_add = a5psw_port_fdb_add,
        .port_fdb_del = a5psw_port_fdb_del,
        .port_fdb_dump = a5psw_port_fdb_dump,
};

static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
{
        u32 status;
        int err;

        err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
                                 !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
                                 1000 * USEC_PER_MSEC);
        if (err)
                dev_err(a5psw->dev, "MDIO command timeout\n");

        return err;
}

static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
        struct a5psw *a5psw = bus->priv;
        u32 cmd, status;
        int ret;

        if (phy_reg & MII_ADDR_C45)
                return -EOPNOTSUPP;

        cmd = A5PSW_MDIO_COMMAND_READ;
        cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
        cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
        a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);

        ret = a5psw_mdio_wait_busy(a5psw);
        if (ret)
                return ret;

        ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;

        status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
        if (status & A5PSW_MDIO_CFG_STATUS_READERR)
                return -EIO;

        return ret;
}

static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
                            u16 phy_data)
{
        struct a5psw *a5psw = bus->priv;
        u32 cmd;

        if (phy_reg & MII_ADDR_C45)
                return -EOPNOTSUPP;

        cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
        cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

        a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
        a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);

        return a5psw_mdio_wait_busy(a5psw);
}
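
/* The MDC clock is presumably derived from hclk as MDC = hclk / (2 * div), so
 * the divider below is computed as hclk_rate / (2 * requested frequency) and
 * rejected if it falls outside the CLKDIV field range.
 */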
static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
{
        unsigned long rate;
        unsigned long div;
        u32 cfgstatus;

        rate = clk_get_rate(a5psw->hclk);
        div = ((rate / mdio_freq) / 2);
        if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
            div < A5PSW_MDIO_CLK_DIV_MIN) {
                dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
                return -ERANGE;
        }

        cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);

        a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);

        return 0;
}

static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
{
        struct device *dev = a5psw->dev;
        struct mii_bus *bus;
        u32 mdio_freq;
        int ret;

        if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
                mdio_freq = A5PSW_MDIO_DEF_FREQ;

        ret = a5psw_mdio_config(a5psw, mdio_freq);
        if (ret)
                return ret;

        bus = devm_mdiobus_alloc(dev);
        if (!bus)
                return -ENOMEM;

        bus->name = "a5psw_mdio";
        bus->read = a5psw_mdio_read;
        bus->write = a5psw_mdio_write;
        bus->priv = a5psw;
        bus->parent = dev;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

        a5psw->mii_bus = bus;

        return devm_of_mdiobus_register(dev, bus, node);
}

static void a5psw_pcs_free(struct a5psw *a5psw)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
                if (a5psw->pcs[i])
                        miic_destroy(a5psw->pcs[i]);
        }
}
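
/* Walk the "ethernet-ports" children of the switch node and, for each port
 * that carries a "pcs-handle" phandle, create a MIIC PCS instance indexed by
 * the port's "reg" property. On any failure, every PCS created so far is
 * released.
 */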
static int a5psw_pcs_get(struct a5psw *a5psw)
{
        struct device_node *ports, *port, *pcs_node;
        struct phylink_pcs *pcs;
        int ret;
        u32 reg;

        ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
        if (!ports)
                return -EINVAL;

        for_each_available_child_of_node(ports, port) {
                pcs_node = of_parse_phandle(port, "pcs-handle", 0);
                if (!pcs_node)
                        continue;

                if (of_property_read_u32(port, "reg", &reg)) {
                        ret = -EINVAL;
                        goto free_pcs;
                }

                if (reg >= ARRAY_SIZE(a5psw->pcs)) {
                        ret = -ENODEV;
                        goto free_pcs;
                }

                pcs = miic_create(a5psw->dev, pcs_node);
                if (IS_ERR(pcs)) {
                        dev_err(a5psw->dev,
                                "Failed to create PCS for port %d\n", reg);
                        ret = PTR_ERR(pcs);
                        goto free_pcs;
                }

                a5psw->pcs[reg] = pcs;
                of_node_put(pcs_node);
        }
        of_node_put(ports);

        return 0;

free_pcs:
        of_node_put(pcs_node);
        of_node_put(port);
        of_node_put(ports);
        a5psw_pcs_free(a5psw);

        return ret;
}

static int a5psw_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *mdio;
        struct dsa_switch *ds;
        struct a5psw *a5psw;
        int ret;

        a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
        if (!a5psw)
                return -ENOMEM;

        a5psw->dev = dev;
        mutex_init(&a5psw->lk_lock);
        spin_lock_init(&a5psw->reg_lock);
        a5psw->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(a5psw->base))
                return PTR_ERR(a5psw->base);

        ret = a5psw_pcs_get(a5psw);
        if (ret)
                return ret;

        a5psw->hclk = devm_clk_get(dev, "hclk");
        if (IS_ERR(a5psw->hclk)) {
                dev_err(dev, "failed get hclk clock\n");
                ret = PTR_ERR(a5psw->hclk);
                goto free_pcs;
        }

        a5psw->clk = devm_clk_get(dev, "clk");
        if (IS_ERR(a5psw->clk)) {
                dev_err(dev, "failed get clk_switch clock\n");
                ret = PTR_ERR(a5psw->clk);
                goto free_pcs;
        }

        ret = clk_prepare_enable(a5psw->clk);
        if (ret)
                goto free_pcs;

        ret = clk_prepare_enable(a5psw->hclk);
        if (ret)
                goto clk_disable;

        mdio = of_get_child_by_name(dev->of_node, "mdio");
        if (of_device_is_available(mdio)) {
                ret = a5psw_probe_mdio(a5psw, mdio);
                if (ret) {
                        of_node_put(mdio);
                        dev_err(dev, "Failed to register MDIO: %d\n", ret);
                        goto hclk_disable;
                }
        }

        of_node_put(mdio);

        ds = &a5psw->ds;
        ds->dev = dev;
        ds->num_ports = A5PSW_PORTS_NUM;
        ds->ops = &a5psw_switch_ops;
        ds->priv = a5psw;

        /* Store the driver data so that a5psw_remove() and a5psw_shutdown()
         * can retrieve it; without this, platform_get_drvdata() there would
         * return NULL and the switch would never be unregistered.
         */
        platform_set_drvdata(pdev, a5psw);

        ret = dsa_register_switch(ds);
        if (ret) {
                dev_err(dev, "Failed to register DSA switch: %d\n", ret);
                goto hclk_disable;
        }

        return 0;

hclk_disable:
        clk_disable_unprepare(a5psw->hclk);
clk_disable:
        clk_disable_unprepare(a5psw->clk);
free_pcs:
        a5psw_pcs_free(a5psw);

        return ret;
}

static int a5psw_remove(struct platform_device *pdev)
{
        struct a5psw *a5psw = platform_get_drvdata(pdev);

        if (!a5psw)
                return 0;

        dsa_unregister_switch(&a5psw->ds);
        a5psw_pcs_free(a5psw);
        clk_disable_unprepare(a5psw->hclk);
        clk_disable_unprepare(a5psw->clk);

        return 0;
}

static void a5psw_shutdown(struct platform_device *pdev)
{
        struct a5psw *a5psw = platform_get_drvdata(pdev);

        if (!a5psw)
                return;

        dsa_switch_shutdown(&a5psw->ds);

        platform_set_drvdata(pdev, NULL);
}

static const struct of_device_id a5psw_of_mtable[] = {
        { .compatible = "renesas,rzn1-a5psw", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, a5psw_of_mtable);

static struct platform_driver a5psw_driver = {
        .driver = {
                .name = "rzn1_a5psw",
                .of_match_table = of_match_ptr(a5psw_of_mtable),
        },
        .probe = a5psw_probe,
        .remove = a5psw_remove,
        .shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");