xrs700x.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 NovaTech LLC
 * George McCollister <[email protected]>
 */

#include <net/dsa.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"

#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)

#define XRS7000X_SUPPORTED_HSR_FEATURES \
	(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
	 NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)

#define XRS7003E_ID	0x100
#define XRS7003F_ID	0x101
#define XRS7004E_ID	0x200
#define XRS7004F_ID	0x201

const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
EXPORT_SYMBOL(xrs7003e_info);

const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
EXPORT_SYMBOL(xrs7003f_info);

const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
EXPORT_SYMBOL(xrs7004e_info);

const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
EXPORT_SYMBOL(xrs7004f_info);

struct xrs700x_regfield {
	struct reg_field rf;
	struct regmap_field **rmf;
};

struct xrs700x_mib {
	unsigned int offset;
	const char *name;
	int stats64_offset;
};

#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}

static const struct xrs700x_mib xrs700x_mibs[] = {
	XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
	XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
	XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
	XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
	XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
	XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
	XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
	XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
	XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
	XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
	XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
	XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
	XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
	XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
	XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
	XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};

static const u8 eth_hsrsup_addr[ETH_ALEN] = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};

static void xrs700x_get_strings(struct dsa_switch *ds, int port,
				u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}

static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(xrs700x_mibs);
}

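/* Latch the hardware counters for a port, fold the 16-bit low/high register
 * pairs into the 64-bit ethtool accumulators, and refresh the cached
 * rtnl_link_stats64 snapshot under the u64_stats sequence counter.
 */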
static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];
	struct rtnl_link_stats64 stats;
	unsigned long flags;
	int i;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&p->mib_mutex);

	/* Capture counter values */
	regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		unsigned int high = 0, low = 0, reg;

		reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
		regmap_read(priv->regmap, reg, &low);
		regmap_read(priv->regmap, reg + 2, &high);

		p->mib_data[i] += (high << 16) | low;

		if (xrs700x_mibs[i].stats64_offset >= 0) {
			u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
			*(u64 *)s += p->mib_data[i];
		}
	}

	/* multicast must be added to rx_packets (which already includes
	 * unicast and broadcast)
	 */
	stats.rx_packets += stats.multicast;

	flags = u64_stats_update_begin_irqsave(&p->syncp);
	p->stats64 = stats;
	u64_stats_update_end_irqrestore(&p->syncp, flags);

	mutex_unlock(&p->mib_mutex);
}

static void xrs700x_mib_work(struct work_struct *work)
{
	struct xrs700x *priv = container_of(work, struct xrs700x,
					    mib_work.work);
	int i;

	for (i = 0; i < priv->ds->num_ports; i++)
		xrs700x_read_port_counters(priv, i);

	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
}

static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
				      u64 *data)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];

	xrs700x_read_port_counters(priv, port);

	mutex_lock(&p->mib_mutex);
	memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
	mutex_unlock(&p->mib_mutex);
}

static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
				struct rtnl_link_stats64 *s)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp);
		*s = p->stats64;
	} while (u64_stats_fetch_retry(&p->syncp, start));
}

static int xrs700x_setup_regmap_range(struct xrs700x *priv)
{
	struct xrs700x_regfield regfields[] = {
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_forward
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_management
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_sel_speed
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_cur_speed
		}
	};
	int i = 0;

	for (; i < ARRAY_SIZE(regfields); i++) {
		*regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
							    priv->regmap,
							    regfields[i].rf);
		if (IS_ERR(*regfields[i].rmf))
			return PTR_ERR(*regfields[i].rmf);
	}

	return 0;
}

static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
						      int port,
						      enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_XRS700X;
}

static int xrs700x_reset(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val;
	int ret;

	ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
	if (ret)
		goto error;

	ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
				       val, !(val & XRS_GENERAL_RESET),
				       10, 1000);
error:
	if (ret) {
		dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
				    ret);
	}

	return ret;
}

static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct xrs700x *priv = ds->priv;
	unsigned int bpdus = 1;
	unsigned int val;

	switch (state) {
	case BR_STATE_DISABLED:
		bpdus = 0;
		fallthrough;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		val = XRS_PORT_DISABLED;
		break;
	case BR_STATE_LEARNING:
		val = XRS_PORT_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		val = XRS_PORT_FORWARDING;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	regmap_fields_write(priv->ps_forward, port, val);

	/* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
	 * which allows BPDU forwarding to the CPU port when the front facing
	 * port is in disabled/learning state.
	 */
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);

	dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
			    __func__, port, state, val);
}

/* Add an inbound policy filter which matches the BPDU destination MAC
 * and forwards to the CPU port. Leave the policy disabled, it will be
 * enabled as needed.
 */
static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare all 48 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
	if (ret)
		return ret;

	/* match BPDU destination 01:80:c2:00:00:00 */
	for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
				   eth_stp_addr[i] |
				   (eth_stp_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror BPDU to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
	if (ret)
		return ret;

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
	if (ret)
		return ret;

	return 0;
}

/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
 * range and forwards to the CPU port without discarding duplicates.
 * This is required to correctly populate the HSR/PRP node_table.
 * Leave the policy disabled, it will be enabled as needed.
 */
static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
				       int fwdport)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare 40 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
	if (ret)
		return ret;

	/* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
	for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
				   eth_hsrsup_addr[i] |
				   (eth_hsrsup_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror HSR/PRP supervision to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
	if (ret)
		return ret;

	if (fwdport >= 0)
		val |= BIT(fwdport);

	/* Allow must be set to prevent duplicate discard */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
	if (ret)
		return ret;

	return 0;
}

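/* Per-port initialization: block forwarding to all non-CPU ports, select the
 * port mode (management for the CPU port, normal otherwise) and install the
 * initially disabled BPDU inbound policy filter on user ports.
 */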
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
	bool cpu_port = dsa_is_cpu_port(ds, port);
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int ret, i;

	xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);

	/* Disable forwarding to non-CPU ports */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	/* 1 = Disable forwarding to the port */
	ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
	if (ret)
		return ret;

	val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
	ret = regmap_fields_write(priv->ps_management, port, val);
	if (ret)
		return ret;

	if (!cpu_port) {
		ret = xrs700x_port_add_bpdu_ipf(ds, port);
		if (ret)
			return ret;
	}

	return 0;
}

static int xrs700x_setup(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;
	int ret, i;

	ret = xrs700x_reset(ds);
	if (ret)
		return ret;

	for (i = 0; i < ds->num_ports; i++) {
		ret = xrs700x_port_setup(ds, i);
		if (ret)
			return ret;
	}

	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);

	return 0;
}

static void xrs700x_teardown(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;

	cancel_delayed_work_sync(&priv->mib_work);
}

static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	switch (port) {
	case 0:
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  config->supported_interfaces);
		config->mac_capabilities = MAC_10FD | MAC_100FD;
		break;

	case 1:
	case 2:
	case 3:
		phy_interface_set_rgmii(config->supported_interfaces);
		config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
		break;

	default:
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		break;
	}
}

static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode, phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val;

	switch (speed) {
	case SPEED_1000:
		val = XRS_PORT_SPEED_1000;
		break;
	case SPEED_100:
		val = XRS_PORT_SPEED_100;
		break;
	case SPEED_10:
		val = XRS_PORT_SPEED_10;
		break;
	default:
		return;
	}

	regmap_fields_write(priv->ps_sel_speed, port, val);

	dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
			    __func__, port, mode, speed);
}

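/* Recompute the forwarding masks of every port that offloads the bridge when
 * a port joins or leaves. A set bit in XRS_PORT_FWD_MASK disables forwarding
 * to that port, so ports outside the bridge stay masked and a leaving port is
 * restricted to the CPU port again.
 */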
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
				 struct dsa_bridge bridge, bool join)
{
	unsigned int i, cpu_mask = 0, mask = 0;
	struct xrs700x *priv = ds->priv;
	int ret;

	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;

		cpu_mask |= BIT(i);

		if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		mask |= BIT(i);
	}

	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* 1 = Disable forwarding to the port */
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
		if (ret)
			return ret;
	}

	if (!join) {
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
				   cpu_mask);
		if (ret)
			return ret;
	}

	return 0;
}

static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge, bool *tx_fwd_offload,
			       struct netlink_ext_ack *extack)
{
	return xrs700x_bridge_common(ds, port, bridge, true);
}

static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
				 struct dsa_bridge bridge)
{
	xrs700x_bridge_common(ds, port, bridge, false);
}

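/* HSR/PRP redundancy is only programmed once both redundant ports (1 and 2)
 * have joined the same hsr device; the first port to join simply returns.
 * The second join assigns LAN A/B, updates the forwarding masks and installs
 * the supervision-frame inbound policies for the pair.
 */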
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
			    struct net_device *hsr)
{
	unsigned int val = XRS_HSR_CFG_HSR_PRP;
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int ret, i, hsr_pair[2];
	enum hsr_version ver;
	bool fwd = false;

	ret = hsr_get_version(hsr, &ver);
	if (ret)
		return ret;

	/* Only ports 1 and 2 can be HSR/PRP redundant ports. */
	if (port != 1 && port != 2)
		return -EOPNOTSUPP;

	if (ver == HSR_V1)
		val |= XRS_HSR_CFG_HSR;
	else if (ver == PRP_V1)
		val |= XRS_HSR_CFG_PRP;
	else
		return -EOPNOTSUPP;

	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	/* We can't enable redundancy on the switch until both
	 * redundant ports have signed up.
	 */
	if (!partner)
		return 0;

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
		     val | XRS_HSR_CFG_LANID_A);
	regmap_write(priv->regmap, XRS_HSR_CFG(port),
		     val | XRS_HSR_CFG_LANID_B);

	/* Clear bits for both redundant ports (HSR only) and the CPU port to
	 * enable forwarding.
	 */
	val = GENMASK(ds->num_ports - 1, 0);
	if (ver == HSR_V1) {
		val &= ~BIT(partner->index);
		val &= ~BIT(port);
		fwd = true;
	}
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Enable inbound policy which allows HSR/PRP supervision forwarding
	 * to the CPU port without discarding duplicates. Continue to
	 * forward to redundant ports when in HSR mode while discarding
	 * duplicates.
	 */
	ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
	if (ret)
		return ret;

	ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
	if (ret)
		return ret;

	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);

	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}

static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
			     struct net_device *hsr)
{
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int i, hsr_pair[2];
	unsigned int val;

	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	if (!partner)
		return 0;

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
	regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);

	/* Clear bit for the CPU port to enable forwarding. */
	val = GENMASK(ds->num_ports - 1, 0);
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
	 * which allows HSR/PRP supervision forwarding to the CPU port without
	 * discarding duplicates.
	 */
	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);

	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}

static const struct dsa_switch_ops xrs700x_ops = {
	.get_tag_protocol = xrs700x_get_tag_protocol,
	.setup = xrs700x_setup,
	.teardown = xrs700x_teardown,
	.port_stp_state_set = xrs700x_port_stp_state_set,
	.phylink_get_caps = xrs700x_phylink_get_caps,
	.phylink_mac_link_up = xrs700x_mac_link_up,
	.get_strings = xrs700x_get_strings,
	.get_sset_count = xrs700x_get_sset_count,
	.get_ethtool_stats = xrs700x_get_ethtool_stats,
	.get_stats64 = xrs700x_get_stats64,
	.port_bridge_join = xrs700x_bridge_join,
	.port_bridge_leave = xrs700x_bridge_leave,
	.port_hsr_join = xrs700x_hsr_join,
	.port_hsr_leave = xrs700x_hsr_leave,
};

static int xrs700x_detect(struct xrs700x *priv)
{
	const struct xrs700x_info *info;
	unsigned int id;
	int ret;

	ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
	if (ret) {
		dev_err(priv->dev, "error %d while reading switch id.\n",
			ret);
		return ret;
	}

	info = of_device_get_match_data(priv->dev);
	if (!info)
		return -EINVAL;

	if (info->id == id) {
		priv->ds->num_ports = info->num_ports;
		dev_info(priv->dev, "%s detected.\n", info->name);
		return 0;
	}

	dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
		info->id, id);

	return -ENODEV;
}

struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
{
	struct dsa_switch *ds;
	struct xrs700x *priv;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;

	priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);

	ds->ops = &xrs700x_ops;
	ds->priv = priv;
	priv->dev = base;

	priv->ds = ds;
	priv->priv = devpriv;

	return priv;
}
EXPORT_SYMBOL(xrs700x_switch_alloc);

static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];

	p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
				   sizeof(*p->mib_data), GFP_KERNEL);
	if (!p->mib_data)
		return -ENOMEM;

	mutex_init(&p->mib_mutex);
	u64_stats_init(&p->syncp);

	return 0;
}

int xrs700x_switch_register(struct xrs700x *priv)
{
	int ret;
	int i;

	ret = xrs700x_detect(priv);
	if (ret)
		return ret;

	ret = xrs700x_setup_regmap_range(priv);
	if (ret)
		return ret;

	priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
				   sizeof(*priv->ports), GFP_KERNEL);
	if (!priv->ports)
		return -ENOMEM;

	for (i = 0; i < priv->ds->num_ports; i++) {
		ret = xrs700x_alloc_port_mib(priv, i);
		if (ret)
			return ret;
	}

	return dsa_register_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_register);

void xrs700x_switch_remove(struct xrs700x *priv)
{
	dsa_unregister_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_remove);

void xrs700x_switch_shutdown(struct xrs700x *priv)
{
	dsa_switch_shutdown(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_shutdown);

MODULE_AUTHOR("George McCollister <[email protected]>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");