qca8k-common.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Felix Fietkau <[email protected]>
 * Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016 John Crispin <[email protected]>
 */

#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/if_bridge.h>

#include "qca8k.h"

#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}
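
/* Per-port MIB counter layout of the AR8327/QCA8K family: each entry gives
 * the counter width in 32-bit words, its offset inside the port's MIB block
 * and the name reported through ethtool -S.
 */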
const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(1, 0x04, "RxPause"),
	MIB_DESC(1, 0x08, "RxMulti"),
	MIB_DESC(1, 0x0c, "RxFcsErr"),
	MIB_DESC(1, 0x10, "RxAlignErr"),
	MIB_DESC(1, 0x14, "RxRunt"),
	MIB_DESC(1, 0x18, "RxFragment"),
	MIB_DESC(1, 0x1c, "Rx64Byte"),
	MIB_DESC(1, 0x20, "Rx128Byte"),
	MIB_DESC(1, 0x24, "Rx256Byte"),
	MIB_DESC(1, 0x28, "Rx512Byte"),
	MIB_DESC(1, 0x2c, "Rx1024Byte"),
	MIB_DESC(1, 0x30, "Rx1518Byte"),
	MIB_DESC(1, 0x34, "RxMaxByte"),
	MIB_DESC(1, 0x38, "RxTooLong"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(2, 0x44, "RxBadByte"),
	MIB_DESC(1, 0x4c, "RxOverFlow"),
	MIB_DESC(1, 0x50, "Filtered"),
	MIB_DESC(1, 0x54, "TxBroad"),
	MIB_DESC(1, 0x58, "TxPause"),
	MIB_DESC(1, 0x5c, "TxMulti"),
	MIB_DESC(1, 0x60, "TxUnderRun"),
	MIB_DESC(1, 0x64, "Tx64Byte"),
	MIB_DESC(1, 0x68, "Tx128Byte"),
	MIB_DESC(1, 0x6c, "Tx256Byte"),
	MIB_DESC(1, 0x70, "Tx512Byte"),
	MIB_DESC(1, 0x74, "Tx1024Byte"),
	MIB_DESC(1, 0x78, "Tx1518Byte"),
	MIB_DESC(1, 0x7c, "TxMaxByte"),
	MIB_DESC(1, 0x80, "TxOverSize"),
	MIB_DESC(2, 0x84, "TxByte"),
	MIB_DESC(1, 0x8c, "TxCollision"),
	MIB_DESC(1, 0x90, "TxAbortCol"),
	MIB_DESC(1, 0x94, "TxMultiCol"),
	MIB_DESC(1, 0x98, "TxSingleCol"),
	MIB_DESC(1, 0x9c, "TxExcDefer"),
	MIB_DESC(1, 0xa0, "TxDefer"),
	MIB_DESC(1, 0xa4, "TxLateCol"),
	MIB_DESC(1, 0xa8, "RXUnicast"),
	MIB_DESC(1, 0xac, "TXUnicast"),
};
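
/* Thin wrappers around the regmap helpers: all register access from this
 * common code goes through priv->regmap, which is presumably set up by the
 * bus-specific part of the driver.
 */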
int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
	return regmap_read(priv->regmap, reg, val);
}

int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
	return regmap_write(priv->regmap, reg, val);
}

int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
	return regmap_update_bits(priv->regmap, reg, mask, write_val);
}

static const struct regmap_range qca8k_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4), /* Global control */
	regmap_reg_range(0x0100, 0x0168), /* EEE control */
	regmap_reg_range(0x0200, 0x0270), /* Parser control */
	regmap_reg_range(0x0400, 0x0454), /* ACL */
	regmap_reg_range(0x0600, 0x0718), /* Lookup */
	regmap_reg_range(0x0800, 0x0b70), /* QM */
	regmap_reg_range(0x0c00, 0x0c80), /* PKT */
	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};

const struct regmap_access_table qca8k_readable_table = {
	.yes_ranges = qca8k_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
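
/* The bulk helpers below try the in-band Ethernet management path first
 * when a management master is available and the chip-specific ops provide
 * read_eth/write_eth, and otherwise fall back to one regmap access per
 * 32-bit word.
 */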
/* TODO: remove these extra ops when we can support regmap bulk read/write */
static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	int i, count = len / sizeof(u32), ret;

	if (priv->mgmt_master && priv->info->ops->read_eth &&
	    !priv->info->ops->read_eth(priv, reg, val, len))
		return 0;

	for (i = 0; i < count; i++) {
		ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* TODO: remove these extra ops when we can support regmap bulk read/write */
static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	int i, count = len / sizeof(u32), ret;
	u32 tmp;

	if (priv->mgmt_master && priv->info->ops->write_eth &&
	    !priv->info->ops->write_eth(priv, reg, val, len))
		return 0;

	for (i = 0; i < count; i++) {
		tmp = val[i];

		ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
	u32 val;

	return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
					QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
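
/* The ARL/FDB entry is spread across three 32-bit ATU_DATA registers; the
 * bit ranges noted below (vid 83:72, aging 67:64, portmask 54:48, mac 47:0)
 * describe where each field sits in that 96-bit word.
 */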
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
	u32 reg[3];
	int ret;

	/* load the ARL table into an array */
	ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
	if (ret)
		return ret;

	/* vid - 83:72 */
	fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
	/* aging - 67:64 */
	fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
	/* portmask - 54:48 */
	fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
	/* mac - 47:0 */
	fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
	fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
	fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
	fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
	fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
	fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);

	return 0;
}

static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
			    const u8 *mac, u8 aging)
{
	u32 reg[3] = { 0 };

	/* vid - 83:72 */
	reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
	/* aging - 67:64 */
	reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
	/* portmask - 54:48 */
	reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
	/* mac - 47:0 */
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);

	/* load the array into the ARL table */
	qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
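
/* Issue an ATU (address table) command: write the opcode plus the BUSY bit
 * (and optionally a port for per-port operations) to ATU_FUNC, poll until
 * the hardware clears BUSY, and report whether a LOAD hit a full table.
 * Callers are expected to hold priv->reg_mutex.
 */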
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
			    int port)
{
	u32 reg;
	int ret;

	/* Set the command and FDB index */
	reg = QCA8K_ATU_FUNC_BUSY;
	reg |= cmd;
	if (port >= 0) {
		reg |= QCA8K_ATU_FUNC_PORT_EN;
		reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
	}

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_FDB_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_ATU_FUNC_FULL)
			return -1;
	}

	return 0;
}

static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
			  int port)
{
	int ret;

	qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
	if (ret < 0)
		return ret;

	return qca8k_fdb_read(priv, fdb);
}

static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid, u8 aging)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

void qca8k_fdb_flush(struct qca8k_priv *priv)
{
	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
	mutex_unlock(&priv->reg_mutex);
}
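
/* The two helpers below implement a read-modify-write of a single FDB
 * entry: search for the MAC/VID, merge or clear the requested ports in the
 * entry's portmask, then reload (or purge) the entry. They back the MDB
 * add/del operations further down.
 */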
static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
				       const u8 *mac, u16 vid, u8 aging)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule exists. Delete it first. */
	if (fdb.aging) {
		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
		if (ret)
			goto exit;
	} else {
		fdb.aging = aging;
	}

	/* Add port to fdb portmask */
	fdb.port_mask |= port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
				    const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule doesn't exist. Nothing to delete. */
	if (!fdb.aging) {
		ret = -EINVAL;
		goto exit;
	}

	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	if (ret)
		goto exit;

	/* The only port in the rule is this port. Don't re-insert. */
	if (fdb.port_mask == port_mask)
		goto exit;

	/* Remove port from port mask */
	fdb.port_mask &= ~port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
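
/* VTU (VLAN table) access mirrors the ATU flow: set the command and VID in
 * VTU_FUNC1 together with the BUSY bit, wait for completion, and check for
 * a full table on LOAD.
 */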
static int qca8k_vlan_access(struct qca8k_priv *priv,
			     enum qca8k_vlan_cmd cmd, u16 vid)
{
	u32 reg;
	int ret;

	/* Set the command and VLAN index */
	reg = QCA8K_VTU_FUNC1_BUSY;
	reg |= cmd;
	reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_VLAN_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_VTU_FUNC1_FULL)
			return -ENOMEM;
	}

	return 0;
}
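
/* Membership and egress tagging are both kept in VTU_FUNC0: adding a port
 * updates its per-port egress mode field (untagged vs tagged), while
 * deleting either rewrites the entry or purges the whole VID once the last
 * member is gone.
 */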
static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
			  bool untagged)
{
	u32 reg;
	int ret;

	/* We do the right thing with VLAN 0 and treat it as untagged while
	 * preserving the tag on egress.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;
	reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	if (untagged)
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
	else
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);

	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
	if (ret)
		goto out;
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);

out:
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
{
	u32 reg, mask;
	int ret, i;
	bool del;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);

	/* Check if we're the last member to be removed */
	del = true;
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);

		if ((reg & mask) != mask) {
			del = false;
			break;
		}
	}

	if (del) {
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
	} else {
		ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
		if (ret)
			goto out;
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
	}

out:
	mutex_unlock(&priv->reg_mutex);

	return ret;
}
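
/* MIB init is a multi-step sequence done under reg_mutex: flush the
 * hardware counters, set the MIB_CPU_KEEP bit and finally enable the MIB
 * module.
 */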
int qca8k_mib_init(struct qca8k_priv *priv)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
				 QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
	if (ret)
		goto exit;

	ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
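
/* Enable or disable a port's MAC. The TX/RX MAC bits are always toggled;
 * link auto-negotiation is only touched on ports 1-5, which have an
 * internal PHY.
 */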
void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	/* Port 0 and 6 have no internal PHY */
	if (port > 0 && port < 6)
		mask |= QCA8K_PORT_STATUS_LINK_AUTO;

	if (enable)
		regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
	else
		regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}

void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		       uint8_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < priv->info->mib_count; i++)
		strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
			ETH_GSTRING_LEN);
}

void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
			     uint64_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	u32 reg, i, val;
	u32 hi = 0;
	int ret;

	if (priv->mgmt_master && priv->info->ops->autocast_mib &&
	    priv->info->ops->autocast_mib(ds, port, data) > 0)
		return;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];
		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;

		ret = qca8k_read(priv, reg, &val);
		if (ret < 0)
			continue;

		if (mib->size == 2) {
			ret = qca8k_read(priv, reg + 4, &hi);
			if (ret < 0)
				continue;
		}

		data[i] = val;
		if (mib->size == 2)
			data[i] |= (u64)hi << 32;
	}
}

int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct qca8k_priv *priv = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return priv->info->mib_count;
}

int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_eee *eee)
{
	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
	struct qca8k_priv *priv = ds->priv;
	u32 reg;
	int ret;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
	if (ret < 0)
		goto exit;

	if (eee->eee_enabled)
		reg |= lpi_en;
	else
		reg &= ~lpi_en;
	ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_eee *e)
{
	/* Nothing to do on the port's MAC */
	return 0;
}

void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct qca8k_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
		break;
	case BR_STATE_LEARNING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
		break;
	}

	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
}
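
/* Bridging is implemented with the per-port "portvlan" member mask in the
 * lookup control register: every bridged port lists the other bridge
 * members (plus the CPU port) it is allowed to forward to.
 */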
int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
			   struct dsa_bridge bridge,
			   bool *tx_fwd_offload,
			   struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int port_mask, cpu_port;
	int i, ret;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	port_mask = BIT(cpu_port);

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;
		/* Add this port to the portvlan mask of the other ports
		 * in the bridge
		 */
		ret = regmap_set_bits(priv->regmap,
				      QCA8K_PORT_LOOKUP_CTRL(i),
				      BIT(port));
		if (ret)
			return ret;
		if (i != port)
			port_mask |= BIT(i);
	}

	/* Add all other ports to this port's portvlan mask */
	ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
			QCA8K_PORT_LOOKUP_MEMBER, port_mask);

	return ret;
}

void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port, i;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;
		/* Remove this port from the portvlan mask of the other
		 * ports in the bridge
		 */
		regmap_clear_bits(priv->regmap,
				  QCA8K_PORT_LOOKUP_CTRL(i),
				  BIT(port));
	}

	/* Set the cpu port to be the only one in the portvlan mask of
	 * this port
	 */
	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}

void qca8k_port_fast_age(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
	mutex_unlock(&priv->reg_mutex);
}
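
/* The ATU ageing time is programmed in units of 7 seconds and rounded
 * down, with a floor of 1 so that learning never gets disabled. For
 * example, a requested 300000 ms becomes 300 s / 7 = 42, i.e. an effective
 * ageing time of 294 s.
 */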
int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct qca8k_priv *priv = ds->priv;
	unsigned int secs = msecs / 1000;
	u32 val;

	/* AGE_TIME reg is set in 7s steps */
	val = secs / 7;

	/* Handle a rounded-down value of 0 so that learning is NOT
	 * disabled.
	 */
	if (!val)
		val = 1;

	return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
				  QCA8K_ATU_AGE_TIME_MASK,
				  QCA8K_ATU_AGE_TIME(val));
}

int qca8k_port_enable(struct dsa_switch *ds, int port,
		      struct phy_device *phy)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 1);
	priv->port_enabled_map |= BIT(port);

	if (dsa_is_user_port(ds, port))
		phy_support_asym_pause(phy);

	return 0;
}

void qca8k_port_disable(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
	priv->port_enabled_map &= ~BIT(port);
}

int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	/* We only have a global MTU setting.
	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
	 * ports.
	 * Setting the MTU just for the CPU port is therefore sufficient to
	 * correctly set a value for every port.
	 */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	/* To change the MAX_FRAME_SIZE the cpu ports must be off or
	 * the switch panics.
	 * Turn off both cpu ports before applying the new value to prevent
	 * this.
	 */
	if (priv->port_enabled_map & BIT(0))
		qca8k_port_set_status(priv, 0, 0);

	if (priv->port_enabled_map & BIT(6))
		qca8k_port_set_status(priv, 6, 0);

	/* Include L2 header / FCS length */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
			  ETH_HLEN + ETH_FCS_LEN);

	if (priv->port_enabled_map & BIT(0))
		qca8k_port_set_status(priv, 0, 1);

	if (priv->port_enabled_map & BIT(6))
		qca8k_port_set_status(priv, 6, 1);

	return ret;
}

int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
	return QCA8K_MAX_MTU;
}
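
/* DSA FDB ops: entries with no VID are stored under the default port VID,
 * unicast entries are added as static, and dumping walks the hardware
 * table with the FDB_NEXT command until an empty entry (aging == 0) is
 * returned.
 */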
int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
			  u16 port_mask, u16 vid)
{
	/* Set the vid to the port vlan id if no vid is set */
	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_add(priv, addr, port_mask, vid,
			     QCA8K_ATU_STATUS_STATIC);
}

int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u16 port_mask = BIT(port);

	return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
}

int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u16 port_mask = BIT(port);

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_del(priv, addr, port_mask, vid);
}

int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
			dsa_fdb_dump_cb_t *cb, void *data)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	struct qca8k_fdb _fdb = { 0 };
	int cnt = QCA8K_NUM_FDB_RECORDS;
	bool is_static;
	int ret = 0;

	mutex_lock(&priv->reg_mutex);
	while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
		if (!_fdb.aging)
			break;
		is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
		ret = cb(_fdb.mac, _fdb.vid, is_static, data);
		if (ret)
			break;
	}
	mutex_unlock(&priv->reg_mutex);

	return 0;
}

int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
					   QCA8K_ATU_STATUS_STATIC);
}

int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
}
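
/* Port mirroring: the switch has a single global monitor port
 * (GLOBAL_FW_CTRL0, 0xF when unset), so a new mirror rule is refused if it
 * asks for a different destination. Ingress mirroring is enabled per
 * source port in the lookup control register, egress mirroring in
 * HOL_CTRL1.
 */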
int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
			  struct dsa_mall_mirror_tc_entry *mirror,
			  bool ingress, struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int monitor_port, ret;
	u32 reg, val;

	/* Check for an existing entry */
	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
		return -EEXIST;

	ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
	if (ret)
		return ret;

	/* QCA83xx can have only one port set to mirror mode.
	 * Check that the correct port is requested and return error otherwise.
	 * When no mirror port is set, the value is 0xF
	 */
	monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
		return -EEXIST;

	/* Set the monitor port */
	val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
			 mirror->to_local_port);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
				 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (ret)
		return ret;

	if (ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_update_bits(priv->regmap, reg, val, val);
	if (ret)
		return ret;

	/* Track mirror port for tx and rx to decide when the
	 * mirror port has to be disabled.
	 */
	if (ingress)
		priv->mirror_rx |= BIT(port);
	else
		priv->mirror_tx |= BIT(port);

	return 0;
}

void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct qca8k_priv *priv = ds->priv;
	u32 reg, val;
	int ret;

	if (mirror->ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_clear_bits(priv->regmap, reg, val);
	if (ret)
		goto err;

	if (mirror->ingress)
		priv->mirror_rx &= ~BIT(port);
	else
		priv->mirror_tx &= ~BIT(port);

	/* No port set to send packet to mirror port. Disable mirror port */
	if (!priv->mirror_rx && !priv->mirror_tx) {
		val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
		ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
					 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
		if (ret)
			goto err;
	}

	/* Return on success so the error below is only printed on failure */
	return;

err:
	dev_err(priv->dev, "Failed to del mirror port from %d", port);
}

int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
			      bool vlan_filtering,
			      struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	if (vlan_filtering) {
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
				QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
	} else {
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
				QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
	}

	return ret;
}

int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan,
			struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
	if (ret) {
		dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
		return ret;
	}

	if (pvid) {
		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
				QCA8K_EGREES_VLAN_PORT_MASK(port),
				QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
		if (ret)
			return ret;

		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
				  QCA8K_PORT_VLAN_CVID(vlan->vid) |
				  QCA8K_PORT_VLAN_SVID(vlan->vid));
	}

	return ret;
}

int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_del(priv, port, vlan->vid);
	if (ret)
		dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);

	return ret;
}
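
/* LAG offload is limited to QCA8K_NUM_PORTS_FOR_LAG member ports per trunk
 * and to hash-based TX balancing over the L2 or L2+L3 headers; anything
 * else is rejected with -EOPNOTSUPP so DSA keeps the LAG in software.
 */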
static bool qca8k_lag_can_offload(struct dsa_switch *ds,
				  struct dsa_lag lag,
				  struct netdev_lag_upper_info *info,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp;
	int members = 0;

	if (!lag.id)
		return false;

	dsa_lag_foreach_port(dp, ds->dst, &lag)
		/* Includes the port joining the LAG */
		members++;

	if (members > QCA8K_NUM_PORTS_FOR_LAG) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload more than 4 LAG ports");
		return false;
	}

	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload LAG using hash TX type");
		return false;
	}

	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
	    info->hash_type != NETDEV_LAG_HASH_L23) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload L2 or L2+L3 TX hash");
		return false;
	}

	return true;
}

static int qca8k_lag_setup_hash(struct dsa_switch *ds,
				struct dsa_lag lag,
				struct netdev_lag_upper_info *info)
{
	struct net_device *lag_dev = lag.dev;
	struct qca8k_priv *priv = ds->priv;
	bool unique_lag = true;
	unsigned int i;
	u32 hash = 0;

	switch (info->hash_type) {
	case NETDEV_LAG_HASH_L23:
		hash |= QCA8K_TRUNK_HASH_SIP_EN;
		hash |= QCA8K_TRUNK_HASH_DIP_EN;
		fallthrough;
	case NETDEV_LAG_HASH_L2:
		hash |= QCA8K_TRUNK_HASH_SA_EN;
		hash |= QCA8K_TRUNK_HASH_DA_EN;
		break;
	default: /* We should NEVER reach this */
		return -EOPNOTSUPP;
	}

	/* Check if we are the unique configured LAG */
	dsa_lags_foreach_id(i, ds->dst)
		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
			unique_lag = false;
			break;
		}

	/* The hash mode is global. Make sure the same hash mode is used for
	 * all four possible LAGs.
	 * If we are the only configured LAG, we can set whatever hash mode
	 * we want.
	 * To change the hash mode, all LAGs must be removed and the mode
	 * changed while configuring the last one.
	 */
	if (unique_lag) {
		priv->lag_hash_mode = hash;
	} else if (priv->lag_hash_mode != hash) {
		netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
		return -EOPNOTSUPP;
	}

	return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
				  QCA8K_TRUNK_HASH_MASK, hash);
}
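
/* Trunk membership lives in two places: a per-trunk member bitmap (plus
 * enable bit) in GOL_TRUNK_CTRL0, and per-trunk member-ID slots in
 * GOL_TRUNK_CTRL(id). This helper keeps both in sync when a port joins or
 * leaves a LAG.
 */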
static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
				     struct dsa_lag lag, bool delete)
{
	struct qca8k_priv *priv = ds->priv;
	int ret, id, i;
	u32 val;

	/* DSA LAG IDs are one-based, hardware is zero-based */
	id = lag.id - 1;

	/* Read current port member */
	ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
	if (ret)
		return ret;

	/* Shift val to the correct trunk */
	val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
	val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
	if (delete)
		val &= ~BIT(port);
	else
		val |= BIT(port);

	/* Update port member. With empty portmap disable trunk */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
				 QCA8K_REG_GOL_TRUNK_EN(id),
				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));

	/* Search for an empty member slot when adding, or for this port's
	 * slot when deleting.
	 */
	for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
		ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
		if (ret)
			return ret;

		val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
		val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;

		if (delete) {
			/* If the port is flagged as disabled, assume this
			 * member is empty
			 */
			if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;

			val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
			if (val != port)
				continue;
		} else {
			/* If the port is flagged as enabled, assume this
			 * member is already set
			 */
			if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;
		}

		/* We have found the member to add/remove */
		break;
	}

	/* Set port in the correct port mask or disable port if in delete mode */
	return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
				  !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
				  port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}

int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
			struct netdev_lag_upper_info *info,
			struct netlink_ext_ack *extack)
{
	int ret;

	if (!qca8k_lag_can_offload(ds, lag, info, extack))
		return -EOPNOTSUPP;

	ret = qca8k_lag_setup_hash(ds, lag, info);
	if (ret)
		return ret;

	return qca8k_lag_refresh_portmap(ds, port, lag, false);
}

int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
			 struct dsa_lag lag)
{
	return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
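
/* Probe-time sanity check: read MASK_CTRL, compare the device ID with the
 * one expected in priv->info and stash the silicon revision for the
 * internal PHY driver.
 */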
int qca8k_read_switch_id(struct qca8k_priv *priv)
{
	u32 val;
	u8 id;
	int ret;

	if (!priv->info)
		return -ENODEV;

	ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
	if (ret < 0)
		return -ENODEV;

	id = QCA8K_MASK_CTRL_DEVICE_ID(val);
	if (id != priv->info->id) {
		dev_err(priv->dev,
			"Switch id detected %x but expected %x",
			id, priv->info->id);
		return -ENODEV;
	}

	priv->switch_id = id;

	/* Save revision to communicate to the internal PHY driver */
	priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);

	return 0;
}