// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

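/* Note: as used throughout this file, dev->regmap[0], [1] and [2] provide
 * 8-, 16- and 32-bit register accesses respectively; the __1/__2/__4
 * suffixes on register names match the access width.
 */
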
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size, max_frame = 0;
	int i;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* Cache the per-port MTU setting */
	dev->ports[port].max_frame = frame_size;

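	/* REG_SW_MTU__2 is a single switch-wide limit, so program the largest
	 * frame size requested on any port.
	 */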
	for (i = 0; i < dev->info->port_cnt; i++)
		max_frame = max(max_frame, dev->ports[i].max_frame);

	return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
				  REG_SW_MTU_MASK, max_frame);
}

int ksz9477_max_mtu(struct ksz_device *dev, int port)
{
	return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2],
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
		SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(dev->regmap[2],
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB. get out of loop */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
	 * and BMSR_ERCAP bits are set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be set up in the device tree, but this function is
	 * still called for that port during initialization.
	 * For RGMII PHY there is no way to access it so the fixed PHY should
	 * be used. For SGMII PHY the supporting code will be added later.
	 */

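	/* The fixed values below emulate a generic gigabit PHY with link up
	 * and autonegotiation complete; e.g. 0x1140 is BMCR_ANENABLE |
	 * BMCR_FULLDPLX | BMCR_SPEED1000.
	 */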
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);

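	/* vlan_table[1] is the untag map and vlan_table[2] the membership map;
	 * the CPU port is never added to the untag map, so frames towards the
	 * host keep their VLAN tag.
	 */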
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}

int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[1] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}

int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

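	/* The hardware search returns entries one at a time: ALU_VALID flags a
	 * result ready to read, and ALU_START self-clears once the walk of the
	 * table is complete.
	 */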
	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:
	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find the matching static entry */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no matching entry found, nothing to delete */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port.
	 * Check if any of the ports is already set up for sniffing.
	 * If so, instruct the user to remove the previous entry and exit.
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check if any of the ports is still referring to the sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

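/* Indirect MMD register access: select the MMD device and register index in
 * setup mode, then switch to the no-post-increment data op and write the value.
 */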
static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
				   u8 dev_addr, u16 reg_addr, u16 val)
{
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
}

static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
{
	/* Apply PHY settings to address errata listed in
	 * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
	 * Silicon Errata and Data Sheet Clarification documents:
	 *
	 * Register settings are needed to improve PHY receive performance
	 */
	ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
	ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);

	/* Transmit waveform amplitude can be improved
	 * (1000BASE-T, 100BASE-TX, 10BASE-Te)
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);

	/* Energy Efficient Ethernet (EEE) feature select must
	 * be manually disabled (except on KSZ8565 which is 100Mbit)
	 */
	if (dev->info->gbit_capable[port])
		ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);

	/* Register settings are required to meet data sheet
	 * supply current specifications
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

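/* The ageing period is an 11-bit count of seconds split across two registers:
 * bits 7:0 go into REG_SW_LUE_CTRL_3 and bits 10:8 into the SW_AGE_CNT field
 * of REG_SW_LUE_CTRL_0, as the FIELD_GET() masks below suggest.
 */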
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* disable DiffServ priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* enable 802.1p priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

	if (dev->info->internal_phy[port]) {
		/* do not force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     false);

		if (dev->info->phy_errata_9477)
			ksz9477_phy_errata_setup(dev, port);
	} else {
		/* force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     true);
	}

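	/* The CPU port is made a member of all user ports, while a user port
	 * initially forwards only towards its upstream (CPU) port.
	 */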
	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface. If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packet to CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}

int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Frame length checking does not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
	u8 data8;
	int ret;

	dev->port_mask = (1 << dev->info->port_cnt) - 1;

	/* turn off SPI DO Edge select */
	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
	if (ret)
		return ret;

	data8 &= ~SPI_AUTO_EDGE_DETECTION;
	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
	if (ret)
		return ret;

	return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <[email protected]>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");