bcm_sf2_cfp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

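/* Split the "UDF valid" mask for @num_udf enabled UDFs into the bit
 * that lands in UDF_Valid[8] (upper) and the eight bits that land in
 * UDF_Valid[7:0] (lower). Worked example, assuming UDFS_PER_SLICE is 9
 * as the nine-entry slices[] arrays above suggest: num_udf = 9 yields
 * GENMASK(8, 0) = 0x1ff, so the upper bits are 0x1ff >> 8 = 0x1 and
 * the lower bits are (u8)0x1ff = 0xff; num_udf = 6 yields upper 0,
 * lower 0x3f.
 */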
static inline u32 udf_upper_bits(int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

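/* Kick off a CFP RAM operation. OP_STR_DONE acts as a self-clearing
 * "start" bit: program the opcode, set the bit, then poll (bounded at
 * 1000 iterations) until the hardware clears it again.
 */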
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

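/* Program the Action/Policer RAM pair for @rule_index: when
 * @fwd_map_change is set, override the ARL-derived destination with a
 * DST_MAP forward to @port_num/@queue_num; the rate meter itself is
 * left disabled.
 */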
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

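/* Spread the IPv4 addresses, L4 ports and VLAN TCI across the six
 * 32-bit DATA/MASK port registers of one TCAM slice. The routine is
 * called twice per rule: once with @mask clear for the match data and
 * once with @mask set for the mask.
 */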
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u8 num_udf,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

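/* Build and install a TCPv4/UDPv4 entry: convert the ethtool flow spec
 * into a flow_rule, program the UDF layout and the single IPv4 slice,
 * then commit the TCAM entry and its Action/Policer RAM counterpart.
 */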
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
			       slice_num, num_udf, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
			       SLICE_NUM_MASK, num_udf, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}

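/* Spread one half (source or destination) of an IPv6 3-tuple across
 * the DATA/MASK port registers of a TCAM slice. The 128-bit address
 * does not fall on register boundaries, so each 32-bit word is split:
 * the low byte of ip6_addr[3], for instance, lands in bits [31:24] of
 * register 3 while the next byte up sits in bits [7:0] of register 4.
 */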
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u32 udf_bits,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	return NULL;
}

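/* Returns 0 when an identical rule (same port, flow type, ring cookie
 * and tuple match/mask, including VLAN TCI for FLOW_EXT rules) already
 * sits on the software rules list, non-zero otherwise.
 */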
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		/* Compare VLAN TCI values as well */
		if (rule->fs.flow_type & FLOW_EXT) {
			ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
			ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
		}
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}

	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, vlan_tci, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/*
	 * CHAIN ID		[31:24]	chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, slice_num,
			       0, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, 0, SLICE_NUM_MASK,
			       0, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}

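/* A hedged usage sketch: a rule like the one below, issued against a
 * switch port (interface name and values are only illustrative), ends
 * up here via ETHTOOL_SRXCLSRLINS:
 *
 *	ethtool -N sw0p1 flow-type tcp4 dst-port 80 action 8 loc 2
 *
 * "action 8" is the ring_cookie; assuming SF2_NUM_EGRESS_QUEUES is 8,
 * it selects destination port 1, queue 0 below.
 */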
static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	struct switchdev_obj_port_vlan vlan;
	unsigned int queue_num, port_num;
	u16 vid;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* If the rule is matching a particular VLAN, make sure that we honor
	 * the matching and have it tagged or untagged on the destination port,
	 * we do this on egress with a VLAN entry. The egress tagging attribute
	 * is expected to be provided in h_ext.data[1] bit 0. A 1 means untagged,
	 * a 0 means tagged.
	 */
	if (fs->flow_type & FLOW_EXT) {
		/* We cannot support matching multiple VLAN IDs yet */
		if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
		    VLAN_VID_MASK)
			return -EINVAL;

		vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
		vlan.vid = vid;
		if (be32_to_cpu(fs->h_ext.data[1]) & 1)
			vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
		else
			vlan.flags = 0;

		ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
		if (ret)
			return ret;
	}

	/*
	 * We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if (fs->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if ((fs->flow_type & FLOW_EXT) &&
	    !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
		return -EOPNOTSUPP;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

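/* Invalidate one TCAM entry: read it back, clear its SLICE_VALID bits
 * and write it again. When @next_loc is non-NULL, also report the
 * CHAIN_ID found in the entry so the caller can remove the companion
 * half of a chained (IPv6) rule.
 */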
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave half of a chained IPv6 rule in the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

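/* The rules list keeps m_u and the FLOW_EXT mask fields in inverted
 * form relative to what user-space expects, so XOR every mask byte
 * with all-ones here before a rule is reported back through
 * ETHTOOL_GRXCLSRULE.
 */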
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Report the locations of all unique (user-visible) rules by walking
 * the priv->cfp.unique bitmap
 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

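/* One counter per policer color. Strings and values are laid out as
 * (rule - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats) + color, so rule #1 comes
 * first: TCAM entry #0 is reserved and never reported.
 */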
static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strscpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}

	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* Three counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}