// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

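/* Translate a flower classifier match into a DPSW ACL key. The hardware
 * key is a (match, mask) pair of dpsw_acl_fields, so every dissector key
 * handled below is copied twice: once into acl_key->match and once into
 * acl_key->mask. Dissector keys not listed here are rejected up front
 * with -EOPNOTSUPP.
 */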
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

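/* Install one ACL entry in hardware. The key is serialized into a command
 * buffer which is DMA-mapped and handed to the firmware by IOVA; once
 * dpsw_acl_add_entry() returns, the buffer is no longer referenced, so it
 * is unmapped and freed on both the success and error paths.
 */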
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	/* Unmap the full mapped length; sizeof(cmd_buff) would only cover
	 * the size of the pointer itself.
	 */
	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	/* As above, unmap the full buffer, not sizeof(cmd_buff). */
	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

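/* Insert an entry into the block's ACL list, which is kept sorted by
 * ascending tc priority, and return the list index at which the entry
 * landed. The index is later used to compute the hardware precedence.
 */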
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

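/* Add a new ACL entry at the right position in the table. The in-use
 * entries always occupy the numerically highest precedence slots, with
 * lower precedence values taking priority in hardware. Every entry that
 * must stay ahead of the new one is therefore re-added one slot lower
 * (dpaa2_switch_acl_entry_set_precedence() is a remove + add) before the
 * new entry itself is programmed.
 */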
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove from hardware the ACL entry */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Remove it from the list also */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

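/* Map a flower/matchall action to a DPSW ACL result: FLOW_ACTION_TRAP
 * redirects the frame to the control interface (so it reaches the CPU),
 * FLOW_ACTION_REDIRECT steers it to another port of the same switch and
 * FLOW_ACTION_DROP discards it. Anything else is rejected.
 */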
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

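/* Apply one mirror entry on every port that shares this filter block.
 * The DPSW has a single reflection (mirror destination) interface, which
 * is set up here on first use; on any failure the ports already
 * configured are rolled back and the mirror port is released again.
 */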
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Setup the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Setup the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

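/* Mirroring keys are far more restrictive than ACL ones: only an exact
 * match on the VLAN ID (mask 0xFFF, no PCP/DEI bits) is accepted, which
 * is what the DPSW_REFLECTION_FILTER_INGRESS_VLAN filter expects.
 */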
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

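/* Illustrative only (interface names are made up): a per-VLAN mirror
 * rule that can be offloaded here would look like
 *
 *   tc filter add dev ethsw0p1 ingress protocol 802.1q flower skip_sw \
 *           vlan_id 100 action mirred egress mirror dev ethsw0p2
 *
 * Matching on vlan_prio or using a partial vlan_id mask is rejected by
 * the key parser above with the extack messages shown there.
 */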
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

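/* Entry point for the FLOW_CLS_REPLACE command. Exactly one action per
 * rule is supported; redirect/trap/drop rules are programmed through the
 * ACL table, while mirred rules use the DPSW reflection support.
 */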
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);
	return 0;
}

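/* The matchall variants below mirror the flower ones, minus key parsing:
 * a matchall classifier matches every frame, which maps either to an ACL
 * entry with an all-zero key/mask or to an INGRESS_ALL reflection filter.
 */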
static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

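/* Illustrative only (interface names are made up): mirroring all ingress
 * traffic from one switch port to another could be requested with
 *
 *   tc filter add dev ethsw0p1 ingress matchall skip_sw \
 *           action mirred egress mirror dev ethsw0p2
 */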
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

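/* When a port joins a filter block, the block's existing mirror entries
 * must be replayed on that port, and withdrawn again when it leaves.
 * Both helpers below unwind on failure so the port either carries all of
 * the block's mirror rules or none of them.
 */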
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}