/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
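
/* Classifier modules typically register their ops from module init and
 * unregister on exit (sketch; "cls_foo_ops" is a hypothetical
 * tcf_proto_ops instance):
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */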

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv);
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
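
/* A classifier's ->walk() op typically feeds each of its filters through
 * tc_cls_stats_dump() and stops when it returns false (sketch; the head
 * and filter types are hypothetical):
 *
 *	static void cls_foo_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				 bool rtnl_held)
 *	{
 *		struct cls_foo_head *head = rtnl_dereference(tp->root);
 *		struct cls_foo_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, list)
 *			if (!tc_cls_stats_dump(tp, arg, f))
 *				break;
 *	}
 */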

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline void tcf_block_netif_keep_dst(struct tcf_block *block)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}
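
/* A tcf_result caches the bound class in r->class. Rebinding goes through
 * __cls_set_class() (an xchg()) so the previous class, if any, is handed
 * back exactly once to ->unbind_tcf(), which avoids leaking or
 * double-dropping class references when a filter's result is updated.
 */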

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker	ns_tracker;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	/* Note: we do not yet own a reference on net.
	 * This reference might be taken later from tcf_exts_get_net().
	 */
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
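
/* Typical lifecycle in a classifier's ->change() op (sketch; error
 * handling trimmed, TCA_FOO_ACT/TCA_FOO_POLICE stand in for the
 * classifier's own TLV types): init the embedded tcf_exts, validate it
 * against the netlink attributes, and destroy it with tcf_exts_destroy()
 * when the filter goes away:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 */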

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}
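
/* The usual deletion pattern pairs these with tcf_queue_work(): if
 * tcf_exts_get_net() succeeds, destruction may be deferred to a workqueue
 * (whose work function drops the reference via tcf_exts_put_net()); if it
 * fails, the netns is going away and the filter must be torn down
 * synchronously instead.
 */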

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
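
/* Example (sketch): walk every action attached to a filter's exts. The
 * loop stops at the first NULL slot, so the array behaves as a
 * NULL-terminated list of at most TCA_ACT_MAX_PRIO entries:
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts)
 *		pr_debug("action %d: kind %s\n", i, act->ops->kind);
 */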

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 u64 bytes, u64 packets, u64 drops, u64 lastuse,
			 u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		/* Update only actions that accept hardware stats. */
		if (tcf_action_update_hw_stats(a)) {
			preempt_disable();
			tcf_action_stats_update(a, bytes, packets, drops,
						lastuse, true);
			preempt_enable();

			a->used_hw_stats = used_hw_stats;
			a->used_hw_stats_valid = used_hw_stats_valid;
		}
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
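
/* In a classifier's ->classify() op, the usual pattern once a filter
 * matches is (sketch): copy the filter's tcf_result, run the actions,
 * treat a negative return as "keep looking", and propagate a positive
 * TC_ACT_* verdict to the caller:
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;
 *	return err;
 */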

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
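
/* The relation flags short-circuit evaluation the same way && and ||
 * do in C: for "A AND B" a failed A (result == 0) already decides the
 * outcome, and for "A OR B" a successful A (result != 0) does, so in both
 * cases tcf_em_early_end() reports that the rest of the tree can be
 * skipped.
 */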

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
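
/* Classifiers that support ematches (e.g. cls_basic) embed a
 * struct tcf_ematch_tree, validate it from a netlink attribute in their
 * ->change() op, and evaluate it per packet (sketch):
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
 *
 * and in ->classify():
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */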

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return	likely((ptr + len) <= skb_tail_pointer(skb) &&
		       ptr >= skb->head &&
		       (ptr <= (ptr + len)));
}
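
/* The last clause, ptr <= ptr + len, guards against pointer-arithmetic
 * overflow. Typical use (sketch; "off" is a hypothetical configured
 * offset): resolve a layer base with tcf_get_base_ptr() and verify the
 * access window before dereferencing:
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
 *		return 0;
 */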

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    struct netlink_ext_ack *extack);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif
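
/* Qevents let a qdisc run a filter block when a queue event occurs
 * (sch_red uses them for its "early_drop" and "mark" events). The qdisc
 * initializes the qevent from its block-index attribute and passes each
 * affected packet through tcf_qevent_handle(), which may consume the skb
 * (sketch; "qe_early_drop" follows sch_red's naming):
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;
 */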

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
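
/* After masking, the XOR test rejects exactly the case where both SKIP_HW
 * and SKIP_SW are set (flags ^ both == 0): skipping both software and
 * hardware would leave nowhere to run the filter. Every other combination,
 * including neither flag, is accepted.
 */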

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class Qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif

#endif /* __NET_PKT_CLS_H */