flow_offload.h

#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

struct flow_match {
	struct flow_dissector *dissector;
	void *mask;
	void *key;
};
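
/*
 * A flow_match pairs the dissector with two parallel blobs: @key holds the
 * values to match on and @mask flags which bits of each value are
 * significant; @dissector records which key ids are in use and where each
 * one lives inside the blobs. The typed flow_match_* wrappers below view
 * the same two blobs through typed pointers and are filled in by the
 * flow_rule_match_*() helpers.
 */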

struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_ports_range {
	struct flow_dissector_key_ports_range *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};

struct flow_match_pppoe {
	struct flow_dissector_key_pppoe *key, *mask;
};

struct flow_match_l2tpv3 {
	struct flow_dissector_key_l2tpv3 *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out);
void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out);
void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out);
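
/*
 * Illustrative sketch (not part of the original header): a driver parsing a
 * rule typically guards each extraction with flow_rule_match_key() (defined
 * further down) and then reads the typed key/mask pair; my_cfg below is a
 * hypothetical driver-private structure:
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic match;
 *
 *		flow_rule_match_basic(rule, &match);
 *		if (match.mask->ip_proto)
 *			my_cfg.ip_proto = match.key->ip_proto;
 *	}
 */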

enum flow_action_id {
	FLOW_ACTION_ACCEPT = 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	FLOW_ACTION_GATE,
	FLOW_ACTION_PPPOE_PUSH,
	FLOW_ACTION_JUMP,
	FLOW_ACTION_PIPE,
	FLOW_ACTION_VLAN_PUSH_ETH,
	FLOW_ACTION_VLAN_POP_ETH,
	FLOW_ACTION_CONTINUE,
	NUM_FLOW_ACTIONS,
};

/* This mirrors the enum pedit_header_type definition so that tc pedit
 * actions map onto it directly. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK
 * is mapped to FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC = 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,

	FLOW_ACTION_HW_STATS_NUM_BITS
};

enum flow_action_hw_stats {
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
	FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};
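
/*
 * FLOW_ACTION_HW_STATS_DONT_CARE is every stats bit set: with
 * FLOW_ACTION_HW_STATS_NUM_BITS == 3, BIT(3) - 1 == 0b111, i.e.
 * IMMEDIATE | DELAYED | DISABLED.
 */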

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
	u32 cookie_len;
	u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
	enum flow_action_id id;
	u32 hw_index;
	enum flow_action_hw_stats hw_stats;
	action_destr destructor;
	void *destructor_priv;
	union {
		u32 chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device *dev;	/* FLOW_ACTION_REDIRECT */
		struct {		/* FLOW_ACTION_VLAN */
			u16 vid;
			__be16 proto;
			u8 prio;
		} vlan;
		struct {		/* FLOW_ACTION_VLAN_PUSH_ETH */
			unsigned char dst[ETH_ALEN];
			unsigned char src[ETH_ALEN];
		} vlan_push_eth;
		struct {		/* FLOW_ACTION_MANGLE */
					/* FLOW_ACTION_ADD */
			enum flow_action_mangle_base htype;
			u32 offset;
			u32 mask;
			u32 val;
		} mangle;
		struct ip_tunnel_info *tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32 csum_flags;		/* FLOW_ACTION_CSUM */
		u32 mark;		/* FLOW_ACTION_MARK */
		u16 ptype;		/* FLOW_ACTION_PTYPE */
		u32 priority;		/* FLOW_ACTION_PRIORITY */
		struct {		/* FLOW_ACTION_QUEUE */
			u32 ctx;
			u32 index;
			u8 vf;
		} queue;
		struct {		/* FLOW_ACTION_SAMPLE */
			struct psample_group *psample_group;
			u32 rate;
			u32 trunc_size;
			bool truncate;
		} sample;
		struct {		/* FLOW_ACTION_POLICE */
			u32 burst;
			u64 rate_bytes_ps;
			u64 peakrate_bytes_ps;
			u32 avrate;
			u16 overhead;
			u64 burst_pkt;
			u64 rate_pkt_ps;
			u32 mtu;
			struct {
				enum flow_action_id act_id;
				u32 extval;
			} exceed, notexceed;
		} police;
		struct {		/* FLOW_ACTION_CT */
			int action;
			u16 zone;
			struct nf_flowtable *flow_table;
		} ct;
		struct {		/* FLOW_ACTION_CT_METADATA */
			unsigned long cookie;
			u32 mark;
			u32 labels[4];
			bool orig_dir;
		} ct_metadata;
		struct {		/* FLOW_ACTION_MPLS_PUSH */
			u32 label;
			__be16 proto;
			u8 tc;
			u8 bos;
			u8 ttl;
		} mpls_push;
		struct {		/* FLOW_ACTION_MPLS_POP */
			__be16 proto;
		} mpls_pop;
		struct {		/* FLOW_ACTION_MPLS_MANGLE */
			u32 label;
			u8 tc;
			u8 bos;
			u8 ttl;
		} mpls_mangle;
		struct {		/* FLOW_ACTION_GATE */
			s32 prio;
			u64 basetime;
			u64 cycletime;
			u64 cycletimeext;
			u32 num_entries;
			struct action_gate_entry *entries;
		} gate;
		struct {		/* FLOW_ACTION_PPPOE_PUSH */
			u16 sid;
		} pppoe;
	};
	struct flow_action_cookie *cookie; /* user defined action cookie */
};

struct flow_action {
	unsigned int num_entries;
	struct flow_action_entry entries[];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

static inline bool flow_action_is_last_entry(const struct flow_action *action,
					     const struct flow_action_entry *entry)
{
	return entry == &action->entries[action->num_entries - 1];
}

#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])
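
/*
 * Illustrative sketch (not from the original header): drivers translate a
 * rule by walking the action array with flow_action_for_each() and
 * switching on entry->id; the valid union member is the one tagged with the
 * matching FLOW_ACTION_* comment above. my_prog_drop() and my_prog_redir()
 * are hypothetical driver helpers:
 *
 *	const struct flow_action_entry *act;
 *	int i;
 *
 *	flow_action_for_each(i, act, &rule->action) {
 *		switch (act->id) {
 *		case FLOW_ACTION_DROP:
 *			my_prog_drop();
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			my_prog_redir(act->dev);
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */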

static inline bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *action_entry;
	u8 last_hw_stats;
	int i;

	if (flow_offload_has_one_action(action))
		return true;

	flow_action_for_each(i, action_entry, action) {
		if (i && action_entry->hw_stats != last_hw_stats) {
			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
			return false;
		}
		last_hw_stats = action_entry->hw_stats;
	}
	return true;
}

static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
	WARN_ON(!flow_action_has_entries(action));
	return &action->entries[0];
}

static inline bool
__flow_action_hw_stats_check(const struct flow_action *action,
			     struct netlink_ext_ack *extack,
			     bool check_allow_bit,
			     enum flow_action_hw_stats_bit allow_bit)
{
	const struct flow_action_entry *action_entry;

	if (!flow_action_has_entries(action))
		return true;
	if (!flow_action_mixed_hw_stats_check(action, extack))
		return false;

	action_entry = flow_action_first_entry_get(action);

	/* Zero is not a legal value for hw_stats, catch anyone passing it */
	WARN_ON_ONCE(!action_entry->hw_stats);

	if (!check_allow_bit &&
	    ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
		return false;
	} else if (check_allow_bit &&
		   !(action_entry->hw_stats & BIT(allow_bit))) {
		NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
		return false;
	}
	return true;
}

static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
			   struct netlink_ext_ack *extack,
			   enum flow_action_hw_stats_bit allow_bit)
{
	return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	return __flow_action_hw_stats_check(action, extack, false, 0);
}

struct flow_rule {
	struct flow_match match;
	struct flow_action action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
	u64 pkts;
	u64 bytes;
	u64 drops;
	u64 lastused;
	enum flow_action_hw_stats used_hw_stats;
	bool used_hw_stats_valid;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts,
				     u64 drops, u64 lastused,
				     enum flow_action_hw_stats used_hw_stats)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->drops	+= drops;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);

	/* The driver should pass a value with at most one bit set.
	 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
	 */
	WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
	flow_stats->used_hw_stats |= used_hw_stats;
	flow_stats->used_hw_stats_valid = true;
}
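
/*
 * Illustrative sketch (not from the original header): when servicing a
 * FLOW_CLS_STATS request (struct flow_cls_offload, defined below), a driver
 * reads its hardware counters and folds them into the reply, e.g.:
 *
 *	flow_stats_update(&f->stats, bytes, pkts, drops, lastused,
 *			  FLOW_ACTION_HW_STATS_DELAYED);
 *
 * where bytes/pkts/drops/lastused come from the device and the last
 * argument names the single stats type the hardware actually used.
 */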

enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
	FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	bool unlocked_driver_cb;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
	struct Qdisc *sch;
	struct list_head *cb_list_head;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

struct flow_block_cb;

struct flow_block_indr {
	struct list_head list;
	struct net_device *dev;
	struct Qdisc *sch;
	enum flow_block_binder_type binder_type;
	void *data;
	void *cb_priv;
	void (*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
	struct list_head driver_list;
	struct list_head list;
	flow_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	void (*release)(void *cb_priv);
	struct flow_block_indr indr;
	unsigned int refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
					     struct flow_block_offload *offload)
{
	list_del(&block_cb->indr.list);
	list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);
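
/*
 * Illustrative sketch (not from the original header): a driver that only
 * supports clsact ingress blocks can delegate the whole
 * FLOW_BLOCK_BIND/FLOW_BLOCK_UNBIND bookkeeping to
 * flow_block_cb_setup_simple(). my_block_cb_list, my_setup_tc_block_cb and
 * struct my_priv are hypothetical driver names:
 *
 *	static LIST_HEAD(my_block_cb_list);
 *
 *	static int my_setup_tc_block(struct my_priv *priv,
 *				     struct flow_block_offload *f)
 *	{
 *		return flow_block_cb_setup_simple(f, &my_block_cb_list,
 *						  my_setup_tc_block_cb,
 *						  priv, priv, true);
 *	}
 *
 * Passing true as ingress_only rejects anything other than a clsact
 * ingress binding.
 */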

enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};
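
/*
 * Illustrative sketch (not from the original header): the flow_setup_cb_t
 * registered for a block receives a struct flow_cls_offload as type_data
 * when type is TC_SETUP_CLSFLOWER, and dispatches on the command;
 * my_add_rule(), my_del_rule() and my_query_stats() are hypothetical
 * driver helpers:
 *
 *	struct flow_cls_offload *f = type_data;
 *	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *
 *	switch (f->command) {
 *	case FLOW_CLS_REPLACE:
 *		return my_add_rule(priv, f->cookie, rule);
 *	case FLOW_CLS_DESTROY:
 *		return my_del_rule(priv, f->cookie);
 *	case FLOW_CLS_STATS:
 *		return my_query_stats(priv, f);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 *
 * The cookie identifies the same rule across REPLACE/DESTROY/STATS;
 * flow_cls_offload_flow_rule() is defined just below.
 */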

enum offload_act_command  {
	FLOW_ACT_REPLACE,
	FLOW_ACT_DESTROY,
	FLOW_ACT_STATS,
};

struct flow_offload_action {
	struct netlink_ext_ack *extack; /* NULL in FLOW_ACT_STATS process */
	enum offload_act_command command;
	enum flow_action_id id;
	u32 index;
	struct flow_stats stats;
	struct flow_action action;
};

struct flow_offload_action *offload_action_alloc(unsigned int num_actions);

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}

static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
				      enum tc_setup_type type, void *type_data,
				      void *data,
				      void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb));
bool flow_indr_dev_exists(void);

#endif /* _NET_FLOW_OFFLOAD_H */