Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking updates from David Miller:
 "Highlights:

  - Gustavo A. R. Silva keeps working on the implicit switch fallthru changes.
  - Support 802.11ax High-Efficiency wireless in cfg80211 et al, From Luca Coelho.
  - Re-enable ASPM in r8169, from Kai-Heng Feng.
  - Add virtual XFRM interfaces, which avoids all of the limitations of existing IPSEC tunnels. From Steffen Klassert.
  - Convert GRO over to use a hash table, so that when we have many flows active we don't traverse a long list during accumulation.
  - Many new self tests for routing, TC, tunnels, etc. Too many contributors to mention them all, but I'm really happy to keep seeing this stuff.
  - Hardware timestamping support for dpaa_eth/fsl-fman from Yangbo Lu.
  - Lots of cleanups and fixes in L2TP code from Guillaume Nault.
  - Add IPSEC offload support to netdevsim, from Shannon Nelson.
  - Add support for slotting with non-uniform distribution to netem packet scheduler, from Yousuk Seung.
  - Add UDP GSO support to mlx5e, from Boris Pismenny.
  - Support offloading of Team LAG in NFP, from John Hurley.
  - Allow to configure TX queue selection based upon RX queue, from Amritha Nambiar.
  - Support ethtool ring size configuration in aquantia, from Anton Mikaev.
  - Support DSCP and flowlabel per-transport in SCTP, from Xin Long.
  - Support list based batching and stack traversal of SKBs, this is very exciting work. From Edward Cree.
  - Busyloop optimizations in vhost_net, from Toshiaki Makita.
  - Introduce the ETF qdisc, which allows time based transmissions. IGB can offload this in hardware. From Vinicius Costa Gomes.
  - Add parameter support to devlink, from Moshe Shemesh.
  - Several multiplication and division optimizations for BPF JIT in nfp driver, from Jiong Wang.
  - Lots of preparatory work to make more of the packet scheduler layer lockless, when possible, from Vlad Buslov.
  - Add ACK filter and NAT awareness to sch_cake packet scheduler, from Toke Høiland-Jørgensen.
  - Support regions and region snapshots in devlink, from Alex Vesker.
  - Allow to attach XDP programs to both HW and SW at the same time on a given device, with initial support in nfp. From Jakub Kicinski.
  - Add TLS RX offload and support in mlx5, from Ilya Lesokhin.
  - Use PHYLIB in r8169 driver, from Heiner Kallweit.
  - All sorts of changes to support Spectrum 2 in mlxsw driver, from Ido Schimmel.
  - PTP support in mv88e6xxx DSA driver, from Andrew Lunn.
  - Make TCP_USER_TIMEOUT socket option more accurate, from Jon Maxwell.
  - Support for templates in packet scheduler classifier, from Jiri Pirko.
  - IPV6 support in RDS, from Ka-Cheong Poon.
  - Native tproxy support in nf_tables, from Máté Eckl.
  - Maintain IP fragment queue in an rbtree, but optimize properly for in-order frags. From Peter Oskolkov.
  - Improved handling of ACKs on hole repairs, from Yuchung Cheng"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1996 commits)
  bpf: test: fix spelling mistake "REUSEEPORT" -> "REUSEPORT"
  hv/netvsc: Fix NULL dereference at single queue mode fallback
  net: filter: mark expected switch fall-through
  xen-netfront: fix warn message as irq device name has '/'
  cxgb4: Add new T5 PCI device ids 0x50af and 0x50b0
  net: dsa: mv88e6xxx: missing unlock on error path
  rds: fix building with IPV6=m
  inet/connection_sock: prefer _THIS_IP_ to current_text_addr
  net: dsa: mv88e6xxx: bitwise vs logical bug
  net: sock_diag: Fix spectre v1 gadget in __sock_diag_cmd()
  ieee802154: hwsim: using right kind of iteration
  net: hns3: Add vlan filter setting by ethtool command -K
  net: hns3: Set tx ring' tc info when netdev is up
  net: hns3: Remove tx ring BD len register in hns3_enet
  net: hns3: Fix desc num set to default when setting channel
  net: hns3: Fix for phy link issue when using marvell phy driver
  net: hns3: Fix for information of phydev lost problem when down/up
  net: hns3: Fix for command format parsing error in hclge_is_all_function_id_zero
  net: hns3: Add support for serdes loopback selftest
  bnxt_en: take coredump_record structure off stack
  ...
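
One small, recurring mechanical cleanup in the header diffs below is replacing an open-coded sizeof(arr)/sizeof(arr[0]) element count with the kernel's ARRAY_SIZE() macro (see the bond_3ad_churn_desc() hunk further down). A minimal, standalone C sketch of that idiom follows; the string table is an illustrative placeholder, not the bonding driver's actual data:

    #include <stdio.h>

    /* Same idea as the kernel's ARRAY_SIZE(): derive the element count
     * from the array itself instead of hard-coding it. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    /* Illustrative placeholder table, not the driver's real strings. */
    static const char *churn_description[] = {
            "monitoring", "churned", "none", "unknown",
    };

    int main(void)
    {
            int max_size = ARRAY_SIZE(churn_description);
            int state = 9;  /* out-of-range states clamp to the last entry */

            if (state >= max_size)
                    state = max_size - 1;
            printf("%s\n", churn_description[state]);
            return 0;
    }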
@@ -6,6 +6,7 @@
 * Public action API for classifiers/qdiscs
 */

#include <linux/refcount.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -26,8 +27,8 @@ struct tc_action {
struct tcf_idrinfo *idrinfo;

u32 tcfa_index;
int tcfa_refcnt;
int tcfa_bindcnt;
refcount_t tcfa_refcnt;
atomic_t tcfa_bindcnt;
u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
@@ -37,7 +38,7 @@ struct tc_action {
spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie *act_cookie;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
};
#define tcf_index common.tcfa_index
@@ -84,14 +85,15 @@ struct tc_action_ops {
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
struct tcf_result *);
struct tcf_result *); /* called under RCU BH lock*/
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index,
struct netlink_ext_ack *extack);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, struct netlink_ext_ack *extack);
int bind, bool rtnl_held,
struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -99,6 +101,8 @@ struct tc_action_ops {
void (*stats_update)(struct tc_action *, u64, u32, u64);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
void (*put_dev)(struct net_device *dev);
int (*delete)(struct net *net, u32 index);
};

struct tc_action_net {
@@ -151,6 +155,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
int bind, bool cpustats);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);

static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -161,18 +169,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind)
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
struct list_head *actions, size_t *attr_size,
struct netlink_ext_ack *extack);
struct tc_action *actions[], size_t *attr_size,
bool rtnl_held, struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
bool rtnl_held,
struct netlink_ext_ack *extack);
int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -190,9 +200,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}

typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);

#ifdef CONFIG_NET_CLS_ACT
int tc_setup_cb_egdev_register(const struct net_device *dev,
tc_setup_cb_t *cb, void *cb_priv);
@@ -108,6 +108,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
bool inet_rcv_saddr_any(const struct sock *sk);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);

@@ -56,6 +56,7 @@ struct sockaddr_ieee802154 {
#define WPAN_WANTACK 0
#define WPAN_SECURITY 1
#define WPAN_SECURITY_LEVEL 2
#define WPAN_WANTLQI 3

#define WPAN_SECURITY_DEFAULT 0
#define WPAN_SECURITY_OFF 1

@@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
void *, size_t, size_t *, bool, u32 *, u16 *);
struct iov_iter *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
@@ -183,6 +183,15 @@ enum {
|
||||
* during the hdev->setup vendor callback.
|
||||
*/
|
||||
HCI_QUIRK_NON_PERSISTENT_DIAG,
|
||||
|
||||
/* When this quirk is set, setup() would be run after every
|
||||
* open() and not just after the first open().
|
||||
*
|
||||
* This quirk can be set before hci_register_dev is called or
|
||||
* during the hdev->setup vendor callback.
|
||||
*
|
||||
*/
|
||||
HCI_QUIRK_NON_PERSISTENT_SETUP,
|
||||
};
|
||||
|
||||
/* HCI device flags */
|
||||
@@ -260,6 +269,7 @@ enum {
|
||||
HCI_VENDOR_DIAG,
|
||||
HCI_FORCE_BREDR_SMP,
|
||||
HCI_FORCE_STATIC_ADDR,
|
||||
HCI_LL_RPA_RESOLUTION,
|
||||
|
||||
__HCI_NUM_FLAGS,
|
||||
};
|
||||
@@ -291,6 +301,14 @@ enum {
|
||||
#define HCI_DH3 0x0800
|
||||
#define HCI_DH5 0x8000
|
||||
|
||||
/* HCI packet types inverted masks */
|
||||
#define HCI_2DH1 0x0002
|
||||
#define HCI_3DH1 0x0004
|
||||
#define HCI_2DH3 0x0100
|
||||
#define HCI_3DH3 0x0200
|
||||
#define HCI_2DH5 0x1000
|
||||
#define HCI_3DH5 0x2000
|
||||
|
||||
#define HCI_HV1 0x0020
|
||||
#define HCI_HV2 0x0040
|
||||
#define HCI_HV3 0x0080
|
||||
@@ -354,6 +372,8 @@ enum {
|
||||
#define LMP_PCONTROL 0x04
|
||||
#define LMP_TRANSPARENT 0x08
|
||||
|
||||
#define LMP_EDR_2M 0x02
|
||||
#define LMP_EDR_3M 0x04
|
||||
#define LMP_RSSI_INQ 0x40
|
||||
#define LMP_ESCO 0x80
|
||||
|
||||
@@ -361,7 +381,9 @@ enum {
|
||||
#define LMP_EV5 0x02
|
||||
#define LMP_NO_BREDR 0x20
|
||||
#define LMP_LE 0x40
|
||||
#define LMP_EDR_3SLOT 0x80
|
||||
|
||||
#define LMP_EDR_5SLOT 0x01
|
||||
#define LMP_SNIFF_SUBR 0x02
|
||||
#define LMP_PAUSE_ENC 0x04
|
||||
#define LMP_EDR_ESCO_2M 0x20
|
||||
@@ -398,7 +420,12 @@ enum {
|
||||
#define HCI_LE_SLAVE_FEATURES 0x08
|
||||
#define HCI_LE_PING 0x10
|
||||
#define HCI_LE_DATA_LEN_EXT 0x20
|
||||
#define HCI_LE_PHY_2M 0x01
|
||||
#define HCI_LE_PHY_CODED 0x08
|
||||
#define HCI_LE_EXT_ADV 0x10
|
||||
#define HCI_LE_EXT_SCAN_POLICY 0x80
|
||||
#define HCI_LE_PHY_2M 0x01
|
||||
#define HCI_LE_PHY_CODED 0x08
|
||||
#define HCI_LE_CHAN_SEL_ALG2 0x40
|
||||
|
||||
/* Connection modes */
|
||||
@@ -1490,6 +1517,16 @@ struct hci_cp_le_write_def_data_len {
|
||||
__le16 tx_time;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
|
||||
|
||||
#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
|
||||
struct hci_rp_le_read_resolv_list_size {
|
||||
__u8 status;
|
||||
__u8 size;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
|
||||
|
||||
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
|
||||
struct hci_rp_le_read_max_data_len {
|
||||
__u8 status;
|
||||
@@ -1506,6 +1543,134 @@ struct hci_cp_le_set_default_phy {
|
||||
__u8 rx_phys;
|
||||
} __packed;
|
||||
|
||||
#define HCI_LE_SET_PHY_1M 0x01
|
||||
#define HCI_LE_SET_PHY_2M 0x02
|
||||
#define HCI_LE_SET_PHY_CODED 0x04
|
||||
|
||||
#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
|
||||
struct hci_cp_le_set_ext_scan_params {
|
||||
__u8 own_addr_type;
|
||||
__u8 filter_policy;
|
||||
__u8 scanning_phys;
|
||||
__u8 data[0];
|
||||
} __packed;
|
||||
|
||||
#define LE_SCAN_PHY_1M 0x01
|
||||
#define LE_SCAN_PHY_2M 0x02
|
||||
#define LE_SCAN_PHY_CODED 0x04
|
||||
|
||||
struct hci_cp_le_scan_phy_params {
|
||||
__u8 type;
|
||||
__le16 interval;
|
||||
__le16 window;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
|
||||
struct hci_cp_le_set_ext_scan_enable {
|
||||
__u8 enable;
|
||||
__u8 filter_dup;
|
||||
__le16 duration;
|
||||
__le16 period;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
|
||||
struct hci_cp_le_ext_create_conn {
|
||||
__u8 filter_policy;
|
||||
__u8 own_addr_type;
|
||||
__u8 peer_addr_type;
|
||||
bdaddr_t peer_addr;
|
||||
__u8 phys;
|
||||
__u8 data[0];
|
||||
} __packed;
|
||||
|
||||
struct hci_cp_le_ext_conn_param {
|
||||
__le16 scan_interval;
|
||||
__le16 scan_window;
|
||||
__le16 conn_interval_min;
|
||||
__le16 conn_interval_max;
|
||||
__le16 conn_latency;
|
||||
__le16 supervision_timeout;
|
||||
__le16 min_ce_len;
|
||||
__le16 max_ce_len;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
|
||||
struct hci_rp_le_read_num_supported_adv_sets {
|
||||
__u8 status;
|
||||
__u8 num_of_sets;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
|
||||
struct hci_cp_le_set_ext_adv_params {
|
||||
__u8 handle;
|
||||
__le16 evt_properties;
|
||||
__u8 min_interval[3];
|
||||
__u8 max_interval[3];
|
||||
__u8 channel_map;
|
||||
__u8 own_addr_type;
|
||||
__u8 peer_addr_type;
|
||||
bdaddr_t peer_addr;
|
||||
__u8 filter_policy;
|
||||
__u8 tx_power;
|
||||
__u8 primary_phy;
|
||||
__u8 secondary_max_skip;
|
||||
__u8 secondary_phy;
|
||||
__u8 sid;
|
||||
__u8 notif_enable;
|
||||
} __packed;
|
||||
|
||||
#define HCI_ADV_PHY_1M 0X01
|
||||
#define HCI_ADV_PHY_2M 0x02
|
||||
#define HCI_ADV_PHY_CODED 0x03
|
||||
|
||||
struct hci_rp_le_set_ext_adv_params {
|
||||
__u8 status;
|
||||
__u8 tx_power;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
|
||||
struct hci_cp_le_set_ext_adv_enable {
|
||||
__u8 enable;
|
||||
__u8 num_of_sets;
|
||||
__u8 data[0];
|
||||
} __packed;
|
||||
|
||||
struct hci_cp_ext_adv_set {
|
||||
__u8 handle;
|
||||
__le16 duration;
|
||||
__u8 max_events;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
|
||||
struct hci_cp_le_set_ext_adv_data {
|
||||
__u8 handle;
|
||||
__u8 operation;
|
||||
__u8 frag_pref;
|
||||
__u8 length;
|
||||
__u8 data[HCI_MAX_AD_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
|
||||
struct hci_cp_le_set_ext_scan_rsp_data {
|
||||
__u8 handle;
|
||||
__u8 operation;
|
||||
__u8 frag_pref;
|
||||
__u8 length;
|
||||
__u8 data[HCI_MAX_AD_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
|
||||
|
||||
#define LE_SET_ADV_DATA_NO_FRAG 0x01
|
||||
|
||||
#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
|
||||
|
||||
#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
|
||||
struct hci_cp_le_set_adv_set_rand_addr {
|
||||
__u8 handle;
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
/* ---- HCI Events ---- */
|
||||
#define HCI_EV_INQUIRY_COMPLETE 0x01
|
||||
|
||||
@@ -1893,6 +2058,23 @@ struct hci_ev_le_conn_complete {
|
||||
#define LE_ADV_SCAN_IND 0x02
|
||||
#define LE_ADV_NONCONN_IND 0x03
|
||||
#define LE_ADV_SCAN_RSP 0x04
|
||||
#define LE_ADV_INVALID 0x05
|
||||
|
||||
/* Legacy event types in extended adv report */
|
||||
#define LE_LEGACY_ADV_IND 0x0013
|
||||
#define LE_LEGACY_ADV_DIRECT_IND 0x0015
|
||||
#define LE_LEGACY_ADV_SCAN_IND 0x0012
|
||||
#define LE_LEGACY_NONCONN_IND 0x0010
|
||||
#define LE_LEGACY_SCAN_RSP_ADV 0x001b
|
||||
#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
|
||||
|
||||
/* Extended Advertising event types */
|
||||
#define LE_EXT_ADV_NON_CONN_IND 0x0000
|
||||
#define LE_EXT_ADV_CONN_IND 0x0001
|
||||
#define LE_EXT_ADV_SCAN_IND 0x0002
|
||||
#define LE_EXT_ADV_DIRECT_IND 0x0004
|
||||
#define LE_EXT_ADV_SCAN_RSP 0x0008
|
||||
#define LE_EXT_ADV_LEGACY_PDU 0x0010
|
||||
|
||||
#define ADDR_LE_DEV_PUBLIC 0x00
|
||||
#define ADDR_LE_DEV_RANDOM 0x01
|
||||
@@ -1957,6 +2139,48 @@ struct hci_ev_le_direct_adv_info {
|
||||
__s8 rssi;
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
|
||||
struct hci_ev_le_ext_adv_report {
|
||||
__le16 evt_type;
|
||||
__u8 bdaddr_type;
|
||||
bdaddr_t bdaddr;
|
||||
__u8 primary_phy;
|
||||
__u8 secondary_phy;
|
||||
__u8 sid;
|
||||
__u8 tx_power;
|
||||
__s8 rssi;
|
||||
__le16 interval;
|
||||
__u8 direct_addr_type;
|
||||
bdaddr_t direct_addr;
|
||||
__u8 length;
|
||||
__u8 data[0];
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
|
||||
struct hci_ev_le_enh_conn_complete {
|
||||
__u8 status;
|
||||
__le16 handle;
|
||||
__u8 role;
|
||||
__u8 bdaddr_type;
|
||||
bdaddr_t bdaddr;
|
||||
bdaddr_t local_rpa;
|
||||
bdaddr_t peer_rpa;
|
||||
__le16 interval;
|
||||
__le16 latency;
|
||||
__le16 supervision_timeout;
|
||||
__u8 clk_accurancy;
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
|
||||
struct hci_evt_le_ext_adv_set_term {
|
||||
__u8 status;
|
||||
__u8 handle;
|
||||
__le16 conn_handle;
|
||||
__u8 num_evts;
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_VENDOR 0xff
|
||||
|
||||
/* Internal events generated by Bluetooth stack */
|
||||
#define HCI_EV_STACK_INTERNAL 0xfd
|
||||
struct hci_ev_stack_internal {
|
||||
|
@@ -171,6 +171,10 @@ struct adv_info {
|
||||
__u8 adv_data[HCI_MAX_AD_LENGTH];
|
||||
__u16 scan_rsp_len;
|
||||
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
|
||||
__s8 tx_power;
|
||||
bdaddr_t random_addr;
|
||||
bool rpa_expired;
|
||||
struct delayed_work rpa_expired_cb;
|
||||
};
|
||||
|
||||
#define HCI_MAX_ADV_INSTANCES 5
|
||||
@@ -221,6 +225,8 @@ struct hci_dev {
|
||||
__u8 features[HCI_MAX_PAGES][8];
|
||||
__u8 le_features[8];
|
||||
__u8 le_white_list_size;
|
||||
__u8 le_resolv_list_size;
|
||||
__u8 le_num_of_adv_sets;
|
||||
__u8 le_states[8];
|
||||
__u8 commands[64];
|
||||
__u8 hci_ver;
|
||||
@@ -314,6 +320,9 @@ struct hci_dev {
|
||||
unsigned long sco_last_tx;
|
||||
unsigned long le_last_tx;
|
||||
|
||||
__u8 le_tx_def_phys;
|
||||
__u8 le_rx_def_phys;
|
||||
|
||||
struct workqueue_struct *workqueue;
|
||||
struct workqueue_struct *req_workqueue;
|
||||
|
||||
@@ -367,6 +376,7 @@ struct hci_dev {
|
||||
struct list_head identity_resolving_keys;
|
||||
struct list_head remote_oob_data;
|
||||
struct list_head le_white_list;
|
||||
struct list_head le_resolv_list;
|
||||
struct list_head le_conn_params;
|
||||
struct list_head pend_le_conns;
|
||||
struct list_head pend_le_reports;
|
||||
@@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
|
||||
u16 scan_rsp_len, u8 *scan_rsp_data,
|
||||
u16 timeout, u16 duration);
|
||||
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
|
||||
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
|
||||
|
||||
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
|
||||
|
||||
@@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
|
||||
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
|
||||
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
|
||||
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
|
||||
#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
|
||||
#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
|
||||
#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
|
||||
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
|
||||
|
||||
/* ----- Extended LMP capabilities ----- */
|
||||
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
|
||||
@@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
|
||||
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
|
||||
hci_dev_test_flag(dev, HCI_SC_ENABLED))
|
||||
|
||||
#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
|
||||
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
|
||||
|
||||
#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
|
||||
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
|
||||
|
||||
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
|
||||
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
|
||||
|
||||
/* Use ext scanning if set ext scan param and ext scan enable is supported */
|
||||
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
|
||||
((dev)->commands[37] & 0x40))
|
||||
/* Use ext create connection if command is supported */
|
||||
#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
|
||||
|
||||
/* Extended advertising support */
|
||||
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
|
||||
|
||||
/* ----- HCI protocols ----- */
|
||||
#define HCI_PROTO_DEFER 0x01
|
||||
|
||||
@@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
|
||||
u8 instance);
|
||||
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
|
||||
u8 instance);
|
||||
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
|
||||
|
||||
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
|
||||
u16 to_multiplier);
|
||||
|
@@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
|
||||
#define MGMT_SETTING_PRIVACY 0x00002000
|
||||
#define MGMT_SETTING_CONFIGURATION 0x00004000
|
||||
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
|
||||
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
|
||||
|
||||
#define MGMT_OP_READ_INFO 0x0004
|
||||
#define MGMT_READ_INFO_SIZE 0
|
||||
@@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
|
||||
#define MGMT_ADV_FLAG_TX_POWER BIT(4)
|
||||
#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
|
||||
#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
|
||||
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
|
||||
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
|
||||
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
|
||||
|
||||
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
|
||||
MGMT_ADV_FLAG_SEC_CODED)
|
||||
|
||||
#define MGMT_OP_REMOVE_ADVERTISING 0x003F
|
||||
struct mgmt_cp_remove_advertising {
|
||||
@@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
|
||||
} __packed;
|
||||
#define MGMT_SET_APPEARANCE_SIZE 2
|
||||
|
||||
#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
|
||||
struct mgmt_rp_get_phy_confguration {
|
||||
__le32 supported_phys;
|
||||
__le32 configurable_phys;
|
||||
__le32 selected_phys;
|
||||
} __packed;
|
||||
#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
|
||||
|
||||
#define MGMT_PHY_BR_1M_1SLOT 0x00000001
|
||||
#define MGMT_PHY_BR_1M_3SLOT 0x00000002
|
||||
#define MGMT_PHY_BR_1M_5SLOT 0x00000004
|
||||
#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
|
||||
#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
|
||||
#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
|
||||
#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
|
||||
#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
|
||||
#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
|
||||
#define MGMT_PHY_LE_1M_TX 0x00000200
|
||||
#define MGMT_PHY_LE_1M_RX 0x00000400
|
||||
#define MGMT_PHY_LE_2M_TX 0x00000800
|
||||
#define MGMT_PHY_LE_2M_RX 0x00001000
|
||||
#define MGMT_PHY_LE_CODED_TX 0x00002000
|
||||
#define MGMT_PHY_LE_CODED_RX 0x00004000
|
||||
|
||||
#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
|
||||
MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
|
||||
MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
|
||||
MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
|
||||
MGMT_PHY_EDR_3M_5SLOT)
|
||||
#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
|
||||
MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
|
||||
MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
|
||||
#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
|
||||
MGMT_PHY_LE_CODED_TX)
|
||||
#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
|
||||
MGMT_PHY_LE_CODED_RX)
|
||||
|
||||
#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
|
||||
struct mgmt_cp_set_phy_confguration {
|
||||
__le32 selected_phys;
|
||||
} __packed;
|
||||
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
|
||||
|
||||
#define MGMT_EV_CMD_COMPLETE 0x0001
|
||||
struct mgmt_ev_cmd_complete {
|
||||
__le16 opcode;
|
||||
@@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
|
||||
__le16 eir_len;
|
||||
__u8 eir[0];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
|
||||
struct mgmt_ev_phy_configuration_changed {
|
||||
__le32 selected_phys;
|
||||
} __packed;
|
||||
|
@@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
|
||||
"none",
|
||||
"unknown"
|
||||
};
|
||||
int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
|
||||
int max_size = ARRAY_SIZE(churn_description);
|
||||
|
||||
if (state >= max_size)
|
||||
state = max_size - 1;
|
||||
|
@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
|
||||
bond_is_active_slave(slave);
|
||||
}
|
||||
|
||||
static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
|
||||
{
|
||||
struct slave *slave;
|
||||
bool active;
|
||||
|
||||
rcu_read_lock();
|
||||
slave = bond_slave_get_rcu(slave_dev);
|
||||
active = bond_is_active_slave(slave);
|
||||
rcu_read_unlock();
|
||||
|
||||
return active;
|
||||
}
|
||||
|
||||
static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
|
||||
{
|
||||
if (len == ETH_ALEN) {
|
||||
|
@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
|
||||
{
|
||||
if (sk_can_busy_loop(sock->sk) &&
|
||||
events && (events & POLL_BUSY_LOOP)) {
|
||||
/* once, only if requested by syscall */
|
||||
sk_busy_loop(sock->sk, 1);
|
||||
}
|
||||
}
|
||||
|
||||
/* if this socket can poll_ll, tell the system call */
|
||||
static inline __poll_t sock_poll_busy_flag(struct socket *sock)
|
||||
{
|
||||
return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
|
||||
}
|
||||
|
||||
/* used in the NIC receive handler to mark the skb */
|
||||
static inline void skb_mark_napi_id(struct sk_buff *skb,
|
||||
struct napi_struct *napi)
|
||||
@@ -151,6 +136,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
sk->sk_napi_id = skb->napi_id;
|
||||
#endif
|
||||
sk_rx_queue_set(sk, skb);
|
||||
}
|
||||
|
||||
/* variant used for unconnected sockets */
|
||||
|
@@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap {
|
||||
struct ieee80211_vht_mcs_info vht_mcs;
|
||||
};
|
||||
|
||||
#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
|
||||
|
||||
/**
|
||||
* struct ieee80211_sta_he_cap - STA's HE capabilities
|
||||
*
|
||||
* This structure describes most essential parameters needed
|
||||
* to describe 802.11ax HE capabilities for a STA.
|
||||
*
|
||||
* @has_he: true iff HE data is valid.
|
||||
* @he_cap_elem: Fixed portion of the HE capabilities element.
|
||||
* @he_mcs_nss_supp: The supported NSS/MCS combinations.
|
||||
* @ppe_thres: Holds the PPE Thresholds data.
|
||||
*/
|
||||
struct ieee80211_sta_he_cap {
|
||||
bool has_he;
|
||||
struct ieee80211_he_cap_elem he_cap_elem;
|
||||
struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
|
||||
u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ieee80211_sband_iftype_data
|
||||
*
|
||||
* This structure encapsulates sband data that is relevant for the
|
||||
* interface types defined in @types_mask. Each type in the
|
||||
* @types_mask must be unique across all instances of iftype_data.
|
||||
*
|
||||
* @types_mask: interface types mask
|
||||
* @he_cap: holds the HE capabilities
|
||||
*/
|
||||
struct ieee80211_sband_iftype_data {
|
||||
u16 types_mask;
|
||||
struct ieee80211_sta_he_cap he_cap;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ieee80211_supported_band - frequency band definition
|
||||
*
|
||||
@@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap {
|
||||
* @n_bitrates: Number of bitrates in @bitrates
|
||||
* @ht_cap: HT capabilities in this band
|
||||
* @vht_cap: VHT capabilities in this band
|
||||
* @n_iftype_data: number of iftype data entries
|
||||
* @iftype_data: interface type data entries. Note that the bits in
|
||||
* @types_mask inside this structure cannot overlap (i.e. only
|
||||
* one occurrence of each type is allowed across all instances of
|
||||
* iftype_data).
|
||||
*/
|
||||
struct ieee80211_supported_band {
|
||||
struct ieee80211_channel *channels;
|
||||
@@ -310,8 +350,55 @@ struct ieee80211_supported_band {
|
||||
int n_bitrates;
|
||||
struct ieee80211_sta_ht_cap ht_cap;
|
||||
struct ieee80211_sta_vht_cap vht_cap;
|
||||
u16 n_iftype_data;
|
||||
const struct ieee80211_sband_iftype_data *iftype_data;
|
||||
};
|
||||
|
||||
/**
|
||||
* ieee80211_get_sband_iftype_data - return sband data for a given iftype
|
||||
* @sband: the sband to search for the STA on
|
||||
* @iftype: enum nl80211_iftype
|
||||
*
|
||||
* Return: pointer to struct ieee80211_sband_iftype_data, or NULL is none found
|
||||
*/
|
||||
static inline const struct ieee80211_sband_iftype_data *
|
||||
ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
|
||||
u8 iftype)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < sband->n_iftype_data; i++) {
|
||||
const struct ieee80211_sband_iftype_data *data =
|
||||
&sband->iftype_data[i];
|
||||
|
||||
if (data->types_mask & BIT(iftype))
|
||||
return data;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
|
||||
* @sband: the sband to search for the STA on
|
||||
*
|
||||
* Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found
|
||||
*/
|
||||
static inline const struct ieee80211_sta_he_cap *
|
||||
ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
|
||||
{
|
||||
const struct ieee80211_sband_iftype_data *data =
|
||||
ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION);
|
||||
|
||||
if (data && data->he_cap.has_he)
|
||||
return &data->he_cap;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* wiphy_read_of_freq_limits - read frequency limits from device tree
|
||||
*
|
||||
@@ -899,6 +986,8 @@ enum station_parameters_apply_mask {
|
||||
* @opmode_notif: operating mode field from Operating Mode Notification
|
||||
* @opmode_notif_used: information if operating mode field is used
|
||||
* @support_p2p_ps: information if station supports P2P PS mechanism
|
||||
* @he_capa: HE capabilities of station
|
||||
* @he_capa_len: the length of the HE capabilities
|
||||
*/
|
||||
struct station_parameters {
|
||||
const u8 *supported_rates;
|
||||
@@ -926,6 +1015,8 @@ struct station_parameters {
|
||||
u8 opmode_notif;
|
||||
bool opmode_notif_used;
|
||||
int support_p2p_ps;
|
||||
const struct ieee80211_he_cap_elem *he_capa;
|
||||
u8 he_capa_len;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
|
||||
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
|
||||
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
|
||||
* @RATE_INFO_FLAGS_60G: 60GHz MCS
|
||||
* @RATE_INFO_FLAGS_HE_MCS: HE MCS information
|
||||
*/
|
||||
enum rate_info_flags {
|
||||
RATE_INFO_FLAGS_MCS = BIT(0),
|
||||
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
|
||||
RATE_INFO_FLAGS_SHORT_GI = BIT(2),
|
||||
RATE_INFO_FLAGS_60G = BIT(3),
|
||||
RATE_INFO_FLAGS_HE_MCS = BIT(4),
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1019,6 +1112,7 @@ enum rate_info_flags {
|
||||
* @RATE_INFO_BW_40: 40 MHz bandwidth
|
||||
* @RATE_INFO_BW_80: 80 MHz bandwidth
|
||||
* @RATE_INFO_BW_160: 160 MHz bandwidth
|
||||
* @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
|
||||
*/
|
||||
enum rate_info_bw {
|
||||
RATE_INFO_BW_20 = 0,
|
||||
@@ -1027,6 +1121,7 @@ enum rate_info_bw {
|
||||
RATE_INFO_BW_40,
|
||||
RATE_INFO_BW_80,
|
||||
RATE_INFO_BW_160,
|
||||
RATE_INFO_BW_HE_RU,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1035,10 +1130,14 @@ enum rate_info_bw {
|
||||
* Information about a receiving or transmitting bitrate
|
||||
*
|
||||
* @flags: bitflag of flags from &enum rate_info_flags
|
||||
* @mcs: mcs index if struct describes a 802.11n bitrate
|
||||
* @mcs: mcs index if struct describes an HT/VHT/HE rate
|
||||
* @legacy: bitrate in 100kbit/s for 802.11abg
|
||||
* @nss: number of streams (VHT only)
|
||||
* @nss: number of streams (VHT & HE only)
|
||||
* @bw: bandwidth (from &enum rate_info_bw)
|
||||
* @he_gi: HE guard interval (from &enum nl80211_he_gi)
|
||||
* @he_dcm: HE DCM value
|
||||
* @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
|
||||
* only valid if bw is %RATE_INFO_BW_HE_RU)
|
||||
*/
|
||||
struct rate_info {
|
||||
u8 flags;
|
||||
@@ -1046,6 +1145,9 @@ struct rate_info {
|
||||
u16 legacy;
|
||||
u8 nss;
|
||||
u8 bw;
|
||||
u8 he_gi;
|
||||
u8 he_dcm;
|
||||
u8 he_ru_alloc;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
|
||||
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
|
||||
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
|
||||
|
||||
struct dcb_ieee_app_prio_map {
|
||||
u64 map[IEEE_8021QAZ_MAX_TCS];
|
||||
};
|
||||
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
|
||||
struct dcb_ieee_app_prio_map *p_map);
|
||||
|
||||
struct dcb_ieee_app_dscp_map {
|
||||
u8 map[64];
|
||||
};
|
||||
void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
|
||||
struct dcb_ieee_app_dscp_map *p_map);
|
||||
u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
|
||||
|
||||
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
|
||||
u32 seq, u32 pid);
|
||||
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
|
||||
|
@@ -27,6 +27,9 @@ struct devlink {
|
||||
struct list_head sb_list;
|
||||
struct list_head dpipe_table_list;
|
||||
struct list_head resource_list;
|
||||
struct list_head param_list;
|
||||
struct list_head region_list;
|
||||
u32 snapshot_id;
|
||||
struct devlink_dpipe_headers *dpipe_headers;
|
||||
const struct devlink_ops *ops;
|
||||
struct device *dev;
|
||||
@@ -295,6 +298,115 @@ struct devlink_resource {
|
||||
|
||||
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
|
||||
|
||||
#define DEVLINK_PARAM_MAX_STRING_VALUE 32
|
||||
enum devlink_param_type {
|
||||
DEVLINK_PARAM_TYPE_U8,
|
||||
DEVLINK_PARAM_TYPE_U16,
|
||||
DEVLINK_PARAM_TYPE_U32,
|
||||
DEVLINK_PARAM_TYPE_STRING,
|
||||
DEVLINK_PARAM_TYPE_BOOL,
|
||||
};
|
||||
|
||||
union devlink_param_value {
|
||||
u8 vu8;
|
||||
u16 vu16;
|
||||
u32 vu32;
|
||||
const char *vstr;
|
||||
bool vbool;
|
||||
};
|
||||
|
||||
struct devlink_param_gset_ctx {
|
||||
union devlink_param_value val;
|
||||
enum devlink_param_cmode cmode;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct devlink_param - devlink configuration parameter data
|
||||
* @name: name of the parameter
|
||||
* @generic: indicates if the parameter is generic or driver specific
|
||||
* @type: parameter type
|
||||
* @supported_cmodes: bitmap of supported configuration modes
|
||||
* @get: get parameter value, used for runtime and permanent
|
||||
* configuration modes
|
||||
* @set: set parameter value, used for runtime and permanent
|
||||
* configuration modes
|
||||
* @validate: validate input value is applicable (within value range, etc.)
|
||||
*
|
||||
* This struct should be used by the driver to fill the data for
|
||||
* a parameter it registers.
|
||||
*/
|
||||
struct devlink_param {
|
||||
u32 id;
|
||||
const char *name;
|
||||
bool generic;
|
||||
enum devlink_param_type type;
|
||||
unsigned long supported_cmodes;
|
||||
int (*get)(struct devlink *devlink, u32 id,
|
||||
struct devlink_param_gset_ctx *ctx);
|
||||
int (*set)(struct devlink *devlink, u32 id,
|
||||
struct devlink_param_gset_ctx *ctx);
|
||||
int (*validate)(struct devlink *devlink, u32 id,
|
||||
union devlink_param_value val,
|
||||
struct netlink_ext_ack *extack);
|
||||
};
|
||||
|
||||
struct devlink_param_item {
|
||||
struct list_head list;
|
||||
const struct devlink_param *param;
|
||||
union devlink_param_value driverinit_value;
|
||||
bool driverinit_value_valid;
|
||||
};
|
||||
|
||||
enum devlink_param_generic_id {
|
||||
DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
|
||||
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
|
||||
DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
|
||||
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
|
||||
|
||||
/* add new param generic ids above here*/
|
||||
__DEVLINK_PARAM_GENERIC_ID_MAX,
|
||||
DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
|
||||
};
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
|
||||
#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
|
||||
#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
|
||||
#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
|
||||
#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
|
||||
{ \
|
||||
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
|
||||
.name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
|
||||
.type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
|
||||
.generic = true, \
|
||||
.supported_cmodes = _cmodes, \
|
||||
.get = _get, \
|
||||
.set = _set, \
|
||||
.validate = _validate, \
|
||||
}
|
||||
|
||||
#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \
|
||||
{ \
|
||||
.id = _id, \
|
||||
.name = _name, \
|
||||
.type = _type, \
|
||||
.supported_cmodes = _cmodes, \
|
||||
.get = _get, \
|
||||
.set = _set, \
|
||||
.validate = _validate, \
|
||||
}
|
||||
|
||||
struct devlink_region;
|
||||
|
||||
typedef void devlink_snapshot_data_dest_t(const void *data);
|
||||
|
||||
struct devlink_ops {
|
||||
int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
|
||||
int (*port_type_set)(struct devlink_port *devlink_port,
|
||||
@@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink,
|
||||
void *occ_get_priv);
|
||||
void devlink_resource_occ_get_unregister(struct devlink *devlink,
|
||||
u64 resource_id);
|
||||
int devlink_params_register(struct devlink *devlink,
|
||||
const struct devlink_param *params,
|
||||
size_t params_count);
|
||||
void devlink_params_unregister(struct devlink *devlink,
|
||||
const struct devlink_param *params,
|
||||
size_t params_count);
|
||||
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
|
||||
union devlink_param_value *init_val);
|
||||
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
|
||||
union devlink_param_value init_val);
|
||||
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
|
||||
struct devlink_region *devlink_region_create(struct devlink *devlink,
|
||||
const char *region_name,
|
||||
u32 region_max_snapshots,
|
||||
u64 region_size);
|
||||
void devlink_region_destroy(struct devlink_region *region);
|
||||
u32 devlink_region_shapshot_id_get(struct devlink *devlink);
|
||||
int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
|
||||
u8 *data, u32 snapshot_id,
|
||||
devlink_snapshot_data_dest_t *data_destructor);
|
||||
|
||||
#else
|
||||
|
||||
@@ -622,6 +754,69 @@ devlink_resource_occ_get_unregister(struct devlink *devlink,
|
||||
{
|
||||
}
|
||||
|
||||
static inline int
|
||||
devlink_params_register(struct devlink *devlink,
|
||||
const struct devlink_param *params,
|
||||
size_t params_count)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
devlink_params_unregister(struct devlink *devlink,
|
||||
const struct devlink_param *params,
|
||||
size_t params_count)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static inline int
|
||||
devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
|
||||
union devlink_param_value *init_val)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int
|
||||
devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
|
||||
union devlink_param_value init_val)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline void
|
||||
devlink_param_value_changed(struct devlink *devlink, u32 param_id)
|
||||
{
|
||||
}
|
||||
|
||||
static inline struct devlink_region *
|
||||
devlink_region_create(struct devlink *devlink,
|
||||
const char *region_name,
|
||||
u32 region_max_snapshots,
|
||||
u64 region_size)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void
|
||||
devlink_region_destroy(struct devlink_region *region)
|
||||
{
|
||||
}
|
||||
|
||||
static inline u32
|
||||
devlink_region_shapshot_id_get(struct devlink *devlink)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
|
||||
u8 *data, u32 snapshot_id,
|
||||
devlink_snapshot_data_dest_t *data_destructor)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _NET_DEVLINK_H_ */
|
||||
|
@@ -259,6 +259,9 @@ struct dsa_switch {
|
||||
/* Number of switch port queues */
|
||||
unsigned int num_tx_queues;
|
||||
|
||||
unsigned long *bitmap;
|
||||
unsigned long _bitmap;
|
||||
|
||||
/* Dynamically allocated ports, keep last */
|
||||
size_t num_ports;
|
||||
struct dsa_port ports[];
|
||||
|
@@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
|
||||
return dst_orig;
|
||||
}
|
||||
|
||||
static inline struct dst_entry *
|
||||
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
|
||||
const struct flowi *fl, const struct sock *sk,
|
||||
int flags, u32 if_id)
|
||||
{
|
||||
return dst_orig;
|
||||
}
|
||||
|
||||
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
|
||||
struct dst_entry *dst_orig,
|
||||
const struct flowi *fl,
|
||||
@@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
|
||||
const struct flowi *fl, const struct sock *sk,
|
||||
int flags);
|
||||
|
||||
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
|
||||
struct dst_entry *dst_orig,
|
||||
const struct flowi *fl,
|
||||
const struct sock *sk, int flags,
|
||||
u32 if_id);
|
||||
|
||||
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
|
||||
const struct flowi *fl, const struct sock *sk,
|
||||
int flags);
|
||||
|
@@ -47,7 +47,7 @@ struct flow_dissector_key_tags {
|
||||
struct flow_dissector_key_vlan {
|
||||
u16 vlan_id:12,
|
||||
vlan_priority:3;
|
||||
u16 padding;
|
||||
__be16 vlan_tpid;
|
||||
};
|
||||
|
||||
struct flow_dissector_key_mpls {
|
||||
@@ -57,6 +57,21 @@ struct flow_dissector_key_mpls {
|
||||
mpls_label:20;
|
||||
};
|
||||
|
||||
#define FLOW_DIS_TUN_OPTS_MAX 255
|
||||
/**
|
||||
* struct flow_dissector_key_enc_opts:
|
||||
* @data: tunnel option data
|
||||
* @len: length of tunnel option data
|
||||
* @dst_opt_type: tunnel option type
|
||||
*/
|
||||
struct flow_dissector_key_enc_opts {
|
||||
u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired
|
||||
* here but seems difficult to #include
|
||||
*/
|
||||
u8 len;
|
||||
__be16 dst_opt_type;
|
||||
};
|
||||
|
||||
struct flow_dissector_key_keyid {
|
||||
__be32 keyid;
|
||||
};
|
||||
@@ -206,6 +221,9 @@ enum flow_dissector_key_id {
|
||||
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
|
||||
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
|
||||
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
|
||||
FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */
|
||||
FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
|
||||
FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
|
||||
|
||||
FLOW_DISSECTOR_KEY_MAX,
|
||||
};
|
||||
@@ -237,6 +255,7 @@ struct flow_keys {
|
||||
struct flow_dissector_key_basic basic;
|
||||
struct flow_dissector_key_tags tags;
|
||||
struct flow_dissector_key_vlan vlan;
|
||||
struct flow_dissector_key_vlan cvlan;
|
||||
struct flow_dissector_key_keyid keyid;
|
||||
struct flow_dissector_key_ports ports;
|
||||
struct flow_dissector_key_addrs addrs;
|
||||
|
@@ -59,13 +59,13 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
|
||||
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
|
||||
struct net_rate_estimator __rcu **rate_est,
|
||||
spinlock_t *stats_lock,
|
||||
spinlock_t *lock,
|
||||
seqcount_t *running, struct nlattr *opt);
|
||||
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
|
||||
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
|
||||
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
|
||||
struct net_rate_estimator __rcu **ptr,
|
||||
spinlock_t *stats_lock,
|
||||
spinlock_t *lock,
|
||||
seqcount_t *running, struct nlattr *opt);
|
||||
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
|
||||
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
|
||||
|
@@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2017 Intel Deutschland GmbH
|
||||
* Copyright (c) 2018 Intel Corporation
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence {
|
||||
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
|
||||
IEEE80211_RADIOTAP_VHT = 21,
|
||||
IEEE80211_RADIOTAP_TIMESTAMP = 22,
|
||||
IEEE80211_RADIOTAP_HE = 23,
|
||||
IEEE80211_RADIOTAP_HE_MU = 24,
|
||||
|
||||
/* valid in every it_present bitmap, even vendor namespaces */
|
||||
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
|
||||
@@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags {
|
||||
IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
|
||||
};
|
||||
|
||||
struct ieee80211_radiotap_he {
|
||||
__le16 data1, data2, data3, data4, data5, data6;
|
||||
};
|
||||
|
||||
enum ieee80211_radiotap_he_bits {
|
||||
IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
|
||||
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
|
||||
IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
|
||||
IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
|
||||
IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
|
||||
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
|
||||
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
|
||||
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
|
||||
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
|
||||
IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
|
||||
IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
|
||||
IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
|
||||
IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
|
||||
};
|
||||
|
||||
struct ieee80211_radiotap_he_mu {
|
||||
__le16 flags1, flags2;
|
||||
u8 ru_ch1[4];
|
||||
u8 ru_ch2[4];
|
||||
};
|
||||
|
||||
enum ieee80211_radiotap_he_mu_bits {
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
|
||||
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400,
|
||||
IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
|
||||
};
|
||||
|
||||
/**
|
||||
* ieee80211_get_radiotap_len - get radiotap header length
|
||||
*/
|
||||
|
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
|
||||
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
|
||||
int *addr_len);
|
||||
|
||||
struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
|
||||
struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
|
||||
int inet_gro_complete(struct sk_buff *skb, int nhoff);
|
||||
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
|
||||
netdev_features_t features);
|
||||
|
@@ -19,6 +19,7 @@
|
||||
#include <linux/string.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include <net/inet_sock.h>
|
||||
#include <net/request_sock.h>
|
||||
@@ -167,7 +168,8 @@ enum inet_csk_ack_state_t {
|
||||
ICSK_ACK_SCHED = 1,
|
||||
ICSK_ACK_TIMER = 2,
|
||||
ICSK_ACK_PUSHED = 4,
|
||||
ICSK_ACK_PUSHED2 = 8
|
||||
ICSK_ACK_PUSHED2 = 8,
|
||||
ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
|
||||
};
|
||||
|
||||
void inet_csk_init_xmit_timers(struct sock *sk,
|
||||
@@ -224,7 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
|
||||
|
||||
if (when > max_when) {
|
||||
pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
|
||||
sk, what, when, current_text_addr());
|
||||
sk, what, when, (void *)_THIS_IP_);
|
||||
when = max_when;
|
||||
}
|
||||
|
||||
|
@@ -2,7 +2,7 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/rhashtable.h>
#include <linux/rhashtable-types.h>

struct netns_frags {
/* sysctls */

@@ -57,7 +57,9 @@ struct frag_v6_compare_key {
* @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
* @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
* @last_run_head: the head of the last "run". see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far

@@ -75,8 +77,10 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
struct sk_buff *fragments;
struct sk_buff *fragments; /* Used in IPv6. */
struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;

@@ -112,6 +116,9 @@ void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root);

static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
@@ -148,6 +148,7 @@ struct inet_cork {
__s16 tos;
char priority;
__u16 gso_size;
u64 transmit_time;
};

struct inet_cork_full {

@@ -358,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk)
return !!inet_sk(sk)->convert_csum;
}

static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv4.sysctl_ip_nonlocal_bind ||
inet->freebind || inet->transparent;
}

#endif /* _INET_SOCK_H */
@@ -72,13 +72,27 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
__u8 tx_flags;
__u8 ttl;
__s16 tos;
char priority;
__u16 gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
const struct inet_sock *inet)
{
ipcm_init(ipcm);

ipcm->sockc.tsflags = inet->sk.sk_tsflags;
ipcm->oif = inet->sk.sk_bound_dev_if;
ipcm->addr = inet->inet_saddr;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

@@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);

@@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
__u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset, int len,

@@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);

static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
struct flowi *fl)
{
return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
@@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to,
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len)
const void *from, int len,
__be16 flags)
{
memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
info->key.tun_flags |= flags;
}

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)

@@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to,
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len)
const void *from, int len,
__be16 flags)
{
info->options_len = 0;
info->key.tun_flags |= flags;
}

#endif /* CONFIG_INET */
@@ -335,6 +335,11 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};

/* Connection templates use bits from state */
#define IP_VS_CTPL_S_NONE 0x0000
#define IP_VS_CTPL_S_ASSURED 0x0001
#define IP_VS_CTPL_S_LAST 0x0002

/* Delta sequence info structure
* Each ip_vs_conn has 2 (output AND input seq. changes).
* Only used in the VS/NAT.

@@ -1221,7 +1226,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);

const char *ip_vs_state_name(__u16 proto, int state);
const char *ip_vs_state_name(const struct ip_vs_conn *cp);

void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);

@@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}

/* Mark our template as assured */
static inline void
ip_vs_control_assure_ct(struct ip_vs_conn *cp)
{
struct ip_vs_conn *ct = cp->control;

if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
(ct->flags & IP_VS_CONN_F_TEMPLATE))
ct->state |= IP_VS_CTPL_S_ASSURED;
}

/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
@@ -294,6 +294,7 @@ struct ipv6_fl_socklist {
};

struct ipcm6_cookie {
struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
__s8 dontfrag;

@@ -301,6 +302,25 @@ struct ipcm6_cookie {
__u16 gso_size;
};

static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
.tclass = -1,
.dontfrag = -1,
};
}

static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
const struct ipv6_pinfo *np)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
.tclass = np->tclass,
.dontfrag = np->dontfrag,
};
}

static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;

@@ -554,34 +574,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
}
#endif

struct inet_frag_queue;

enum ip6_defrag_users {
IP6_DEFRAG_LOCAL_DELIVER,
IP6_DEFRAG_CONNTRACK_IN,
__IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_OUT,
__IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

void ip6_frag_init(struct inet_frag_queue *q, const void *a);
extern const struct rhashtable_params ip6_rhash_params;

/*
* Equivalent of ipv4 struct ip
*/
struct frag_queue {
struct inet_frag_queue q;

int iif;
__u16 nhoffset;
u8 ecn;
};

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64

@@ -790,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,

#if IS_ENABLED(CONFIG_IPV6)

static inline bool ipv6_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv6.sysctl.ip_nonlocal_bind ||
inet->freebind || inet->transparent;
}

/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF 0
#define IP6_AUTO_FLOW_LABEL_OPTOUT 1

@@ -915,6 +914,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);

@@ -931,8 +932,7 @@ int ip6_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
const struct sockcm_cookie *sockc);
struct rt6_info *rt, unsigned int flags);

int ip6_push_pending_frames(struct sock *sk);

@@ -949,8 +949,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
struct inet_cork_full *cork,
const struct sockcm_cookie *sockc);
struct inet_cork_full *cork);

static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
include/net/ipv6_frag.h (new file, 104 lines)
@@ -0,0 +1,104 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

enum ip6_defrag_users {
IP6_DEFRAG_LOCAL_DELIVER,
IP6_DEFRAG_CONNTRACK_IN,
__IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_OUT,
__IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

/*
* Equivalent of ipv4 struct ip
*/
struct frag_queue {
struct inet_frag_queue q;

int iif;
__u16 nhoffset;
u8 ecn;
};

#if IS_ENABLED(CONFIG_IPV6)
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
const struct frag_v6_compare_key *key = a;

q->key.v6 = *key;
fq->ecn = 0;
}

static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
return jhash2(data,
sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
const struct inet_frag_queue *fq = data;

return jhash2((const u32 *)&fq->key.v6,
sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
const struct frag_v6_compare_key *key = arg->key;
const struct inet_frag_queue *fq = ptr;

return !!memcmp(&fq->key, key, sizeof(*key));
}

static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
struct net_device *dev = NULL;
struct sk_buff *head;

rcu_read_lock();
spin_lock(&fq->q.lock);

if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;

inet_frag_kill(&fq->q);

dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
goto out;

__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

/* Don't send error if the first segment did not arrive. */
head = fq->q.fragments;
if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
goto out;

head->dev = dev;
skb_get(head);
spin_unlock(&fq->q.lock);

icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
kfree_skb(head);
goto out_rcu_unlock;

out:
spin_unlock(&fq->q.lock);
out_rcu_unlock:
rcu_read_unlock();
inet_frag_put(&fq->q);
}
#endif
#endif
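
For context, a minimal sketch (not part of this diff) of how the ip6frag_* callbacks above are typically wired into an rhashtable_params table; everything except the three callbacks, notably the "node" member name and the extra fields, is an illustrative assumption rather than something taken from this patch set:

static const struct rhashtable_params example_ip6frag_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node), /* assumed member name */
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};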
include/net/lag.h (new file, 17 lines)
@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_LAG_H
#define _LINUX_IF_LAG_H

#include <linux/netdevice.h>
#include <linux/if_team.h>
#include <net/bonding.h>

static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
{
if (netif_is_team_port(port_dev))
return team_port_dev_txable(port_dev);
else
return bond_is_active_slave_dev(port_dev);
}

#endif /* _LINUX_IF_LAG_H */
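
A short usage sketch (not part of this diff; the function and array names are hypothetical driver-private names): a driver choosing which LAG member ports to program into hardware can use the new helper to skip ports that cannot transmit at the moment.

static void example_sync_lag_ports(struct net_device **ports, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!net_lag_port_dev_txable(ports[i]))
			continue;	/* disabled team port or inactive bond slave */
		/* program ports[i] into the hardware LAG group here */
	}
}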
|
@@ -23,6 +23,7 @@
|
||||
#include <linux/ieee80211.h>
|
||||
#include <net/cfg80211.h>
|
||||
#include <net/codel.h>
|
||||
#include <net/ieee80211_radiotap.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
/**
|
||||
@@ -162,6 +163,8 @@ enum ieee80211_ac_numbers {
|
||||
* @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
|
||||
* @acm: is mandatory admission control required for the access category
|
||||
* @uapsd: is U-APSD mode enabled for the queue
|
||||
* @mu_edca: is the MU EDCA configured
|
||||
* @mu_edca_param_rec: MU EDCA Parameter Record for HE
|
||||
*/
|
||||
struct ieee80211_tx_queue_params {
|
||||
u16 txop;
|
||||
@@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params {
|
||||
u8 aifs;
|
||||
bool acm;
|
||||
bool uapsd;
|
||||
bool mu_edca;
|
||||
struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
|
||||
};
|
||||
|
||||
struct ieee80211_low_level_stats {
|
||||
@@ -463,6 +468,15 @@ struct ieee80211_mu_group_data {
|
||||
* This structure keeps information about a BSS (and an association
|
||||
* to that BSS) that can change during the lifetime of the BSS.
|
||||
*
|
||||
* @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
|
||||
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
|
||||
* @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
|
||||
* @uora_exists: is the UORA element advertised by AP
|
||||
* @ack_enabled: indicates support to receive a multi-TID that solicits either
|
||||
* ACK, BACK or both
|
||||
* @uora_ocw_range: UORA element's OCW Range field
|
||||
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
|
||||
* @he_support: does this BSS support HE
|
||||
* @assoc: association status
|
||||
* @ibss_joined: indicates whether this station is part of an IBSS
|
||||
* or not
|
||||
@@ -550,6 +564,14 @@ struct ieee80211_mu_group_data {
|
||||
*/
|
||||
struct ieee80211_bss_conf {
|
||||
const u8 *bssid;
|
||||
u8 bss_color;
|
||||
u8 htc_trig_based_pkt_ext;
|
||||
bool multi_sta_back_32bit;
|
||||
bool uora_exists;
|
||||
bool ack_enabled;
|
||||
u8 uora_ocw_range;
|
||||
u16 frame_time_rts_th;
|
||||
bool he_support;
|
||||
/* association related data */
|
||||
bool assoc, ibss_joined;
|
||||
bool ibss_creator;
|
||||
@@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
|
||||
* @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
|
||||
* frame
|
||||
* @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
|
||||
* @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
|
||||
* (&struct ieee80211_radiotap_he, mac80211 will fill in
|
||||
* - DATA3_DATA_MCS
|
||||
* - DATA3_DATA_DCM
|
||||
* - DATA3_CODING
|
||||
* - DATA5_GI
|
||||
* - DATA5_DATA_BW_RU_ALLOC
|
||||
* - DATA6_NSTS
|
||||
* - DATA3_STBC
|
||||
* from the RX info data, so leave those zeroed when building this data)
|
||||
* @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
|
||||
* (&struct ieee80211_radiotap_he_mu)
|
||||
*/
|
||||
enum mac80211_rx_flags {
|
||||
RX_FLAG_MMIC_ERROR = BIT(0),
|
||||
@@ -1134,6 +1168,8 @@ enum mac80211_rx_flags {
|
||||
RX_FLAG_ICV_STRIPPED = BIT(23),
|
||||
RX_FLAG_AMPDU_EOF_BIT = BIT(24),
|
||||
RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
|
||||
RX_FLAG_RADIOTAP_HE = BIT(26),
|
||||
RX_FLAG_RADIOTAP_HE_MU = BIT(27),
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding {
|
||||
RX_ENC_LEGACY = 0,
|
||||
RX_ENC_HT,
|
||||
RX_ENC_VHT,
|
||||
RX_ENC_HE,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding {
|
||||
* @encoding: &enum mac80211_rx_encoding
|
||||
* @bw: &enum rate_info_bw
|
||||
* @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
|
||||
* @he_ru: HE RU, from &enum nl80211_he_ru_alloc
|
||||
* @he_gi: HE GI, from &enum nl80211_he_gi
|
||||
* @he_dcm: HE DCM value
|
||||
* @rx_flags: internal RX flags for mac80211
|
||||
* @ampdu_reference: A-MPDU reference number, must be a different value for
|
||||
* each A-MPDU but the same for each subframe within one A-MPDU
|
||||
@@ -1211,7 +1251,8 @@ struct ieee80211_rx_status {
|
||||
u32 flag;
|
||||
u16 freq;
|
||||
u8 enc_flags;
|
||||
u8 encoding:2, bw:3;
|
||||
u8 encoding:2, bw:3, he_ru:3;
|
||||
u8 he_gi:2, he_dcm:1;
|
||||
u8 rate_idx;
|
||||
u8 nss;
|
||||
u8 rx_flags;
|
||||
@@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates {
|
||||
* @supp_rates: Bitmap of supported rates (per band)
|
||||
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
|
||||
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
|
||||
* @he_cap: HE capabilities of this STA
|
||||
* @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
|
||||
* that this station is allowed to transmit to us.
|
||||
* Can be modified by driver.
|
||||
@@ -1805,7 +1847,8 @@ struct ieee80211_sta {
|
||||
u16 aid;
|
||||
struct ieee80211_sta_ht_cap ht_cap;
|
||||
struct ieee80211_sta_vht_cap vht_cap;
|
||||
u8 max_rx_aggregation_subframes;
|
||||
struct ieee80211_sta_he_cap he_cap;
|
||||
u16 max_rx_aggregation_subframes;
|
||||
bool wme;
|
||||
u8 uapsd_queues;
|
||||
u8 max_sp;
|
||||
@@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags {
|
||||
* it shouldn't be set.
|
||||
*
|
||||
* @max_tx_aggregation_subframes: maximum number of subframes in an
|
||||
* aggregate an HT driver will transmit. Though ADDBA will advertise
|
||||
* a constant value of 64 as some older APs can crash if the window
|
||||
* size is smaller (an example is LinkSys WRT120N with FW v1.0.07
|
||||
* build 002 Jun 18 2012).
|
||||
* aggregate an HT/HE device will transmit. In HT AddBA we'll
|
||||
* advertise a constant value of 64 as some older APs crash if
|
||||
* the window size is smaller (an example is LinkSys WRT120N
|
||||
* with FW v1.0.07 build 002 Jun 18 2012).
|
||||
* For AddBA to HE capable peers this value will be used.
|
||||
*
|
||||
* @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
|
||||
* of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
|
||||
@@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags {
|
||||
* the default is _GI | _BANDWIDTH.
|
||||
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
|
||||
*
|
||||
* @radiotap_he: HE radiotap validity flags
|
||||
*
|
||||
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
|
||||
* 'units_pos' member is set to a non-negative value it must be set to
|
||||
* a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
|
||||
@@ -2263,8 +2309,8 @@ struct ieee80211_hw {
|
||||
u8 max_rates;
|
||||
u8 max_report_rates;
|
||||
u8 max_rate_tries;
|
||||
u8 max_rx_aggregation_subframes;
|
||||
u8 max_tx_aggregation_subframes;
|
||||
u16 max_rx_aggregation_subframes;
|
||||
u16 max_tx_aggregation_subframes;
|
||||
u8 max_tx_fragments;
|
||||
u8 offchannel_tx_hw_queue;
|
||||
u8 radiotap_mcs_details;
|
||||
@@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params {
|
||||
struct ieee80211_sta *sta;
|
||||
u16 tid;
|
||||
u16 ssn;
|
||||
u8 buf_size;
|
||||
u16 buf_size;
|
||||
bool amsdu;
|
||||
u16 timeout;
|
||||
};
|
||||
|
@@ -10,6 +10,7 @@
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/uidgid.h>
|
||||
|
||||
#include <net/flow.h>
|
||||
#include <net/netns/core.h>
|
||||
@@ -170,6 +171,8 @@ extern struct net init_net;
|
||||
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
|
||||
struct net *old_net);
|
||||
|
||||
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
|
||||
|
||||
void net_ns_barrier(void);
|
||||
#else /* CONFIG_NET_NS */
|
||||
#include <linux/sched.h>
|
||||
@@ -182,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags,
|
||||
return old_net;
|
||||
}
|
||||
|
||||
static inline void net_ns_get_ownership(const struct net *net,
|
||||
kuid_t *uid, kgid_t *gid)
|
||||
{
|
||||
*uid = GLOBAL_ROOT_UID;
|
||||
*gid = GLOBAL_ROOT_GID;
|
||||
}
|
||||
|
||||
static inline void net_ns_barrier(void) {}
|
||||
#endif /* CONFIG_NET_NS */
|
||||
|
||||
|
@@ -28,6 +28,7 @@ enum netevent_notif_type {
|
||||
NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
|
||||
NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
|
||||
NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
|
||||
NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
|
||||
};
|
||||
|
||||
int register_netevent_notifier(struct notifier_block *nb);
|
||||
|
@@ -10,9 +10,6 @@
|
||||
#ifndef _NF_CONNTRACK_IPV4_H
|
||||
#define _NF_CONNTRACK_IPV4_H
|
||||
|
||||
|
||||
const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
|
||||
|
||||
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
|
||||
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
|
||||
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
|
||||
|
@@ -41,6 +41,11 @@ union nf_conntrack_expect_proto {
|
||||
/* insert expect proto private data here */
|
||||
};
|
||||
|
||||
struct nf_conntrack_net {
|
||||
unsigned int users4;
|
||||
unsigned int users6;
|
||||
};
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
@@ -171,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto);
|
||||
*/
|
||||
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
|
||||
|
||||
void nf_ct_free_hashtable(void *hash, unsigned int size);
|
||||
|
||||
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
|
||||
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
|
||||
|
||||
|
@@ -14,7 +14,6 @@
|
||||
#define _NF_CONNTRACK_CORE_H
|
||||
|
||||
#include <linux/netfilter.h>
|
||||
#include <net/netfilter/nf_conntrack_l3proto.h>
|
||||
#include <net/netfilter/nf_conntrack_l4proto.h>
|
||||
#include <net/netfilter/nf_conntrack_ecache.h>
|
||||
|
||||
@@ -40,16 +39,8 @@ void nf_conntrack_cleanup_start(void);
|
||||
void nf_conntrack_init_end(void);
|
||||
void nf_conntrack_cleanup_end(void);
|
||||
|
||||
bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
|
||||
unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
|
||||
struct net *net,
|
||||
struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_l3proto *l3proto,
|
||||
const struct nf_conntrack_l4proto *l4proto);
|
||||
|
||||
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig,
|
||||
const struct nf_conntrack_l3proto *l3proto,
|
||||
const struct nf_conntrack_l4proto *l4proto);
|
||||
|
||||
/* Find a connection corresponding to a tuple. */
|
||||
@@ -75,10 +66,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_l3proto *l3proto,
|
||||
const struct nf_conntrack_l4proto *proto);
|
||||
void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_l4proto *proto);
|
||||
|
||||
#define CONNTRACK_LOCKS 1024
|
||||
|
||||
|
@@ -1,8 +1,23 @@
|
||||
#ifndef _NF_CONNTRACK_COUNT_H
|
||||
#define _NF_CONNTRACK_COUNT_H
|
||||
|
||||
#include <linux/list.h>
|
||||
|
||||
struct nf_conncount_data;
|
||||
|
||||
enum nf_conncount_list_add {
|
||||
NF_CONNCOUNT_ADDED, /* list add was ok */
|
||||
NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
|
||||
NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
|
||||
};
|
||||
|
||||
struct nf_conncount_list {
|
||||
spinlock_t list_lock;
|
||||
struct list_head head; /* connections with the same filtering key */
|
||||
unsigned int count; /* length of list */
|
||||
bool dead;
|
||||
};
|
||||
|
||||
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
|
||||
unsigned int keylen);
|
||||
void nf_conncount_destroy(struct net *net, unsigned int family,
|
||||
@@ -14,15 +29,21 @@ unsigned int nf_conncount_count(struct net *net,
|
||||
const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_zone *zone);
|
||||
|
||||
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
|
||||
const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_zone *zone,
|
||||
bool *addit);
|
||||
void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
|
||||
const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_zone *zone,
|
||||
bool *addit);
|
||||
|
||||
bool nf_conncount_add(struct hlist_head *head,
|
||||
const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_zone *zone);
|
||||
void nf_conncount_list_init(struct nf_conncount_list *list);
|
||||
|
||||
void nf_conncount_cache_free(struct hlist_head *hhead);
|
||||
enum nf_conncount_list_add
|
||||
nf_conncount_add(struct nf_conncount_list *list,
|
||||
const struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_conntrack_zone *zone);
|
||||
|
||||
bool nf_conncount_gc_list(struct net *net,
|
||||
struct nf_conncount_list *list);
|
||||
|
||||
void nf_conncount_cache_free(struct nf_conncount_list *list);
|
||||
|
||||
#endif
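
A minimal caller sketch (an assumption for illustration, not code from this diff) of the reworked nf_conncount list API declared above: look the tuple up in an already-initialised list and, when the lookup asks for it, try to add the connection and handle the three possible results.

static bool example_conncount_track(struct net *net,
				    struct nf_conncount_list *list,
				    const struct nf_conntrack_tuple *tuple,
				    const struct nf_conntrack_zone *zone)
{
	bool addit = false;

	nf_conncount_lookup(net, list, tuple, zone, &addit);
	if (!addit)
		return true;	/* connection already on the list */

	switch (nf_conncount_add(list, tuple, zone)) {
	case NF_CONNCOUNT_ADDED:
		return true;
	case NF_CONNCOUNT_SKIP:	/* list already reclaimed by gc; caller retries */
	case NF_CONNCOUNT_ERR:	/* -ENOMEM; caller must drop the skb */
	default:
		return false;
	}
}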
|
||||
|
@@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
|
||||
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
|
||||
unsigned int);
|
||||
|
||||
struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
|
||||
struct nf_conntrack_helper *helper,
|
||||
gfp_t gfp);
|
||||
struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
|
||||
|
||||
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
|
||||
gfp_t flags);
|
||||
|
@@ -1,84 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C)2003,2004 USAGI/WIDE Project
|
||||
*
|
||||
* Header for use in defining a given L3 protocol for connection tracking.
|
||||
*
|
||||
* Author:
|
||||
* Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
|
||||
*
|
||||
* Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
|
||||
*/
|
||||
|
||||
#ifndef _NF_CONNTRACK_L3PROTO_H
|
||||
#define _NF_CONNTRACK_L3PROTO_H
|
||||
#include <linux/netlink.h>
|
||||
#include <net/netlink.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <net/netfilter/nf_conntrack.h>
|
||||
|
||||
struct nf_conntrack_l3proto {
|
||||
/* L3 Protocol Family number. ex) PF_INET */
|
||||
u_int16_t l3proto;
|
||||
|
||||
/* size of tuple nlattr, fills a hole */
|
||||
u16 nla_size;
|
||||
|
||||
/*
|
||||
* Try to fill in the third arg: nhoff is offset of l3 proto
|
||||
* hdr. Return true if possible.
|
||||
*/
|
||||
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
|
||||
struct nf_conntrack_tuple *tuple);
|
||||
|
||||
/*
|
||||
* Invert the per-proto part of the tuple: ie. turn xmit into reply.
|
||||
* Some packets can't be inverted: return 0 in that case.
|
||||
*/
|
||||
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig);
|
||||
|
||||
/*
|
||||
* Called before tracking.
|
||||
* *dataoff: offset of protocol header (TCP, UDP,...) in skb
|
||||
* *protonum: protocol number
|
||||
*/
|
||||
int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
|
||||
unsigned int *dataoff, u_int8_t *protonum);
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
|
||||
int (*tuple_to_nlattr)(struct sk_buff *skb,
|
||||
const struct nf_conntrack_tuple *t);
|
||||
int (*nlattr_to_tuple)(struct nlattr *tb[],
|
||||
struct nf_conntrack_tuple *t);
|
||||
const struct nla_policy *nla_policy;
|
||||
#endif
|
||||
|
||||
/* Called when netns wants to use connection tracking */
|
||||
int (*net_ns_get)(struct net *);
|
||||
void (*net_ns_put)(struct net *);
|
||||
|
||||
/* Module (if any) which this is connected to. */
|
||||
struct module *me;
|
||||
};
|
||||
|
||||
extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
|
||||
|
||||
/* Protocol global registration. */
|
||||
int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
|
||||
void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
|
||||
|
||||
const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
|
||||
|
||||
/* Existing built-in protocols */
|
||||
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
|
||||
|
||||
static inline struct nf_conntrack_l3proto *
|
||||
__nf_ct_l3proto_find(u_int16_t l3proto)
|
||||
{
|
||||
if (unlikely(l3proto >= NFPROTO_NUMPROTO))
|
||||
return &nf_conntrack_l3proto_generic;
|
||||
return rcu_dereference(nf_ct_l3protos[l3proto]);
|
||||
}
|
||||
|
||||
#endif /*_NF_CONNTRACK_L3PROTO_H*/
|
@@ -36,7 +36,7 @@ struct nf_conntrack_l4proto {
|
||||
struct net *net, struct nf_conntrack_tuple *tuple);
|
||||
|
||||
/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
|
||||
* Some packets can't be inverted: return 0 in that case.
|
||||
* Only used by icmp, most protocols use a generic version.
|
||||
*/
|
||||
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig);
|
||||
@@ -45,13 +45,12 @@ struct nf_conntrack_l4proto {
|
||||
int (*packet)(struct nf_conn *ct,
|
||||
const struct sk_buff *skb,
|
||||
unsigned int dataoff,
|
||||
enum ip_conntrack_info ctinfo,
|
||||
unsigned int *timeouts);
|
||||
enum ip_conntrack_info ctinfo);
|
||||
|
||||
/* Called when a new connection for this protocol found;
|
||||
* returns TRUE if it's OK. If so, packet() called next. */
|
||||
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
unsigned int dataoff, unsigned int *timeouts);
|
||||
unsigned int dataoff);
|
||||
|
||||
/* Called when a conntrack entry is destroyed */
|
||||
void (*destroy)(struct nf_conn *ct);
|
||||
@@ -63,9 +62,6 @@ struct nf_conntrack_l4proto {
|
||||
/* called by gc worker if table is full */
|
||||
bool (*can_early_drop)(const struct nf_conn *ct);
|
||||
|
||||
/* Return the array of timeouts for this protocol. */
|
||||
unsigned int *(*get_timeouts)(struct net *net);
|
||||
|
||||
/* convert protoinfo to nfnetink attributes */
|
||||
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
|
||||
struct nf_conn *ct);
|
||||
@@ -81,7 +77,6 @@ struct nf_conntrack_l4proto {
|
||||
struct nf_conntrack_tuple *t);
|
||||
const struct nla_policy *nla_policy;
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
|
||||
struct {
|
||||
int (*nlattr_to_obj)(struct nlattr *tb[],
|
||||
struct net *net, void *data);
|
||||
@@ -91,7 +86,6 @@ struct nf_conntrack_l4proto {
|
||||
u16 nlattr_max;
|
||||
const struct nla_policy *nla_policy;
|
||||
} ctnl_timeout;
|
||||
#endif
|
||||
#ifdef CONFIG_NF_CONNTRACK_PROCFS
|
||||
/* Print out the private part of the conntrack. */
|
||||
void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
|
||||
@@ -134,10 +128,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
|
||||
/* Protocol global registration. */
|
||||
int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
|
||||
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
|
||||
int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
|
||||
unsigned int num_proto);
|
||||
void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
|
||||
unsigned int num_proto);
|
||||
|
||||
/* Generic netlink helpers */
|
||||
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
|
||||
|
@@ -11,24 +11,28 @@
|
||||
|
||||
#define CTNL_TIMEOUT_NAME_MAX 32
|
||||
|
||||
struct ctnl_timeout {
|
||||
struct list_head head;
|
||||
struct rcu_head rcu_head;
|
||||
refcount_t refcnt;
|
||||
char name[CTNL_TIMEOUT_NAME_MAX];
|
||||
struct nf_ct_timeout {
|
||||
__u16 l3num;
|
||||
const struct nf_conntrack_l4proto *l4proto;
|
||||
char data[0];
|
||||
};
|
||||
|
||||
struct ctnl_timeout {
|
||||
struct list_head head;
|
||||
struct rcu_head rcu_head;
|
||||
refcount_t refcnt;
|
||||
char name[CTNL_TIMEOUT_NAME_MAX];
|
||||
struct nf_ct_timeout timeout;
|
||||
};
|
||||
|
||||
struct nf_conn_timeout {
|
||||
struct ctnl_timeout __rcu *timeout;
|
||||
struct nf_ct_timeout __rcu *timeout;
|
||||
};
|
||||
|
||||
static inline unsigned int *
|
||||
nf_ct_timeout_data(struct nf_conn_timeout *t)
|
||||
{
|
||||
struct ctnl_timeout *timeout;
|
||||
struct nf_ct_timeout *timeout;
|
||||
|
||||
timeout = rcu_dereference(t->timeout);
|
||||
if (timeout == NULL)
|
||||
@@ -49,7 +53,7 @@ struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
|
||||
|
||||
static inline
|
||||
struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
|
||||
struct ctnl_timeout *timeout,
|
||||
struct nf_ct_timeout *timeout,
|
||||
gfp_t gfp)
|
||||
{
|
||||
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
|
||||
@@ -67,32 +71,23 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
|
||||
#endif
|
||||
};
|
||||
|
||||
static inline unsigned int *
|
||||
nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
|
||||
const struct nf_conntrack_l4proto *l4proto)
|
||||
static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
|
||||
{
|
||||
unsigned int *timeouts = NULL;
|
||||
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
|
||||
struct nf_conn_timeout *timeout_ext;
|
||||
unsigned int *timeouts;
|
||||
|
||||
timeout_ext = nf_ct_timeout_find(ct);
|
||||
if (timeout_ext) {
|
||||
if (timeout_ext)
|
||||
timeouts = nf_ct_timeout_data(timeout_ext);
|
||||
if (unlikely(!timeouts))
|
||||
timeouts = l4proto->get_timeouts(net);
|
||||
} else {
|
||||
timeouts = l4proto->get_timeouts(net);
|
||||
}
|
||||
|
||||
return timeouts;
|
||||
#else
|
||||
return l4proto->get_timeouts(net);
|
||||
#endif
|
||||
return timeouts;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
|
||||
int nf_conntrack_timeout_init(void);
|
||||
void nf_conntrack_timeout_fini(void);
|
||||
void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
|
||||
#else
|
||||
static inline int nf_conntrack_timeout_init(void)
|
||||
{
|
||||
@@ -106,8 +101,8 @@ static inline void nf_conntrack_timeout_fini(void)
|
||||
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
|
||||
|
||||
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
|
||||
extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
|
||||
extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
|
||||
extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
|
||||
extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
|
||||
#endif
|
||||
|
||||
#endif /* _NF_CONNTRACK_TIMEOUT_H */
|
||||
|
@@ -4,7 +4,7 @@
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rhashtable-types.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/netfilter/nf_conntrack_tuple_common.h>
|
||||
#include <net/dst.h>
|
||||
|
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
|
||||
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
|
||||
u8 proto, int fragment, unsigned int offset,
|
||||
unsigned int logflags);
|
||||
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
|
||||
void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
|
||||
struct sock *sk);
|
||||
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
|
||||
unsigned int hooknum, const struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
|
@@ -71,4 +71,11 @@ extern struct nft_set_type nft_set_hash_fast_type;
|
||||
extern struct nft_set_type nft_set_rbtree_type;
|
||||
extern struct nft_set_type nft_set_bitmap_type;
|
||||
|
||||
struct nft_expr;
|
||||
struct nft_regs;
|
||||
struct nft_pktinfo;
|
||||
void nft_meta_get_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs, const struct nft_pktinfo *pkt);
|
||||
void nft_lookup_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs, const struct nft_pktinfo *pkt);
|
||||
#endif /* _NET_NF_TABLES_CORE_H */
|
||||
|
@@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
|
||||
return false;
|
||||
}
|
||||
|
||||
/* assign a socket to the skb -- consumes sk */
|
||||
static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
|
||||
{
|
||||
skb_orphan(skb);
|
||||
skb->sk = sk;
|
||||
skb->destructor = sock_edemux;
|
||||
}
|
||||
|
||||
__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
|
||||
|
||||
/**
|
||||
|
@@ -9,12 +9,7 @@ struct net;
|
||||
static inline u32 net_hash_mix(const struct net *net)
|
||||
{
|
||||
#ifdef CONFIG_NET_NS
|
||||
/*
|
||||
* shift this right to eliminate bits, that are
|
||||
* always zeroed
|
||||
*/
|
||||
|
||||
return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
|
||||
return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
|
@@ -98,6 +98,7 @@ struct netns_ipv4 {
|
||||
int sysctl_ip_default_ttl;
|
||||
int sysctl_ip_no_pmtu_disc;
|
||||
int sysctl_ip_fwd_use_pmtu;
|
||||
int sysctl_ip_fwd_update_priority;
|
||||
int sysctl_ip_nonlocal_bind;
|
||||
/* Shall we try to damage output packets if routing dev changes? */
|
||||
int sysctl_ip_dynaddr;
|
||||
|
@@ -32,6 +32,7 @@ struct netns_sysctl_ipv6 {
|
||||
int flowlabel_consistency;
|
||||
int auto_flowlabels;
|
||||
int icmpv6_time;
|
||||
int icmpv6_echo_ignore_all;
|
||||
int anycast_src_echo_reply;
|
||||
int ip_nonlocal_bind;
|
||||
int fwmark_reflect;
|
||||
|
@@ -7,6 +7,7 @@
|
||||
struct netns_nftables {
|
||||
struct list_head tables;
|
||||
struct list_head commit_list;
|
||||
struct mutex commit_mutex;
|
||||
unsigned int base_seq;
|
||||
u8 gencursor;
|
||||
u8 validate_state;
|
||||
|
@@ -7,12 +7,16 @@
|
||||
#include <net/sch_generic.h>
|
||||
#include <net/act_api.h>
|
||||
|
||||
/* TC action not accessible from user space */
|
||||
#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
|
||||
|
||||
/* Basic packet classifier frontend definitions. */
|
||||
|
||||
struct tcf_walker {
|
||||
int stop;
|
||||
int skip;
|
||||
int count;
|
||||
unsigned long cookie;
|
||||
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
|
||||
};
|
||||
|
||||
@@ -36,9 +40,9 @@ struct tcf_block_cb;
|
||||
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
|
||||
|
||||
#ifdef CONFIG_NET_CLS
|
||||
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
|
||||
bool create);
|
||||
void tcf_chain_put(struct tcf_chain *chain);
|
||||
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
|
||||
u32 chain_index);
|
||||
void tcf_chain_put_by_act(struct tcf_chain *chain);
|
||||
void tcf_block_netif_keep_dst(struct tcf_block *block);
|
||||
int tcf_block_get(struct tcf_block **p_block,
|
||||
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
|
||||
@@ -73,11 +77,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
|
||||
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
|
||||
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv);
|
||||
void *cb_priv,
|
||||
struct netlink_ext_ack *extack);
|
||||
int tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv);
|
||||
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
|
||||
void *cb_priv, struct netlink_ext_ack *extack);
|
||||
void __tcf_block_cb_unregister(struct tcf_block *block,
|
||||
struct tcf_block_cb *block_cb);
|
||||
void tcf_block_cb_unregister(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident);
|
||||
|
||||
@@ -111,11 +117,6 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool tcf_block_shared(struct tcf_block *block)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
|
||||
{
|
||||
return NULL;
|
||||
@@ -166,7 +167,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
|
||||
static inline
|
||||
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv)
|
||||
void *cb_priv,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
@@ -174,13 +176,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
|
||||
static inline
|
||||
int tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv)
|
||||
void *cb_priv, struct netlink_ext_ack *extack)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
|
||||
void __tcf_block_cb_unregister(struct tcf_block *block,
|
||||
struct tcf_block_cb *block_cb)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -601,6 +604,7 @@ struct tc_block_offload {
|
||||
enum tc_block_command command;
|
||||
enum tcf_block_binder_type binder_type;
|
||||
struct tcf_block *block;
|
||||
struct netlink_ext_ack *extack;
|
||||
};
|
||||
|
||||
struct tc_cls_common_offload {
|
||||
@@ -720,6 +724,8 @@ enum tc_fl_command {
|
||||
TC_CLSFLOWER_REPLACE,
|
||||
TC_CLSFLOWER_DESTROY,
|
||||
TC_CLSFLOWER_STATS,
|
||||
TC_CLSFLOWER_TMPLT_CREATE,
|
||||
TC_CLSFLOWER_TMPLT_DESTROY,
|
||||
};
|
||||
|
||||
struct tc_cls_flower_offload {
|
||||
@@ -776,6 +782,7 @@ struct tc_mqprio_qopt_offload {
|
||||
struct tc_cookie {
|
||||
u8 *data;
|
||||
u32 len;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
struct tc_qopt_offload_stats {
|
||||
|
@@ -72,6 +72,8 @@ struct qdisc_watchdog {
|
||||
struct Qdisc *qdisc;
|
||||
};
|
||||
|
||||
void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
|
||||
clockid_t clockid);
|
||||
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
|
||||
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
|
||||
|
||||
@@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload {
|
||||
s32 sendslope;
|
||||
};
|
||||
|
||||
struct tc_etf_qopt_offload {
|
||||
u8 enable;
|
||||
s32 queue;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@@ -20,6 +20,9 @@ struct qdisc_walker;
|
||||
struct tcf_walker;
|
||||
struct module;
|
||||
|
||||
typedef int tc_setup_cb_t(enum tc_setup_type type,
|
||||
void *type_data, void *cb_priv);
|
||||
|
||||
struct qdisc_rate_table {
|
||||
struct tc_ratespec rate;
|
||||
u32 data[256];
|
||||
@@ -232,9 +235,17 @@ struct tcf_result {
|
||||
u32 classid;
|
||||
};
|
||||
const struct tcf_proto *goto_tp;
|
||||
|
||||
/* used by the TC_ACT_REINSERT action */
|
||||
struct {
|
||||
bool ingress;
|
||||
struct gnet_stats_queue *qstats;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct tcf_chain;
|
||||
|
||||
struct tcf_proto_ops {
|
||||
struct list_head head;
|
||||
char kind[IFNAMSIZ];
|
||||
@@ -256,11 +267,22 @@ struct tcf_proto_ops {
|
||||
bool *last,
|
||||
struct netlink_ext_ack *);
|
||||
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
|
||||
int (*reoffload)(struct tcf_proto *tp, bool add,
|
||||
tc_setup_cb_t *cb, void *cb_priv,
|
||||
struct netlink_ext_ack *extack);
|
||||
void (*bind_class)(void *, u32, unsigned long);
|
||||
void * (*tmplt_create)(struct net *net,
|
||||
struct tcf_chain *chain,
|
||||
struct nlattr **tca,
|
||||
struct netlink_ext_ack *extack);
|
||||
void (*tmplt_destroy)(void *tmplt_priv);
|
||||
|
||||
/* rtnetlink specific */
|
||||
int (*dump)(struct net*, struct tcf_proto*, void *,
|
||||
struct sk_buff *skb, struct tcmsg*);
|
||||
int (*tmplt_dump)(struct sk_buff *skb,
|
||||
struct net *net,
|
||||
void *tmplt_priv);
|
||||
|
||||
struct module *owner;
|
||||
};
|
||||
@@ -269,6 +291,8 @@ struct tcf_proto {
|
||||
/* Fast access part */
|
||||
struct tcf_proto __rcu *next;
|
||||
void __rcu *root;
|
||||
|
||||
/* called under RCU BH lock*/
|
||||
int (*classify)(struct sk_buff *,
|
||||
const struct tcf_proto *,
|
||||
struct tcf_result *);
|
||||
@@ -294,11 +318,14 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
|
||||
|
||||
struct tcf_chain {
|
||||
struct tcf_proto __rcu *filter_chain;
|
||||
struct list_head filter_chain_list;
|
||||
struct list_head list;
|
||||
struct tcf_block *block;
|
||||
u32 index; /* chain index */
|
||||
unsigned int refcnt;
|
||||
unsigned int action_refcnt;
|
||||
bool explicitly_created;
|
||||
const struct tcf_proto_ops *tmplt_ops;
|
||||
void *tmplt_priv;
|
||||
};
|
||||
|
||||
struct tcf_block {
|
||||
@@ -312,6 +339,10 @@ struct tcf_block {
|
||||
bool keep_dst;
|
||||
unsigned int offloadcnt; /* Number of offloaded filters */
|
||||
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
|
||||
struct {
|
||||
struct tcf_chain *chain;
|
||||
struct list_head filter_chain_list;
|
||||
} chain0;
|
||||
};
|
||||
|
||||
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
|
||||
@@ -330,6 +361,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
|
||||
block->offloadcnt--;
|
||||
}
|
||||
|
||||
static inline void
|
||||
tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
|
||||
u32 *flags, bool add)
|
||||
{
|
||||
if (add) {
|
||||
if (!*cnt)
|
||||
tcf_block_offload_inc(block, flags);
|
||||
(*cnt)++;
|
||||
} else {
|
||||
(*cnt)--;
|
||||
if (!*cnt)
|
||||
tcf_block_offload_dec(block, flags);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
|
||||
{
|
||||
struct qdisc_skb_cb *qcb;
|
||||
@@ -529,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb)
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
|
||||
{
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
return skb->tc_redirected;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
|
||||
{
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
@@ -1068,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
|
||||
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
|
||||
struct mini_Qdisc __rcu **p_miniq);
|
||||
|
||||
static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
|
||||
{
|
||||
struct gnet_stats_queue *stats = res->qstats;
|
||||
int ret;
|
||||
|
||||
if (res->ingress)
|
||||
ret = netif_receive_skb(skb);
|
||||
else
|
||||
ret = dev_queue_xmit(skb);
|
||||
if (ret && stats)
|
||||
qstats_overlimit_inc(res->qstats);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -48,7 +48,7 @@
|
||||
#define __sctp_structs_h__
|
||||
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rhashtable-types.h>
|
||||
#include <linux/socket.h> /* linux/in.h needs this!! */
|
||||
#include <linux/in.h> /* We get struct sockaddr_in. */
|
||||
#include <linux/in6.h> /* We get struct in6_addr */
|
||||
@@ -57,6 +57,7 @@
|
||||
#include <linux/atomic.h> /* This gets us atomic counters. */
|
||||
#include <linux/skbuff.h> /* We need sk_buff_head. */
|
||||
#include <linux/workqueue.h> /* We need tq_struct. */
|
||||
#include <linux/flex_array.h> /* We need flex_array. */
|
||||
#include <linux/sctp.h> /* We need sctp* header structs. */
|
||||
#include <net/sctp/auth.h> /* We need auth specific structs */
|
||||
#include <net/ip.h> /* For inet_skb_parm */
|
||||
@@ -193,6 +194,9 @@ struct sctp_sock {
|
||||
/* This is the max_retrans value for new associations. */
|
||||
__u16 pathmaxrxt;
|
||||
|
||||
__u32 flowlabel;
|
||||
__u8 dscp;
|
||||
|
||||
/* The initial Path MTU to use for new associations. */
|
||||
__u32 pathmtu;
|
||||
|
||||
@@ -220,6 +224,7 @@ struct sctp_sock {
|
||||
__u32 adaptation_ind;
|
||||
__u32 pd_point;
|
||||
__u16 nodelay:1,
|
||||
reuse:1,
|
||||
disable_fragments:1,
|
||||
v4mapped:1,
|
||||
frag_interleave:1,
|
||||
@@ -394,37 +399,35 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
|
||||
|
||||
/* What is the current SSN number for this stream? */
|
||||
#define sctp_ssn_peek(stream, type, sid) \
|
||||
((stream)->type[sid].ssn)
|
||||
(sctp_stream_##type((stream), (sid))->ssn)
|
||||
|
||||
/* Return the next SSN number for this stream. */
|
||||
#define sctp_ssn_next(stream, type, sid) \
|
||||
((stream)->type[sid].ssn++)
|
||||
(sctp_stream_##type((stream), (sid))->ssn++)
|
||||
|
||||
/* Skip over this ssn and all below. */
|
||||
#define sctp_ssn_skip(stream, type, sid, ssn) \
|
||||
((stream)->type[sid].ssn = ssn + 1)
|
||||
(sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
|
||||
|
||||
/* What is the current MID number for this stream? */
|
||||
#define sctp_mid_peek(stream, type, sid) \
|
||||
((stream)->type[sid].mid)
|
||||
(sctp_stream_##type((stream), (sid))->mid)
|
||||
|
||||
/* Return the next MID number for this stream. */
|
||||
#define sctp_mid_next(stream, type, sid) \
|
||||
((stream)->type[sid].mid++)
|
||||
(sctp_stream_##type((stream), (sid))->mid++)
|
||||
|
||||
/* Skip over this mid and all below. */
|
||||
#define sctp_mid_skip(stream, type, sid, mid) \
|
||||
((stream)->type[sid].mid = mid + 1)
|
||||
|
||||
#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
|
||||
(sctp_stream_##type((stream), (sid))->mid = mid + 1)
|
||||
|
||||
/* What is the current MID_uo number for this stream? */
|
||||
#define sctp_mid_uo_peek(stream, type, sid) \
|
||||
((stream)->type[sid].mid_uo)
|
||||
(sctp_stream_##type((stream), (sid))->mid_uo)
|
||||
|
||||
/* Return the next MID_uo number for this stream. */
|
||||
#define sctp_mid_uo_next(stream, type, sid) \
|
||||
((stream)->type[sid].mid_uo++)
|
||||
(sctp_stream_##type((stream), (sid))->mid_uo++)
|
||||
|
||||
/*
|
||||
* Pointers to address related SCTP functions.
|
||||
@@ -894,6 +897,9 @@ struct sctp_transport {
|
||||
*/
|
||||
__u16 pathmaxrxt;
|
||||
|
||||
__u32 flowlabel;
|
||||
__u8 dscp;
|
||||
|
||||
/* This is the partially failed retrans value for the transport
|
||||
* and will be initialized from the assocs value. This can be changed
|
||||
* using the SCTP_PEER_ADDR_THLDS socket option
|
||||
@@ -1433,8 +1439,8 @@ struct sctp_stream_in {
|
||||
};
|
||||
|
||||
struct sctp_stream {
|
||||
struct sctp_stream_out *out;
|
||||
struct sctp_stream_in *in;
|
||||
struct flex_array *out;
|
||||
struct flex_array *in;
|
||||
__u16 outcnt;
|
||||
__u16 incnt;
|
||||
/* Current stream being sent, if any */
|
||||
@@ -1456,6 +1462,23 @@ struct sctp_stream {
|
||||
struct sctp_stream_interleave *si;
|
||||
};
|
||||
|
||||
static inline struct sctp_stream_out *sctp_stream_out(
|
||||
const struct sctp_stream *stream,
|
||||
__u16 sid)
|
||||
{
|
||||
return flex_array_get(stream->out, sid);
|
||||
}
|
||||
|
||||
static inline struct sctp_stream_in *sctp_stream_in(
|
||||
const struct sctp_stream *stream,
|
||||
__u16 sid)
|
||||
{
|
||||
return flex_array_get(stream->in, sid);
|
||||
}
|
||||
|
||||
#define SCTP_SO(s, i) sctp_stream_out((s), (i))
|
||||
#define SCTP_SI(s, i) sctp_stream_in((s), (i))
|
||||
|
||||
#define SCTP_STREAM_CLOSED 0x00
|
||||
#define SCTP_STREAM_OPEN 0x01
|
||||
|
||||
@@ -1771,6 +1794,9 @@ struct sctp_association {
|
||||
*/
|
||||
__u16 pathmaxrxt;
|
||||
|
||||
__u32 flowlabel;
|
||||
__u8 dscp;
|
||||
|
||||
/* Flag that path mtu update is pending */
|
||||
__u8 pmtu_pending;
|
||||
|
||||
|
@@ -18,7 +18,7 @@
|
||||
#include <linux/ipv6.h>
|
||||
#include <net/lwtunnel.h>
|
||||
#include <linux/seg6.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rhashtable-types.h>
|
||||
|
||||
static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
|
||||
__be32 to)
|
||||
|
@@ -22,7 +22,7 @@
|
||||
#include <linux/route.h>
|
||||
#include <net/seg6.h>
|
||||
#include <linux/seg6_hmac.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/rhashtable-types.h>
|
||||
|
||||
#define SEG6_HMAC_MAX_DIGESTSIZE 160
|
||||
#define SEG6_HMAC_RING_SIZE 256
|
||||
|
@@ -21,10 +21,12 @@
|
||||
|
||||
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
|
||||
u32 tbl_id);
|
||||
extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
|
||||
|
||||
struct seg6_bpf_srh_state {
|
||||
bool valid;
|
||||
struct ipv6_sr_hdr *srh;
|
||||
u16 hdrlen;
|
||||
bool valid;
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
|
||||
|
@@ -11,6 +11,8 @@
|
||||
#ifndef _SMC_H
|
||||
#define _SMC_H
|
||||
|
||||
#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
|
||||
|
||||
struct smc_hashinfo {
|
||||
rwlock_t lock;
|
||||
struct hlist_head ht;
|
||||
@@ -18,4 +20,67 @@ struct smc_hashinfo {
|
||||
|
||||
int smc_hash_sk(struct sock *sk);
|
||||
void smc_unhash_sk(struct sock *sk);
|
||||
|
||||
/* SMCD/ISM device driver interface */
|
||||
struct smcd_dmb {
|
||||
u64 dmb_tok;
|
||||
u64 rgid;
|
||||
u32 dmb_len;
|
||||
u32 sba_idx;
|
||||
u32 vlan_valid;
|
||||
u32 vlan_id;
|
||||
void *cpu_addr;
|
||||
dma_addr_t dma_addr;
|
||||
};
|
||||
|
||||
#define ISM_EVENT_DMB 0
|
||||
#define ISM_EVENT_GID 1
|
||||
#define ISM_EVENT_SWR 2
|
||||
|
||||
struct smcd_event {
|
||||
u32 type;
|
||||
u32 code;
|
||||
u64 tok;
|
||||
u64 time;
|
||||
u64 info;
|
||||
};
|
||||
|
||||
struct smcd_dev;
|
||||
|
||||
struct smcd_ops {
|
||||
int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
|
||||
u32 vid);
|
||||
int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
|
||||
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
|
||||
int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
|
||||
int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
|
||||
int (*set_vlan_required)(struct smcd_dev *dev);
|
||||
int (*reset_vlan_required)(struct smcd_dev *dev);
|
||||
int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
|
||||
u32 event_code, u64 info);
|
||||
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
|
||||
bool sf, unsigned int offset, void *data,
|
||||
unsigned int size);
|
||||
};
|
||||
|
||||
struct smcd_dev {
|
||||
const struct smcd_ops *ops;
|
||||
struct device dev;
|
||||
void *priv;
|
||||
u64 local_gid;
|
||||
struct list_head list;
|
||||
spinlock_t lock;
|
||||
struct smc_connection **conn;
|
||||
struct list_head vlan;
|
||||
struct workqueue_struct *event_wq;
|
||||
u8 pnetid[SMC_MAX_PNETID_LEN];
|
||||
};
|
||||
|
||||
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
|
||||
const struct smcd_ops *ops, int max_dmbs);
|
||||
int smcd_register_dev(struct smcd_dev *smcd);
|
||||
void smcd_unregister_dev(struct smcd_dev *smcd);
|
||||
void smcd_free_dev(struct smcd_dev *smcd);
|
||||
void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
|
||||
void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
|
||||
#endif /* _SMC_H */
|
||||
|
@@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair;
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 * @skc_tx_queue_mapping: tx queue number for this connection
 * @skc_rx_queue_mapping: rx queue number for this connection
 * @skc_flags: place holder for sk_flags
 *	%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *	%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -214,7 +215,10 @@ struct sock_common {
                struct hlist_node skc_node;
                struct hlist_nulls_node skc_nulls_node;
        };
        int skc_tx_queue_mapping;
        unsigned short skc_tx_queue_mapping;
#ifdef CONFIG_XPS
        unsigned short skc_rx_queue_mapping;
#endif
        union {
                int skc_incoming_cpu;
                u32 skc_rcv_wnd;
@@ -315,6 +319,9 @@ struct sock_common {
 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 * @sk_reuseport_cb: reuseport group container
 * @sk_rcu: used during RCU grace period
 * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
 * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
 * @sk_txtime_unused: unused txtime flags
 */
struct sock {
        /*
@@ -326,6 +333,9 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
#define sk_dontcopy_end __sk_common.skc_dontcopy_end
@@ -468,6 +478,12 @@ struct sock {
        u8 sk_shutdown;
        u32 sk_tskey;
        atomic_t sk_zckey;

        u8 sk_clockid;
        u8 sk_txtime_deadline_mode : 1,
           sk_txtime_report_errors : 1,
           sk_txtime_unused : 6;

        struct socket *sk_socket;
        void *sk_user_data;
#ifdef CONFIG_SECURITY
@@ -783,6 +799,7 @@ enum sock_flags {
        SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
        SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
        SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
        SOCK_TXTIME,
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
        u64 transmit_time;
        u32 mark;
        u16 tsflags;
};

static inline void sockcm_init(struct sockcm_cookie *sockc,
                               const struct sock *sk)
{
        *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
@@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
        /* sk_tx_queue_mapping accept only upto a 16-bit value */
        if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
                return;
        sk->sk_tx_queue_mapping = tx_queue;
}

#define NO_QUEUE_MAPPING USHRT_MAX

static inline void sk_tx_queue_clear(struct sock *sk)
{
        sk->sk_tx_queue_mapping = -1;
        sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
        return sk ? sk->sk_tx_queue_mapping : -1;
        if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
                return sk->sk_tx_queue_mapping;

        return -1;
}

static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_XPS
        if (skb_rx_queue_recorded(skb)) {
                u16 rx_queue = skb_get_rx_queue(skb);

                if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
                        return;

                sk->sk_rx_queue_mapping = rx_queue;
        }
#endif
}

static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_XPS
        sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
#endif
}

#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
        if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
                return sk->sk_rx_queue_mapping;

        return -1;
}
#endif

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
        sk_tx_queue_clear(sk);
@@ -1725,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
{
        WARN_ON(parent->sk);
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_wq = parent->wq;
        rcu_assign_pointer(sk->sk_wq, parent->wq);
        parent->sk = sk;
        sk_set_socket(sk, parent);
        sk->sk_uid = SOCK_INODE(parent)->i_uid;
@@ -1994,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp: file
 * @wait_address: socket wait queue
 * @p: poll_table
 *
 * See the comments in the wq_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
                                  wait_queue_head_t *wait_address, poll_table *p)
static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
        if (!poll_does_not_wait(p) && wait_address) {
                poll_wait(filp, wait_address, p);
        struct socket *sock = filp->private_data;

        if (!poll_does_not_wait(p)) {
                poll_wait(filp, &sock->wq->wait, p);
                /* We need to be sure we are in sync with the
                 * socket flags modification.
                 *
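The sock.h hunks above add the sockcm_cookie transmit_time field and the sockcm_init() helper for SO_TXTIME. A minimal sketch of the intended call pattern in a protocol's sendmsg path follows; the surrounding handler is hypothetical, and the three-argument sock_cmsg_send() prototype is assumed to match the (truncated) declaration above.

#include <net/sock.h>

static int my_proto_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sockcm_cookie sockc;
	int err;

	sockcm_init(&sockc, sk);	/* seeds tsflags from sk->sk_tsflags */

	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (err)
			return err;
	}

	/* sockc.mark, sockc.tsflags and sockc.transmit_time (SO_TXTIME)
	 * are now ready to be copied into the outgoing skb.
	 */
	return 0;
}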
@@ -5,25 +5,36 @@
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/sock.h>

extern spinlock_t reuseport_lock;

struct sock_reuseport {
        struct rcu_head rcu;

        u16 max_socks;          /* length of socks */
        u16 num_socks;          /* elements in socks */
        /* The last synq overflow event timestamp of this
         * reuse->socks[] group.
         */
        unsigned int synq_overflow_ts;
        /* ID stays the same even after the size of socks[] grows. */
        unsigned int reuseport_id;
        bool bind_inany;
        struct bpf_prog __rcu *prog;    /* optional BPF sock selector */
        struct sock *socks[0];          /* array of sock pointers */
};

extern int reuseport_alloc(struct sock *sk);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
                              bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
                                          u32 hash,
                                          struct sk_buff *skb,
                                          int hdr_len);
extern struct bpf_prog *reuseport_attach_prog(struct sock *sk,
                                              struct bpf_prog *prog);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
int reuseport_get_id(struct sock_reuseport *reuse);

#endif /* _SOCK_REUSEPORT_H */
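The sock_reuseport changes back the new BPF_PROG_TYPE_SK_REUSEPORT selector mentioned in the pull summary. From userspace the group itself is still built the usual way; a short sketch of creating one member of a SO_REUSEPORT listener group (to which a selector program could later be attached via SO_ATTACH_REUSEPORT_EBPF) is shown below, with an arbitrary port and no error reporting beyond -1.

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int reuseport_listener(uint16_t port)
{
	int one = 1;
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) || listen(fd, 128)) {
		close(fd);
		return -1;
	}
	return fd;	/* each group member repeats this with the same port */
}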
@@ -17,6 +17,7 @@ struct tcf_pedit {
        struct tc_pedit_key *tcfp_keys;
        struct tcf_pedit_key_ex *tcfp_keys_ex;
};

#define to_pedit(a) ((struct tcf_pedit *)a)

static inline bool is_tcf_pedit(const struct tc_action *a)
@@ -22,14 +22,19 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_skbedit.h>

struct tcf_skbedit_params {
        u32 flags;
        u32 priority;
        u32 mark;
        u32 mask;
        u16 queue_mapping;
        u16 ptype;
        struct rcu_head rcu;
};

struct tcf_skbedit {
        struct tc_action common;
        u32 flags;
        u32 priority;
        u32 mark;
        u32 mask;
        u16 queue_mapping;
        u16 ptype;
        struct tc_action common;
        struct tcf_skbedit_params __rcu *params;
};
#define to_skbedit(a) ((struct tcf_skbedit *)a)

@@ -37,15 +42,27 @@ struct tcf_skbedit {
static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
        if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
                return to_skbedit(a)->flags == SKBEDIT_F_MARK;
        u32 flags;

        if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
                rcu_read_lock();
                flags = rcu_dereference(to_skbedit(a)->params)->flags;
                rcu_read_unlock();
                return flags == SKBEDIT_F_MARK;
        }
#endif
        return false;
}

static inline u32 tcf_skbedit_mark(const struct tc_action *a)
{
        return to_skbedit(a)->mark;
        u32 mark;

        rcu_read_lock();
        mark = rcu_dereference(to_skbedit(a)->params)->mark;
        rcu_read_unlock();

        return mark;
}

#endif /* __NET_TC_SKBEDIT_H */
@@ -36,6 +36,7 @@
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -473,19 +474,45 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        unsigned long now = jiffies;
        unsigned int last_overflow;
        unsigned int now = jiffies;

        if (time_after(now, last_overflow + HZ))
        if (sk->sk_reuseport) {
                struct sock_reuseport *reuse;

                reuse = rcu_dereference(sk->sk_reuseport_cb);
                if (likely(reuse)) {
                        last_overflow = READ_ONCE(reuse->synq_overflow_ts);
                        if (time_after32(now, last_overflow + HZ))
                                WRITE_ONCE(reuse->synq_overflow_ts, now);
                        return;
                }
        }

        last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        if (time_after32(now, last_overflow + HZ))
                tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        unsigned int last_overflow;
        unsigned int now = jiffies;

        return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
        if (sk->sk_reuseport) {
                struct sock_reuseport *reuse;

                reuse = rcu_dereference(sk->sk_reuseport_cb);
                if (likely(reuse)) {
                        last_overflow = READ_ONCE(reuse->synq_overflow_ts);
                        return time_after32(now, last_overflow +
                                            TCP_SYNCOOKIE_VALID);
                }
        }

        last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
@@ -963,6 +990,8 @@ struct rate_sample {
        u32 prior_delivered;    /* tp->delivered at "prior_mstamp" */
        s32 delivered;          /* number of packets delivered over interval */
        long interval_us;       /* time for tp->delivered to incr "delivered" */
        u32 snd_interval_us;    /* snd interval for delivered packets */
        u32 rcv_interval_us;    /* rcv interval for delivered packets */
        long rtt_us;            /* RTT of last (S)ACKed packet (or -1) */
        int losses;             /* number of packets marked lost upon ACK */
        u32 acked_sacked;       /* number of packets newly (S)ACKed upon ACK */
@@ -1194,6 +1223,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
        return tp->is_cwnd_limited;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
        return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window.
 * We do not want to add fuel to the fire, or abort too early,
@@ -1371,7 +1411,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return true;
        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
        if (unlikely(!time_before32(ktime_get_seconds(),
                                    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
                return true;
        /*
         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1401,7 +1442,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,

           However, we can relax time bounds for RST segments to MSL.
         */
        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
        if (rst && !time_before32(ktime_get_seconds(),
                                  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
                return false;
        return true;
}
@@ -1787,7 +1829,7 @@ void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features);
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
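The PAWS and syncookie hunks above replace unsigned long/get_seconds() arithmetic with 32-bit, wrap-safe comparisons via time_after32()/time_before32(). A standalone sketch of why the signed-difference trick survives a u32 wrap-around follows; the macro body mirrors the one in <linux/jiffies.h>, and the sample timestamps are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Compare two 32-bit timestamps by looking at the sign of their
 * difference, so the result stays correct across a u32 wrap-around.
 */
#define time_after32(a, b)	((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)

int main(void)
{
	uint32_t last = 0xfffffff0u;	/* just before the wrap */
	uint32_t now  = 0x00000010u;	/* just after the wrap */

	/* Prints 1: "now" is still recognised as later than "last". */
	printf("%d\n", time_after32(now, last));
	return 0;
}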
@@ -83,6 +83,16 @@ struct tls_device {
        void (*unhash)(struct tls_device *device, struct sock *sk);
};

enum {
        TLS_BASE,
        TLS_SW,
#ifdef CONFIG_TLS_DEVICE
        TLS_HW,
#endif
        TLS_HW_RECORD,
        TLS_NUM_CONFIG,
};

struct tls_sw_context_tx {
        struct crypto_aead *aead_send;
        struct crypto_wait async_wait;
@@ -114,10 +124,6 @@ struct tls_sw_context_rx {
        struct sk_buff *recv_pkt;
        u8 control;
        bool decrypted;

        char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
        char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];

};

struct tls_record_info {
@@ -128,7 +134,7 @@ struct tls_record_info {
        skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context {
struct tls_offload_context_tx {
        struct crypto_aead *aead_send;
        spinlock_t lock;        /* protects records list */
        struct list_head records_list;
@@ -147,8 +153,8 @@ struct tls_offload_context {
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};

#define TLS_OFFLOAD_CONTEXT_SIZE \
        (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
        (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
         TLS_DRIVER_STATE_SIZE)

enum {
@@ -197,6 +203,7 @@ struct tls_context {
        int (*push_pending_record)(struct sock *sk, int flags);

        void (*sk_write_space)(struct sock *sk);
        void (*sk_destruct)(struct sock *sk);
        void (*sk_proto_close)(struct sock *sk, long timeout);

        int (*setsockopt)(struct sock *sk, int level,
@@ -209,13 +216,27 @@ struct tls_context {
        void (*unhash)(struct sock *sk);
};

struct tls_offload_context_rx {
        /* sw must be the first member of tls_offload_context_rx */
        struct tls_sw_context_rx sw;
        atomic64_t resync_req;
        u8 driver_state[];
        /* The TLS layer reserves room for driver specific state
         * Currently the belief is that there is not enough
         * driver specific state to justify another layer of indirection
         */
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
        (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
         TLS_DRIVER_STATE_SIZE)

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
                 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
                  unsigned int optlen);


int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -223,6 +244,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                   int nonblock, int flags, int *addr_len);
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
@@ -239,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);

struct tls_record_info *tls_get_record(struct tls_offload_context *context,
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
                                       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -289,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
        return tls_ctx->pending_open_record_frags;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
                      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
        return sk_fullsock(sk) &&
               /* matches smp_store_release in tls_set_device_offload */
               smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
#ifdef CONFIG_SOCK_VALIDATE_XMIT
        return sk_fullsock(sk) &
               (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
                &tls_validate_xmit_skb);
#else
        return false;
#endif
}

static inline void tls_err_abort(struct sock *sk, int err)
@@ -380,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
        return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context *tls_offload_ctx(
                const struct tls_context *tls_ctx)
static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
        return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
        return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
        return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

        atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
}


int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
                      unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                struct scatterlist *sgout);

struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
                                      struct net_device *dev,
                                      struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
                         struct tls_offload_context *offload_ctx,
                         struct tls_offload_context_tx *offload_ctx,
                         struct tls_crypto_info *crypto_info);

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);

void tls_device_offload_cleanup_rx(struct sock *sk);
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);

#endif /* _TLS_OFFLOAD_H */
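The tls.h hunks split the offload context into TX and RX variants and rework tls_is_sk_tx_device_offloaded(). A rough sketch of how a NIC driver's xmit path consults that state follows; the mydrv_* functions are hypothetical, only tls_get_ctx(), tls_is_sk_tx_device_offloaded() and tls_offload_ctx_tx() come from the header above.

#include <linux/netdevice.h>
#include <net/tls.h>

static netdev_tx_t mydrv_xmit_tls_skb(struct net_device *dev,
				      struct tls_offload_context_tx *tx_ctx,
				      struct sk_buff *skb);	/* hypothetical */
static netdev_tx_t mydrv_xmit_plain_skb(struct net_device *dev,
					struct sk_buff *skb);	/* hypothetical */

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) {
		struct tls_context *ctx = tls_get_ctx(skb->sk);
		struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(ctx);

		/* hand record bookkeeping (tls_get_record() etc.) to the
		 * device-specific TLS TX path
		 */
		return mydrv_xmit_tls_skb(dev, tx_ctx, skb);
	}

	return mydrv_xmit_plain_skb(dev, skb);
}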
@@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
                                    struct sk_buff *skb);

int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
                          struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
                          struct sockcm_cookie *sockc);
                          struct flowi6 *fl6, struct ipcm6_cookie *ipc6);

void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
                               __u16 srcp, __u16 destp, int rqueue, int bucket);
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
                                     __be16 dport);

struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
                                 struct udphdr *uh, udp_lookup_t lookup);
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                                struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,

typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
                                                     struct sk_buff **head,
                                                     struct sk_buff *skb);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
                                                    struct list_head *head,
                                                    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
                                         int nhoff);
@@ -84,6 +84,13 @@ struct xdp_frame {
        struct net_device *dev_rx; /* used by cpumap */
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
        frame->data = NULL;
        frame->dev_rx = NULL;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -144,4 +151,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
        return unlikely(xdp->data_meta > xdp->data);
}

struct xdp_attachment_info {
        struct bpf_prog *prog;
        u32 flags;
};

struct netdev_bpf;
int xdp_attachment_query(struct xdp_attachment_info *info,
                         struct netdev_bpf *bpf);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
                             struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
                          struct netdev_bpf *bpf);

#endif /* __LINUX_NET_XDP_H__ */
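The xdp_attachment_* helpers added above factor out the prog/flags bookkeeping that drivers repeat in their .ndo_bpf() handlers. A minimal sketch of the expected usage follows; struct mydrv_priv and mydrv_swap_xdp_prog() are hypothetical, the helper calls follow the declarations above.

#include <linux/netdevice.h>
#include <net/xdp.h>

struct mydrv_priv {
	struct xdp_attachment_info xdp;	/* per-device XDP prog + flags */
};

static void mydrv_swap_xdp_prog(struct mydrv_priv *priv,
				struct bpf_prog *prog);	/* hypothetical reconfig */

static int mydrv_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
			return -EBUSY;
		mydrv_swap_xdp_prog(priv, bpf->prog);
		xdp_attachment_setup(&priv->xdp, bpf);	/* records prog + flags */
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}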
@@ -23,6 +23,7 @@
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/flow.h>
#include <net/gro_cells.h>

#include <linux/interrupt.h>

@@ -147,6 +148,7 @@ struct xfrm_state {
        struct xfrm_id id;
        struct xfrm_selector sel;
        struct xfrm_mark mark;
        u32 if_id;
        u32 tfcpad;

        u32 genid;
@@ -166,7 +168,7 @@ struct xfrm_state {
                int header_len;
                int trailer_len;
                u32 extra_flags;
                u32 output_mark;
                struct xfrm_mark smark;
        } props;

        struct xfrm_lifetime_cfg lft;
@@ -225,7 +227,7 @@ struct xfrm_state {
        long saved_tmo;

        /* Last used time */
        unsigned long lastused;
        time64_t lastused;

        struct page_frag xfrag;

@@ -292,6 +294,13 @@ struct xfrm_replay {
        int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};

struct xfrm_if_cb {
        struct xfrm_if *(*decode_session)(struct sk_buff *skb);
};

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);

struct net_device;
struct xfrm_type;
struct xfrm_dst;
@@ -323,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
                      const struct km_event *c);
void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);

struct xfrm_tmpl;
@@ -574,6 +582,7 @@ struct xfrm_policy {
        atomic_t genid;
        u32 priority;
        u32 index;
        u32 if_id;
        struct xfrm_mark mark;
        struct xfrm_selector selector;
        struct xfrm_lifetime_cfg lft;
@@ -1037,6 +1046,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);

struct xfrm_if_parms {
        char name[IFNAMSIZ];    /* name of XFRM device */
        int link;               /* ifindex of underlying L2 interface */
        u32 if_id;              /* interface identifyer */
};

struct xfrm_if {
        struct xfrm_if __rcu *next;     /* next interface in list */
        struct net_device *dev;         /* virtual device associated with interface */
        struct net_device *phydev;      /* physical device */
        struct net *net;                /* netns for packet i/o */
        struct xfrm_if_parms p;         /* interface parms */

        struct gro_cells gro_cells;
};

struct xfrm_offload {
        /* Output sequence number for replay protection on offloading. */
        struct {
@@ -1532,8 +1557,8 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
                                   const struct flowi *fl,
                                   struct xfrm_tmpl *tmpl,
                                   struct xfrm_policy *pol, int *err,
                                   unsigned short family);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
                                       unsigned short family, u32 if_id);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
                                       xfrm_address_t *daddr,
                                       xfrm_address_t *saddr,
                                       unsigned short family,
@@ -1690,20 +1715,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
                     void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
                                          u8 type, int dir,
                                          struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete,
                                          int *err);
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
                                     u32 id, int delete, int *err);
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
                                     int dir, u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
                                 u8 mode, u32 reqid, u8 proto,
                                 u8 mode, u32 reqid, u32 if_id, u8 proto,
                                 const xfrm_address_t *daddr,
                                 const xfrm_address_t *saddr, int create,
                                 unsigned short family);
@@ -2012,6 +2037,22 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
        return ret;
}

static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
        struct xfrm_mark *m = &x->props.smark;

        return (m->v & m->m) | (mark & ~m->m);
}

static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
        int ret = 0;

        if (if_id)
                ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
        return ret;
}

static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
                                    unsigned int family)
{