Merge remote-tracking branch 'net-next/master'

Stefan Schmidt
2018-08-06 09:04:48 +02:00
1936 changed files with 80166 additions and 22327 deletions

View file

@@ -85,7 +85,7 @@ struct tc_action_ops {
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
struct tcf_result *);
struct tcf_result *); /* called under RCU BH lock */
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index,
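Since act() now runs under the RCU BH lock (per the new comment above), implementations must not sleep there. A minimal illustrative sketch of a conforming callback, assuming a hypothetical example_act action:

	/* Hypothetical no-op action: invoked in softirq context under the
	 * RCU BH lock, so it must not block and should use
	 * rcu_dereference_bh() for any RCU-protected parameters.
	 */
	static int example_act(struct sk_buff *skb, const struct tc_action *a,
			       struct tcf_result *res)
	{
		return TC_ACT_OK;	/* let the packet continue */
	}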

View file

@@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
void *, size_t, size_t *, bool, u32 *, u16 *);
struct iov_iter *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
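rxrpc_kernel_recv_data() now takes a struct iov_iter instead of a raw buffer, length, and copied-count triple, so the read size travels inside the iterator. A hedged caller sketch, assuming the 4.18-era iov_iter_kvec() form (buf, want_more, abort_code, and service_id are illustrative locals):

	struct kvec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ | ITER_KVEC, &iov, 1, sizeof(buf));
	ret = rxrpc_kernel_recv_data(sock, call, &iter, want_more,
				     &abort_code, &service_id);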

View file

@@ -183,6 +183,15 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_DIAG,
/* When this quirk is set, setup() is run after every
* open() and not just after the first open().
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_SETUP,
};
/* HCI device flags */
@@ -291,6 +300,14 @@ enum {
#define HCI_DH3 0x0800
#define HCI_DH5 0x8000
/* HCI packet types inverted masks */
#define HCI_2DH1 0x0002
#define HCI_3DH1 0x0004
#define HCI_2DH3 0x0100
#define HCI_3DH3 0x0200
#define HCI_2DH5 0x1000
#define HCI_3DH5 0x2000
#define HCI_HV1 0x0020
#define HCI_HV2 0x0040
#define HCI_HV3 0x0080
@@ -354,6 +371,8 @@ enum {
#define LMP_PCONTROL 0x04
#define LMP_TRANSPARENT 0x08
#define LMP_EDR_2M 0x02
#define LMP_EDR_3M 0x04
#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
@@ -361,7 +380,9 @@ enum {
#define LMP_EV5 0x02
#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40
#define LMP_EDR_3SLOT 0x80
#define LMP_EDR_5SLOT 0x01
#define LMP_SNIFF_SUBR 0x02
#define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20
@@ -398,7 +419,12 @@ enum {
#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_EXT_SCAN_POLICY 0x80
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */
@@ -1490,6 +1516,14 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time;
} __packed;
#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
struct hci_rp_le_read_resolv_list_size {
__u8 status;
__u8 size;
} __packed;
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
@@ -1506,6 +1540,134 @@ struct hci_cp_le_set_default_phy {
__u8 rx_phys;
} __packed;
#define HCI_LE_SET_PHY_1M 0x01
#define HCI_LE_SET_PHY_2M 0x02
#define HCI_LE_SET_PHY_CODED 0x04
#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
struct hci_cp_le_set_ext_scan_params {
__u8 own_addr_type;
__u8 filter_policy;
__u8 scanning_phys;
__u8 data[0];
} __packed;
#define LE_SCAN_PHY_1M 0x01
#define LE_SCAN_PHY_2M 0x02
#define LE_SCAN_PHY_CODED 0x04
struct hci_cp_le_scan_phy_params {
__u8 type;
__le16 interval;
__le16 window;
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
struct hci_cp_le_set_ext_scan_enable {
__u8 enable;
__u8 filter_dup;
__le16 duration;
__le16 period;
} __packed;
#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
struct hci_cp_le_ext_create_conn {
__u8 filter_policy;
__u8 own_addr_type;
__u8 peer_addr_type;
bdaddr_t peer_addr;
__u8 phys;
__u8 data[0];
} __packed;
struct hci_cp_le_ext_conn_param {
__le16 scan_interval;
__le16 scan_window;
__le16 conn_interval_min;
__le16 conn_interval_max;
__le16 conn_latency;
__le16 supervision_timeout;
__le16 min_ce_len;
__le16 max_ce_len;
} __packed;
#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
struct hci_rp_le_read_num_supported_adv_sets {
__u8 status;
__u8 num_of_sets;
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
struct hci_cp_le_set_ext_adv_params {
__u8 handle;
__le16 evt_properties;
__u8 min_interval[3];
__u8 max_interval[3];
__u8 channel_map;
__u8 own_addr_type;
__u8 peer_addr_type;
bdaddr_t peer_addr;
__u8 filter_policy;
__u8 tx_power;
__u8 primary_phy;
__u8 secondary_max_skip;
__u8 secondary_phy;
__u8 sid;
__u8 notif_enable;
} __packed;
#define HCI_ADV_PHY_1M 0x01
#define HCI_ADV_PHY_2M 0x02
#define HCI_ADV_PHY_CODED 0x03
struct hci_rp_le_set_ext_adv_params {
__u8 status;
__u8 tx_power;
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
struct hci_cp_le_set_ext_adv_enable {
__u8 enable;
__u8 num_of_sets;
__u8 data[0];
} __packed;
struct hci_cp_ext_adv_set {
__u8 handle;
__le16 duration;
__u8 max_events;
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
struct hci_cp_le_set_ext_adv_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
__u8 data[HCI_MAX_AD_LENGTH];
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
struct hci_cp_le_set_ext_scan_rsp_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
__u8 data[HCI_MAX_AD_LENGTH];
} __packed;
#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
#define LE_SET_ADV_DATA_NO_FRAG 0x01
#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
struct hci_cp_le_set_adv_set_rand_addr {
__u8 handle;
bdaddr_t bdaddr;
} __packed;
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1893,6 +2055,23 @@ struct hci_ev_le_conn_complete {
#define LE_ADV_SCAN_IND 0x02
#define LE_ADV_NONCONN_IND 0x03
#define LE_ADV_SCAN_RSP 0x04
#define LE_ADV_INVALID 0x05
/* Legacy event types in extended adv report */
#define LE_LEGACY_ADV_IND 0x0013
#define LE_LEGACY_ADV_DIRECT_IND 0x0015
#define LE_LEGACY_ADV_SCAN_IND 0x0012
#define LE_LEGACY_NONCONN_IND 0x0010
#define LE_LEGACY_SCAN_RSP_ADV 0x001b
#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
/* Extended Advertising event types */
#define LE_EXT_ADV_NON_CONN_IND 0x0000
#define LE_EXT_ADV_CONN_IND 0x0001
#define LE_EXT_ADV_SCAN_IND 0x0002
#define LE_EXT_ADV_DIRECT_IND 0x0004
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
@@ -1957,6 +2136,46 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
struct hci_ev_le_ext_adv_report {
__le16 evt_type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 primary_phy;
__u8 secondary_phy;
__u8 sid;
__u8 tx_power;
__s8 rssi;
__le16 interval;
__u8 direct_addr_type;
bdaddr_t direct_addr;
__u8 length;
__u8 data[0];
} __packed;
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
struct hci_ev_le_enh_conn_complete {
__u8 status;
__le16 handle;
__u8 role;
__u8 bdaddr_type;
bdaddr_t bdaddr;
bdaddr_t local_rpa;
bdaddr_t peer_rpa;
__le16 interval;
__le16 latency;
__le16 supervision_timeout;
__u8 clk_accurancy;
} __packed;
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
__u8 handle;
__le16 conn_handle;
__u8 num_evts;
} __packed;
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {

View file

@@ -171,6 +171,10 @@ struct adv_info {
__u8 adv_data[HCI_MAX_AD_LENGTH];
__u16 scan_rsp_len;
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
__s8 tx_power;
bdaddr_t random_addr;
bool rpa_expired;
struct delayed_work rpa_expired_cb;
};
#define HCI_MAX_ADV_INSTANCES 5
@@ -221,6 +225,8 @@ struct hci_dev {
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
__u8 le_white_list_size;
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
__u8 le_states[8];
__u8 commands[64];
__u8 hci_ver;
@@ -314,6 +320,9 @@ struct hci_dev {
unsigned long sco_last_tx;
unsigned long le_last_tx;
__u8 le_tx_def_phys;
__u8 le_rx_def_phys;
struct workqueue_struct *workqueue;
struct workqueue_struct *req_workqueue;
@@ -367,6 +376,7 @@ struct hci_dev {
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
struct list_head le_white_list;
struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
@@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
@@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
/* Use ext scanning if the set ext scan param and ext scan enable commands are supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
/* Use ext create connection if command is supported */
#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);

View file

@@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_TX_POWER BIT(4)
#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
MGMT_ADV_FLAG_SEC_CODED)
#define MGMT_OP_REMOVE_ADVERTISING 0x003F
struct mgmt_cp_remove_advertising {
@@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
} __packed;
#define MGMT_SET_APPEARANCE_SIZE 2
#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
struct mgmt_rp_get_phy_confguration {
__le32 supported_phys;
__le32 configurable_phys;
__le32 selected_phys;
} __packed;
#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
#define MGMT_PHY_BR_1M_1SLOT 0x00000001
#define MGMT_PHY_BR_1M_3SLOT 0x00000002
#define MGMT_PHY_BR_1M_5SLOT 0x00000004
#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
#define MGMT_PHY_LE_1M_TX 0x00000200
#define MGMT_PHY_LE_1M_RX 0x00000400
#define MGMT_PHY_LE_2M_TX 0x00000800
#define MGMT_PHY_LE_2M_RX 0x00001000
#define MGMT_PHY_LE_CODED_TX 0x00002000
#define MGMT_PHY_LE_CODED_RX 0x00004000
#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
MGMT_PHY_EDR_3M_5SLOT)
#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
MGMT_PHY_LE_CODED_TX)
#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
MGMT_PHY_LE_CODED_RX)
#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
struct mgmt_cp_set_phy_confguration {
__le32 selected_phys;
} __packed;
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
__le16 eir_len;
__u8 eir[0];
} __packed;
#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
struct mgmt_ev_phy_configuration_changed {
__le32 selected_phys;
} __packed;

View file

@@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
"none",
"unknown"
};
int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
int max_size = ARRAY_SIZE(churn_description);
if (state >= max_size)
state = max_size - 1;
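ARRAY_SIZE() replaces the open-coded sizeof division. Besides being shorter, the kernel macro also rejects pointers at compile time; roughly (simplified, the real definition in include/linux/kernel.h adds __must_be_array()):

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))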

View file

@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#endif
}
static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
{
if (sk_can_busy_loop(sock->sk) &&
events && (events & POLL_BUSY_LOOP)) {
/* once, only if requested by syscall */
sk_busy_loop(sock->sk, 1);
}
}
/* if this socket can poll_ll, tell the system call */
static inline __poll_t sock_poll_busy_flag(struct socket *sock)
{
return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
}
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)

View file

@@ -5937,10 +5937,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
* cfg80211_rx_control_port - notification about a received control port frame
* @dev: The device the frame matched to
* @buf: control port frame
* @len: length of the frame data
* @addr: The peer from which the frame was received
* @proto: frame protocol, typically PAE or Pre-authentication
* @skb: The skb with the control port frame. It is assumed that the skb
* is 802.3 formatted (with an 802.3 header). The skb can be non-linear.
* This function does not take ownership of the skb, so the caller is
* responsible for any cleanup. The caller must also ensure that
* skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted
*
* This function is used to inform userspace about a received control port
@@ -5953,8 +5954,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
* Return: %true if the frame was passed to userspace
*/
bool cfg80211_rx_control_port(struct net_device *dev,
const u8 *buf, size_t len,
const u8 *addr, u16 proto, bool unencrypted);
struct sk_buff *skb, bool unencrypted);
/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event

View file

@@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
struct dcb_ieee_app_prio_map {
u64 map[IEEE_8021QAZ_MAX_TCS];
};
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
struct dcb_ieee_app_prio_map *p_map);
struct dcb_ieee_app_dscp_map {
u8 map[64];
};
void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
struct dcb_ieee_app_dscp_map *p_map);
u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 pid);
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,

View file

@@ -259,6 +259,9 @@ struct dsa_switch {
/* Number of switch port queues */
unsigned int num_tx_queues;
unsigned long *bitmap;
unsigned long _bitmap;
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];

View file

@@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
return dst_orig;
}
static inline struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags, u32 if_id)
{
return dst_orig;
}
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
@@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
const struct sock *sk, int flags,
u32 if_id);
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);

View file

@@ -207,7 +207,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
FLOW_DISSECTOR_KEY_MAX,
};

View file

@@ -75,7 +75,8 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
struct sk_buff *fragments;
struct sk_buff *fragments; /* Used in IPv6. */
struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
ktime_t stamp;
int len;

View file

@@ -359,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk)
return !!inet_sk(sk)->convert_csum;
}
static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv4.sysctl_ip_nonlocal_bind ||
inet->freebind || inet->transparent;
}
#endif /* _INET_SOCK_H */
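The new helper gathers the three conditions under which binding to a non-local address is permitted. A hedged sketch of the caller side, paraphrasing __inet_bind()-style validation (chk_addr_ret is an illustrative local holding the inet_addr_type() result):

	/* Reject foreign source addresses unless the netns sysctl or the
	 * socket's freebind/transparent flags allow non-local binds.
	 */
	if (!inet_can_nonlocal_bind(net, inet) &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
	    chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST &&
	    chk_addr_ret != RTN_BROADCAST)
		return -EADDRNOTAVAIL;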

View file

@@ -281,6 +281,11 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
atomic_inc(&f6i->fib6_ref);
}
static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
{
return atomic_inc_not_zero(&f6i->fib6_ref);
}
static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))

View file

@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
{
return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
RTF_GATEWAY;
}
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,

View file

@@ -335,6 +335,11 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
/* Connection templates use bits from state */
#define IP_VS_CTPL_S_NONE 0x0000
#define IP_VS_CTPL_S_ASSURED 0x0001
#define IP_VS_CTPL_S_LAST 0x0002
/* Delta sequence info structure
* Each ip_vs_conn has 2 (output AND input seq. changes).
* Only used in the VS/NAT.
@@ -1221,7 +1226,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
const char *ip_vs_state_name(__u16 proto, int state);
const char *ip_vs_state_name(const struct ip_vs_conn *cp);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
@@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
/* Mark our template as assured */
static inline void
ip_vs_control_assure_ct(struct ip_vs_conn *cp)
{
struct ip_vs_conn *ct = cp->control;
if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
(ct->flags & IP_VS_CONN_F_TEMPLATE))
ct->state |= IP_VS_CTPL_S_ASSURED;
}
/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);

View file

@@ -375,14 +375,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
struct ipv6_opt_hdr __user *newopt,
int newoptlen);
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
struct ipv6_opt_hdr *newopt,
int newoptlen);
struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
@@ -581,34 +574,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
}
#endif
struct inet_frag_queue;
enum ip6_defrag_users {
IP6_DEFRAG_LOCAL_DELIVER,
IP6_DEFRAG_CONNTRACK_IN,
__IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_OUT,
__IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
void ip6_frag_init(struct inet_frag_queue *q, const void *a);
extern const struct rhashtable_params ip6_rhash_params;
/*
* Equivalent of ipv4 struct ip
*/
struct frag_queue {
struct inet_frag_queue q;
int iif;
__u16 nhoffset;
u8 ecn;
};
void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
@@ -817,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
#if IS_ENABLED(CONFIG_IPV6)
static inline bool ipv6_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv6.sysctl.ip_nonlocal_bind ||
inet->freebind || inet->transparent;
}
/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF 0
#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
@@ -850,7 +822,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
* to minimize the possibility that any useful information is
* leaked to an attacker. Only the lower 20 bits are relevant.
*/
rol32(hash, 16);
hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
@@ -1127,6 +1099,8 @@ void ipv6_sysctl_unregister(void);
int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
#endif /* _NET_IPV6_H */

include/net/ipv6_frag.h (new file, 104 lines)
View file

@@ -0,0 +1,104 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>
enum ip6_defrag_users {
IP6_DEFRAG_LOCAL_DELIVER,
IP6_DEFRAG_CONNTRACK_IN,
__IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_OUT,
__IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
/*
* Equivalent of ipv4 struct ip
*/
struct frag_queue {
struct inet_frag_queue q;
int iif;
__u16 nhoffset;
u8 ecn;
};
#if IS_ENABLED(CONFIG_IPV6)
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
const struct frag_v6_compare_key *key = a;
q->key.v6 = *key;
fq->ecn = 0;
}
static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
return jhash2(data,
sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}
static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
const struct inet_frag_queue *fq = data;
return jhash2((const u32 *)&fq->key.v6,
sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}
static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
const struct frag_v6_compare_key *key = arg->key;
const struct inet_frag_queue *fq = ptr;
return !!memcmp(&fq->key, key, sizeof(*key));
}
static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
struct net_device *dev = NULL;
struct sk_buff *head;
rcu_read_lock();
spin_lock(&fq->q.lock);
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
inet_frag_kill(&fq->q);
dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
goto out;
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
/* Don't send error if the first segment did not arrive. */
head = fq->q.fragments;
if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
goto out;
head->dev = dev;
skb_get(head);
spin_unlock(&fq->q.lock);
icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
kfree_skb(head);
goto out_rcu_unlock;
out:
spin_unlock(&fq->q.lock);
out_rcu_unlock:
rcu_read_unlock();
inet_frag_put(&fq->q);
}
#endif
#endif
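Moving ip6frag_expire_frag_queue() into this shared header lets each reassembly user (local delivery, conntrack, bridge netfilter) drive it from its own timer. A sketch of the timer glue, closely following what net/ipv6/reassembly.c does (example_frag_expire is an illustrative name):

	static void example_frag_expire(struct timer_list *t)
	{
		struct inet_frag_queue *frag = from_timer(frag, t, timer);
		struct frag_queue *fq;
		struct net *net;

		fq = container_of(frag, struct frag_queue, q);
		net = container_of(frag->net, struct net, ipv6.frags);

		ip6frag_expire_frag_queue(net, fq);
	}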

View file

@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>
#include <net/flow.h>
#include <net/netns/core.h>
@@ -170,6 +171,8 @@ extern struct net init_net;
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
struct net *old_net);
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
void net_ns_barrier(void);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
@@ -182,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags,
return old_net;
}
static inline void net_ns_get_ownership(const struct net *net,
kuid_t *uid, kgid_t *gid)
{
*uid = GLOBAL_ROOT_UID;
*gid = GLOBAL_ROOT_GID;
}
static inline void net_ns_barrier(void) {}
#endif /* CONFIG_NET_NS */

View file

@@ -28,6 +28,7 @@ enum netevent_notif_type {
NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
};
int register_netevent_notifier(struct notifier_block *nb);

View file

@@ -10,9 +10,6 @@
#ifndef _NF_CONNTRACK_IPV4_H
#define _NF_CONNTRACK_IPV4_H
const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;

View file

@@ -41,6 +41,11 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
struct nf_conntrack_net {
unsigned int users4;
unsigned int users6;
};
#include <linux/types.h>
#include <linux/skbuff.h>
@@ -171,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto);
*/
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
void nf_ct_free_hashtable(void *hash, unsigned int size);
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);

View file

@@ -14,7 +14,6 @@
#define _NF_CONNTRACK_CORE_H
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
@@ -40,16 +39,8 @@ void nf_conntrack_cleanup_start(void);
void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);
bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
struct net *net,
struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
/* Find a connection corresponding to a tuple. */
@@ -75,10 +66,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
return ret;
}
void
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto);
void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l4proto *proto);
#define CONNTRACK_LOCKS 1024

View file

@@ -1,8 +1,23 @@
#ifndef _NF_CONNTRACK_COUNT_H
#define _NF_CONNTRACK_COUNT_H
#include <linux/list.h>
struct nf_conncount_data;
enum nf_conncount_list_add {
NF_CONNCOUNT_ADDED, /* list add was ok */
NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
};
struct nf_conncount_list {
spinlock_t list_lock;
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
bool dead;
};
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
unsigned int keylen);
void nf_conncount_destroy(struct net *net, unsigned int family,
@@ -14,15 +29,21 @@ unsigned int nf_conncount_count(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone,
bool *addit);
void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone,
bool *addit);
bool nf_conncount_add(struct hlist_head *head,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
void nf_conncount_list_init(struct nf_conncount_list *list);
void nf_conncount_cache_free(struct hlist_head *hhead);
enum nf_conncount_list_add
nf_conncount_add(struct nf_conncount_list *list,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
bool nf_conncount_gc_list(struct net *net,
struct nf_conncount_list *list);
void nf_conncount_cache_free(struct nf_conncount_list *list);
#endif
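Callers now branch on the three-way verdict from nf_conncount_add() instead of a bool. An illustrative handling sketch (the surrounding match logic is omitted):

	switch (nf_conncount_add(&list, tuple, zone)) {
	case NF_CONNCOUNT_ADDED:
		break;			/* connection tracked in the list */
	case NF_CONNCOUNT_SKIP:
		break;			/* list already reclaimed by gc */
	case NF_CONNCOUNT_ERR:
		return NF_DROP;		/* -ENOMEM, must drop the skb */
	}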

View file

@@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
unsigned int);
struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
struct nf_conntrack_helper *helper,
gfp_t gfp);
struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);

View file

@@ -1,84 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C)2003,2004 USAGI/WIDE Project
*
* Header for use in defining a given L3 protocol for connection tracking.
*
* Author:
* Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
*
* Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
*/
#ifndef _NF_CONNTRACK_L3PROTO_H
#define _NF_CONNTRACK_L3PROTO_H
#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/seq_file.h>
#include <net/netfilter/nf_conntrack.h>
struct nf_conntrack_l3proto {
/* L3 Protocol Family number. ex) PF_INET */
u_int16_t l3proto;
/* size of tuple nlattr, fills a hole */
u16 nla_size;
/*
* Try to fill in the third arg: nhoff is offset of l3 proto
* hdr. Return true if possible.
*/
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);
/*
* Invert the per-proto part of the tuple: ie. turn xmit into reply.
* Some packets can't be inverted: return 0 in that case.
*/
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
/*
* Called before tracking.
* *dataoff: offset of protocol header (TCP, UDP,...) in skb
* *protonum: protocol number
*/
int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
unsigned int *dataoff, u_int8_t *protonum);
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
int (*tuple_to_nlattr)(struct sk_buff *skb,
const struct nf_conntrack_tuple *t);
int (*nlattr_to_tuple)(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
#endif
/* Called when netns wants to use connection tracking */
int (*net_ns_get)(struct net *);
void (*net_ns_put)(struct net *);
/* Module (if any) which this is connected to. */
struct module *me;
};
extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
/* Protocol global registration. */
int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
/* Existing built-in protocols */
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
static inline struct nf_conntrack_l3proto *
__nf_ct_l3proto_find(u_int16_t l3proto)
{
if (unlikely(l3proto >= NFPROTO_NUMPROTO))
return &nf_conntrack_l3proto_generic;
return rcu_dereference(nf_ct_l3protos[l3proto]);
}
#endif /*_NF_CONNTRACK_L3PROTO_H*/

View file

@@ -36,7 +36,7 @@ struct nf_conntrack_l4proto {
struct net *net, struct nf_conntrack_tuple *tuple);
/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
* Some packets can't be inverted: return 0 in that case.
* Only used by icmp; most protocols use a generic version.
*/
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
@@ -45,13 +45,12 @@ struct nf_conntrack_l4proto {
int (*packet)(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
unsigned int *timeouts);
enum ip_conntrack_info ctinfo);
/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts);
unsigned int dataoff);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -63,9 +62,6 @@ struct nf_conntrack_l4proto {
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
/* Return the array of timeouts for this protocol. */
unsigned int *(*get_timeouts)(struct net *net);
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
@@ -134,10 +130,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
/* Protocol global registration. */
int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
unsigned int num_proto);
void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
unsigned int num_proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,

View file

@@ -67,27 +67,17 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
#endif
};
static inline unsigned int *
nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
const struct nf_conntrack_l4proto *l4proto)
static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
{
unsigned int *timeouts = NULL;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
unsigned int *timeouts;
timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext) {
if (timeout_ext)
timeouts = nf_ct_timeout_data(timeout_ext);
if (unlikely(!timeouts))
timeouts = l4proto->get_timeouts(net);
} else {
timeouts = l4proto->get_timeouts(net);
}
return timeouts;
#else
return l4proto->get_timeouts(net);
#endif
return timeouts;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT

View file

@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
* @portid: netlink portID of the original message
* @seq: netlink sequence number
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
*/
struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
u32 portid;
u32 seq;
u8 family;
u8 level;
bool report;
};
@@ -865,7 +867,6 @@ enum nft_chain_flags {
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
* @level: length of longest path to this chain
* @flags: bitmask of enum nft_chain_flags
* @name: name of the chain
*/
@@ -878,7 +879,6 @@ struct nft_chain {
struct nft_table *table;
u64 handle;
u32 use;
u16 level;
u8 flags:6,
genmask:2;
char *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
u32 genmask:2,
use:30;
u64 handle;
char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
struct nf_flowtable data;

View file

@@ -65,4 +65,17 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
extern struct nft_set_type nft_set_rhash_type;
extern struct nft_set_type nft_set_hash_type;
extern struct nft_set_type nft_set_hash_fast_type;
extern struct nft_set_type nft_set_rbtree_type;
extern struct nft_set_type nft_set_bitmap_type;
struct nft_expr;
struct nft_regs;
struct nft_pktinfo;
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */

View file

@@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
return false;
}
/* assign a socket to the skb -- consumes sk */
static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_edemux;
}
__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
@@ -64,7 +72,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
* belonging to established connections going through that one.
*/
struct sock *
nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
@@ -103,7 +111,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
struct sock *sk);
struct sock *
nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
const u8 protocol,
const struct in6_addr *saddr, const struct in6_addr *daddr,
const __be16 sport, const __be16 dport,

View file

@@ -98,6 +98,7 @@ struct netns_ipv4 {
int sysctl_ip_default_ttl;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;

View file

@@ -7,6 +7,7 @@
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
u8 validate_state;

View file

@@ -7,12 +7,16 @@
#include <net/sch_generic.h>
#include <net/act_api.h>
/* TC action not accessible from user space */
#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
/* Basic packet classifier frontend definitions. */
struct tcf_walker {
int stop;
int skip;
int count;
unsigned long cookie;
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
@@ -36,9 +40,9 @@ struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create);
void tcf_chain_put(struct tcf_chain *chain);
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -113,11 +117,6 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
{
}
static inline bool tcf_block_shared(struct tcf_block *block)
{
return false;
}
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
return NULL;
@@ -725,6 +724,8 @@ enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
TC_CLSFLOWER_STATS,
TC_CLSFLOWER_TMPLT_CREATE,
TC_CLSFLOWER_TMPLT_DESTROY,
};
struct tc_cls_flower_offload {

View file

@@ -235,9 +235,17 @@ struct tcf_result {
u32 classid;
};
const struct tcf_proto *goto_tp;
/* used by the TC_ACT_REINSERT action */
struct {
bool ingress;
struct gnet_stats_queue *qstats;
};
};
};
struct tcf_chain;
struct tcf_proto_ops {
struct list_head head;
char kind[IFNAMSIZ];
@@ -263,10 +271,18 @@ struct tcf_proto_ops {
tc_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack);
void (*bind_class)(void *, u32, unsigned long);
void * (*tmplt_create)(struct net *net,
struct tcf_chain *chain,
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*);
int (*tmplt_dump)(struct sk_buff *skb,
struct net *net,
void *tmplt_priv);
struct module *owner;
};
@@ -275,6 +291,8 @@ struct tcf_proto {
/* Fast access part */
struct tcf_proto __rcu *next;
void __rcu *root;
/* called under RCU BH lock */
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -300,11 +318,14 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
struct tcf_proto __rcu *filter_chain;
struct list_head filter_chain_list;
struct list_head list;
struct tcf_block *block;
u32 index; /* chain index */
unsigned int refcnt;
unsigned int action_refcnt;
bool explicitly_created;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
};
struct tcf_block {
@@ -318,6 +339,10 @@ struct tcf_block {
bool keep_dst;
unsigned int offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
struct {
struct tcf_chain *chain;
struct list_head filter_chain_list;
} chain0;
};
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -550,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb)
#endif
}
static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
return skb->tc_redirected;
#else
return false;
#endif
}
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -1089,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
struct gnet_stats_queue *stats = res->qstats;
int ret;
if (res->ingress)
ret = netif_receive_skb(skb);
else
ret = dev_queue_xmit(skb);
if (ret && stats)
qstats_overlimit_inc(res->qstats);
}
#endif
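skb_tc_reinsert() is the consumer side of the new TC_ACT_REINSERT verdict: an action records the direction and a qstats pointer in the tcf_result, returns TC_ACT_REINSERT, and the core re-injects the skb without cloning it. A hedged sketch of the action side (act_mirred uses this path in-tree; m and want_ingress are illustrative):

	res->ingress = want_ingress;	/* direction for re-injection */
	res->qstats = this_cpu_ptr(m->common.cpu_qstats);
	return TC_ACT_REINSERT;		/* caller invokes skb_tc_reinsert() */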

View file

@@ -1788,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
{
WARN_ON(parent->sk);
write_lock_bh(&sk->sk_callback_lock);
sk->sk_wq = parent->wq;
rcu_assign_pointer(sk->sk_wq, parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
sk->sk_uid = SOCK_INODE(parent)->i_uid;
@@ -2057,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
* @wait_address: socket wait queue
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
*/
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
if (!poll_does_not_wait(p) && wait_address) {
poll_wait(filp, wait_address, p);
struct socket *sock = filp->private_data;
if (!poll_does_not_wait(p)) {
poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
* socket flags modification.
*

View file

@@ -7,7 +7,6 @@
#include <linux/tc_act/tc_csum.h>
struct tcf_csum_params {
int action;
u32 update_flags;
struct rcu_head rcu;
};

View file

@@ -18,7 +18,6 @@
struct tcf_tunnel_key_params {
struct rcu_head rcu;
int tcft_action;
int action;
struct metadata_dst *tcft_enc_metadata;
};

View file

@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
@@ -540,6 +541,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -829,12 +831,21 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
* as TCP moves IP6CB into a different location in skb->cb[]
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
return TCP_SKB_CB(skb)->header.h6.iif;
}
static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
@@ -909,8 +920,6 @@ enum tcp_ca_event {
CA_EVENT_LOSS, /* loss timeout */
CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
CA_EVENT_NON_DELAYED_ACK,
};
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */

View file

@@ -83,6 +83,16 @@ struct tls_device {
void (*unhash)(struct tls_device *device, struct sock *sk);
};
enum {
TLS_BASE,
TLS_SW,
#ifdef CONFIG_TLS_DEVICE
TLS_HW,
#endif
TLS_HW_RECORD,
TLS_NUM_CONFIG,
};
struct tls_sw_context_tx {
struct crypto_aead *aead_send;
struct crypto_wait async_wait;
@@ -128,7 +138,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
struct tls_offload_context {
struct tls_offload_context_tx {
struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */
struct list_head records_list;
@@ -147,8 +157,8 @@ struct tls_offload_context {
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};
#define TLS_OFFLOAD_CONTEXT_SIZE \
(ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
enum {
@@ -197,6 +207,7 @@ struct tls_context {
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
int (*setsockopt)(struct sock *sk, int level,
@@ -209,13 +220,27 @@ struct tls_context {
void (*unhash)(struct sock *sk);
};
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
atomic64_t resync_req;
u8 driver_state[];
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -223,6 +248,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
@@ -239,7 +265,7 @@ void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
struct tls_record_info *tls_get_record(struct tls_offload_context *context,
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -289,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
return sk_fullsock(sk) &&
/* matches smp_store_release in tls_set_device_offload */
smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
#ifdef CONFIG_SOCK_VALIDATE_XMIT
return sk_fullsock(sk) &&
(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
return false;
#endif
}
static inline void tls_err_abort(struct sock *sk, int err)
@@ -380,23 +414,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline struct tls_offload_context *tls_offload_ctx(
const struct tls_context *tls_ctx)
static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb);
int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context *offload_ctx,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
#endif /* _TLS_OFFLOAD_H */
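tls_offload_rx_resync_request() packs the TCP sequence number and a request flag into one atomic64 so the receive path can fetch both with a single atomic read. The consumer side decodes it roughly like this (a sketch mirroring the handle_device_resync() logic; byte-order conversion of the sequence is omitted):

	u64 req = atomic64_read(&rx_ctx->resync_req);
	u32 req_seq = req >> 32;	/* sequence (network order) needing resync */
	bool is_req_pending = req & 1;	/* low bit = request flag */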

View file

@@ -144,4 +144,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
return unlikely(xdp->data_meta > xdp->data);
}
struct xdp_attachment_info {
struct bpf_prog *prog;
u32 flags;
};
struct netdev_bpf;
int xdp_attachment_query(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
#endif /* __LINUX_NET_XDP_H__ */

View file

@@ -60,6 +60,10 @@ struct xdp_sock {
bool zc;
/* Protects multiple processes in the control path */
struct mutex mutex;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
* in the SKB destructor callback.
*/
spinlock_t tx_completion_lock;
u64 rx_dropped;
};

View file

@@ -23,6 +23,7 @@
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/flow.h>
#include <net/gro_cells.h>
#include <linux/interrupt.h>
@@ -147,6 +148,7 @@ struct xfrm_state {
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
u32 if_id;
u32 tfcpad;
u32 genid;
@@ -166,7 +168,7 @@ struct xfrm_state {
int header_len;
int trailer_len;
u32 extra_flags;
u32 output_mark;
struct xfrm_mark smark;
} props;
struct xfrm_lifetime_cfg lft;
@@ -225,7 +227,7 @@ struct xfrm_state {
long saved_tmo;
/* Last used time */
unsigned long lastused;
time64_t lastused;
struct page_frag xfrag;
@@ -292,6 +294,13 @@ struct xfrm_replay {
int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};
struct xfrm_if_cb {
struct xfrm_if *(*decode_session)(struct sk_buff *skb);
};
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);
struct net_device;
struct xfrm_type;
struct xfrm_dst;
@@ -323,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
@@ -574,6 +582,7 @@ struct xfrm_policy {
atomic_t genid;
u32 priority;
u32 index;
u32 if_id;
struct xfrm_mark mark;
struct xfrm_selector selector;
struct xfrm_lifetime_cfg lft;
@@ -1037,6 +1046,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
char name[IFNAMSIZ]; /* name of XFRM device */
int link; /* ifindex of underlying L2 interface */
u32 if_id; /* interface identifier */
};
struct xfrm_if {
struct xfrm_if __rcu *next; /* next interface in list */
struct net_device *dev; /* virtual device associated with interface */
struct net_device *phydev; /* physical device */
struct net *net; /* netns for packet i/o */
struct xfrm_if_parms p; /* interface parms */
struct gro_cells gro_cells;
};
struct xfrm_offload {
/* Output sequence number for replay protection on offloading. */
struct {
@@ -1532,8 +1557,8 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const struct flowi *fl,
struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
unsigned short family, u32 if_id);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr,
xfrm_address_t *saddr,
unsigned short family,
@@ -1690,20 +1715,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
u32 id, int delete, int *err);
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
int dir, u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
u8 mode, u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
@@ -2012,6 +2037,22 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
return ret;
}
static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
struct xfrm_mark *m = &x->props.smark;
return (m->v & m->m) | (mark & ~m->m);
}
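xfrm_smark_get() overlays the state's masked mark bits onto the incoming mark: bits covered by the mask m come from the configured value v, everything else passes through. A worked example with illustrative values:

	/* m->v = 0x00a0, m->m = 0x00f0, mark = 0x1234
	 *   m->v & m->m   = 0x00a0   bits supplied by the state
	 *   mark & ~m->m  = 0x1204   bits kept from the caller
	 *   result        = 0x12a4
	 */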
static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
int ret = 0;
if (if_id)
ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
return ret;
}
static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
unsigned int family)
{