net: Add asynchronous callbacks for xfrm on layer 2.
This patch implements asynchronous crypto callbacks and a backlog handler
that can be used when IPsec is done at layer 2 in the TX path. It also
extends the skb validate functions so that we can update the driver
transmit return codes based on async crypto operation or to indicate
that we queued the packet in a backlog queue.

Joint work with: Aviv Heller <avivh@mellanox.com>

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
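For orientation, here is a minimal sketch (not the patch's actual xfrm code) of the contract the new bool *again out-parameter establishes: when a validate helper returns NULL, the flag distinguishes "the skb was dropped" from "the skb was parked on the per-CPU xfrm backlog until its async crypto completes". The helpers xfrm_crypto_pending() and xfrm_backlog_enqueue() below are hypothetical placeholders.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical placeholders for the async-crypto bookkeeping: */
static bool xfrm_crypto_pending(const struct sk_buff *skb);
static void xfrm_backlog_enqueue(struct sk_buff *skb);

/* Sketch only: how a layer-2 xfrm validate step could use the new
 * out-parameter instead of overloading the NULL return value.
 */
static struct sk_buff *xfrm_validate_sketch(struct sk_buff *skb,
					    netdev_features_t features,
					    bool *again)
{
	if (xfrm_crypto_pending(skb)) {
		/* Async crypto still in flight: park the skb on the
		 * per-CPU backlog and tell the caller this is "try
		 * again later", not a drop.
		 */
		xfrm_backlog_enqueue(skb);
		*again = true;
		return NULL;
	}

	return skb;	/* ready to be handed to the driver */
}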
net/core/dev.c
@@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
 
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	netdev_features_t features;
 
@@ -3099,7 +3099,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		}
 	}
 
-	skb = validate_xmit_xfrm(skb, features);
+	skb = validate_xmit_xfrm(skb, features, again);
 
 	return skb;
 
@@ -3110,7 +3110,7 @@ out_null:
 	return NULL;
 }
 
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	struct sk_buff *next, *head = NULL, *tail;
 
@@ -3121,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 		/* in case skb wont be segmented, point to itself */
 		skb->prev = skb;
 
-		skb = validate_xmit_skb(skb, dev);
+		skb = validate_xmit_skb(skb, dev, again);
 		if (!skb)
 			continue;
 
@@ -3448,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
+	bool again = false;
 
 	skb_reset_mac_header(skb);
 
@@ -3509,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 					     XMIT_RECURSION_LIMIT))
 					goto recursion_alert;
 
-				skb = validate_xmit_skb(skb, dev);
+				skb = validate_xmit_skb(skb, dev, &again);
 				if (!skb)
 					goto out;
 
@@ -4193,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 			spin_unlock(root_lock);
 		}
 	}
+
+	xfrm_dev_backlog(sd);
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
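xfrm_dev_backlog() itself is defined on the xfrm side and is not part of this file's diff; the following is a minimal sketch of what such a drain handler could look like, assuming a resume helper (here called xfrm_dev_resume()) that re-injects each parked skb once its async crypto has completed.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

void xfrm_dev_resume(struct sk_buff *skb);	/* assumed resume hook, not from this diff */

/* Sketch only: drain the per-CPU xfrm backlog from the TX softirq.
 * Splice the queue under its lock, then resubmit each skb outside
 * the lock.
 */
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while ((skb = __skb_dequeue(&list)) != NULL)
		xfrm_dev_resume(skb);
}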
@@ -8874,6 +8877,9 @@ static int __init net_dev_init(void)
 
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+		skb_queue_head_init(&sd->xfrm_backlog);
+#endif
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
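The initialization above implies a matching per-CPU queue in struct softnet_data (declared in include/linux/netdevice.h, outside this excerpt). A rough sketch of the relevant member, shown in a stand-in struct so as not to restate the full definition:

#include <linux/skbuff.h>

/* Stand-in for the real struct softnet_data; only the member this
 * patch relies on is shown, guarded the same way as the init code.
 */
struct softnet_data_sketch {
	struct sk_buff_head	process_queue;
	/* ... other per-CPU members elided ... */
#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;	/* skbs waiting for async xfrm crypto */
#endif
};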