Merge branch 'net-tls-small-code-cleanup'
Jakub Kicinski says:

====================
net/tls: small code cleanup

This small patch set cleans up tls (mostly offload parts).
Other than avoiding unnecessary error messages - no functional
changes here.

v2 (Saeed):
 - fix up Review tags;
 - remove the warning on failure completely.
====================

Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
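The diff below moves the TLS offload hooks out of include/linux/netdevice.h into include/net/tls.h (leaving only a forward declaration of struct tlsdev_ops behind), makes tls_device_sk_destruct() static, and drops two error messages. For orientation, a minimal sketch of how a driver wires up the ops table whose declaration moves in this patch — the foo_* names are hypothetical; the signatures are the ones declared below:

/*
 * Hypothetical driver-side sketch (not part of this patch): after the
 * move, the tlsdev_ops definition lives in <net/tls.h> and
 * <linux/netdevice.h> only carries a forward declaration, so a driver
 * implementing TLS offload includes <net/tls.h> as before.
 */
#include <linux/netdevice.h>
#include <net/tls.h>

static int foo_tls_dev_add(struct net_device *netdev, struct sock *sk,
                           enum tls_offload_ctx_dir direction,
                           struct tls_crypto_info *crypto_info,
                           u32 start_offload_tcp_sn)
{
        /* program per-connection crypto state into the NIC here */
        return -EOPNOTSUPP;     /* placeholder: reject until implemented */
}

static void foo_tls_dev_del(struct net_device *netdev,
                            struct tls_context *ctx,
                            enum tls_offload_ctx_dir direction)
{
        /* release the per-connection hardware state */
}

static void foo_tls_dev_resync_rx(struct net_device *netdev,
                                  struct sock *sk, u32 seq, u64 rcd_sn)
{
        /* hand the verified record sequence number back to the NIC */
}

static const struct tlsdev_ops foo_tlsdev_ops = {
        .tls_dev_add       = foo_tls_dev_add,
        .tls_dev_del       = foo_tls_dev_del,
        .tls_dev_resync_rx = foo_tls_dev_resync_rx,
};

Such a driver points its netdev->tlsdev_ops at this table and advertises NETIF_F_HW_TLS_TX / NETIF_F_HW_TLS_RX; the patch changes none of that, only where the types are declared.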
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,34 +914,13 @@ struct xfrmdev_ops {
 };
 #endif
 
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-enum tls_offload_ctx_dir {
-	TLS_OFFLOAD_CTX_DIR_RX,
-	TLS_OFFLOAD_CTX_DIR_TX,
-};
-
-struct tls_crypto_info;
-struct tls_context;
-
-struct tlsdev_ops {
-	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
-			   enum tls_offload_ctx_dir direction,
-			   struct tls_crypto_info *crypto_info,
-			   u32 start_offload_tcp_sn);
-	void (*tls_dev_del)(struct net_device *netdev,
-			    struct tls_context *ctx,
-			    enum tls_offload_ctx_dir direction);
-	void (*tls_dev_resync_rx)(struct net_device *netdev,
-				  struct sock *sk, u32 seq, u64 rcd_sn);
-};
-#endif
-
 struct dev_ifalias {
 	struct rcu_head rcuhead;
 	char ifalias[];
 };
 
 struct devlink;
+struct tlsdev_ops;
 
 /*
  * This structure defines the management hooks for network devices.
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -277,6 +277,23 @@ struct tls_context {
 	void (*unhash)(struct sock *sk);
 };
 
+enum tls_offload_ctx_dir {
+	TLS_OFFLOAD_CTX_DIR_RX,
+	TLS_OFFLOAD_CTX_DIR_TX,
+};
+
+struct tlsdev_ops {
+	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
+			   enum tls_offload_ctx_dir direction,
+			   struct tls_crypto_info *crypto_info,
+			   u32 start_offload_tcp_sn);
+	void (*tls_dev_del)(struct net_device *netdev,
+			    struct tls_context *ctx,
+			    enum tls_offload_ctx_dir direction);
+	void (*tls_dev_resync_rx)(struct net_device *netdev,
+				  struct sock *sk, u32 seq, u64 rcd_sn);
+};
+
 struct tls_offload_context_rx {
 	/* sw must be the first member of tls_offload_context_rx */
 	struct tls_sw_context_rx sw;
@@ -317,7 +334,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
 			int offset, size_t size, int flags);
-void tls_device_sk_destruct(struct sock *sk);
 void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
@@ -336,7 +352,6 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec)
 	return rec->end_seq - rec->len;
 }
 
-void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
 int tls_push_sg(struct sock *sk, struct tls_context *ctx,
 		struct scatterlist *sg, u16 first_offset,
 		int flags);
@@ -547,7 +562,7 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 
-	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
 }
 
 
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -89,22 +89,6 @@ static void tls_device_gc_task(struct work_struct *work)
 	}
 }
 
-static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
-			      struct net_device *netdev)
-{
-	if (sk->sk_destruct != tls_device_sk_destruct) {
-		refcount_set(&ctx->refcount, 1);
-		dev_hold(netdev);
-		ctx->netdev = netdev;
-		spin_lock_irq(&tls_device_lock);
-		list_add_tail(&ctx->list, &tls_device_list);
-		spin_unlock_irq(&tls_device_lock);
-
-		ctx->sk_destruct = sk->sk_destruct;
-		sk->sk_destruct = tls_device_sk_destruct;
-	}
-}
-
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
 	unsigned long flags;
@@ -199,7 +183,7 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
  * socket and no in-flight SKBs associated with this
  * socket, so it is safe to free all the resources.
  */
-void tls_device_sk_destruct(struct sock *sk)
+static void tls_device_sk_destruct(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
@@ -217,7 +201,6 @@ void tls_device_sk_destruct(struct sock *sk)
 	if (refcount_dec_and_test(&tls_ctx->refcount))
 		tls_device_queue_ctx_destruction(tls_ctx);
 }
-EXPORT_SYMBOL(tls_device_sk_destruct);
 
 void tls_device_free_resources_tx(struct sock *sk)
 {
@@ -584,7 +567,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 
 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
 	resync_req = atomic64_read(&rx_ctx->resync_req);
-	req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
 	is_req_pending = resync_req;
 
 	if (unlikely(is_req_pending) && req_seq == seq &&
@@ -682,6 +665,22 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
 		tls_device_reencrypt(sk, skb);
 }
 
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+			      struct net_device *netdev)
+{
+	if (sk->sk_destruct != tls_device_sk_destruct) {
+		refcount_set(&ctx->refcount, 1);
+		dev_hold(netdev);
+		ctx->netdev = netdev;
+		spin_lock_irq(&tls_device_lock);
+		list_add_tail(&ctx->list, &tls_device_list);
+		spin_unlock_irq(&tls_device_lock);
+
+		ctx->sk_destruct = sk->sk_destruct;
+		sk->sk_destruct = tls_device_sk_destruct;
+	}
+}
+
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
@@ -865,8 +864,6 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
-		pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
-				   __func__, netdev->name);
 		rc = -ENOTSUPP;
 		goto release_netdev;
 	}
@@ -894,11 +891,8 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
 					     &ctx->crypto_recv.info,
 					     tcp_sk(sk)->copied_seq);
-	if (rc) {
-		pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
-				   __func__);
+	if (rc)
 		goto free_sw_resources;
-	}
 
 	tls_device_attach(ctx, sk, netdev);
 	goto release_netdev;
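One detail worth calling out from the two resync hunks above: the conversion of the __be32 sequence number to host byte order moves from the reader (handle_device_resync()) to the writer (tls_offload_rx_resync_request()), so the value stored in the upper 32 bits of resync_req is now host-order and bit 0 keeps serving as the "request pending" flag. A standalone sketch of the encoding under those assumptions — a hypothetical userspace harness with the kernel types stubbed via <stdint.h>, not kernel code:

/*
 * Illustrates the resync_req packing after this patch: convert the
 * wire-order sequence once on the writer side, store it in the upper
 * 32 bits, and use bit 0 as the pending flag.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl(), htonl() */

static uint64_t pack_resync_req(uint32_t seq_be)
{
        /* writer side (tls_offload_rx_resync_request equivalent) */
        return ((uint64_t)ntohl(seq_be) << 32) | 1;
}

int main(void)
{
        uint32_t seq_be = htonl(0x12345678);    /* wire-order input */
        uint64_t req = pack_resync_req(seq_be);

        /* reader side no longer needs ntohl() on the stored value */
        uint32_t req_seq = (uint32_t)(req >> 32);
        int is_req_pending = req & 1;

        printf("seq=0x%x pending=%d\n", (unsigned)req_seq, is_req_pending);
        return 0;
}

Either placement of ntohl() yields the same req_seq, which is why the commit message can claim no functional change.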