Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Just minor overlapping changes in the conflicts here.

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -261,24 +261,9 @@ void tls_ctx_free(struct tls_context *ctx)
         kfree(ctx);
 }
 
-static void tls_sk_proto_close(struct sock *sk, long timeout)
+static void tls_sk_proto_cleanup(struct sock *sk,
+                                 struct tls_context *ctx, long timeo)
 {
-        struct tls_context *ctx = tls_get_ctx(sk);
-        long timeo = sock_sndtimeo(sk, 0);
-        void (*sk_proto_close)(struct sock *sk, long timeout);
-        bool free_ctx = false;
-
-        lock_sock(sk);
-        sk_proto_close = ctx->sk_proto_close;
-
-        if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
-                goto skip_tx_cleanup;
-
-        if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
-                free_ctx = true;
-                goto skip_tx_cleanup;
-        }
-
         if (unlikely(sk->sk_write_pending) &&
             !wait_on_pending_writer(sk, &timeo))
                 tls_handle_open_record(sk, 0);
@@ -287,7 +272,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
         if (ctx->tx_conf == TLS_SW) {
                 kfree(ctx->tx.rec_seq);
                 kfree(ctx->tx.iv);
-                tls_sw_free_resources_tx(sk);
+                tls_sw_release_resources_tx(sk);
 #ifdef CONFIG_TLS_DEVICE
         } else if (ctx->tx_conf == TLS_HW) {
                 tls_device_free_resources_tx(sk);
@@ -295,26 +280,44 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
         }
 
         if (ctx->rx_conf == TLS_SW)
-                tls_sw_free_resources_rx(sk);
+                tls_sw_release_resources_rx(sk);
 
 #ifdef CONFIG_TLS_DEVICE
         if (ctx->rx_conf == TLS_HW)
                 tls_device_offload_cleanup_rx(sk);
-
-        if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
-#else
-        {
 #endif
-                tls_ctx_free(ctx);
-                ctx = NULL;
-        }
+}
 
-skip_tx_cleanup:
+static void tls_sk_proto_close(struct sock *sk, long timeout)
+{
+        struct inet_connection_sock *icsk = inet_csk(sk);
+        struct tls_context *ctx = tls_get_ctx(sk);
+        long timeo = sock_sndtimeo(sk, 0);
+        bool free_ctx;
+
+        if (ctx->tx_conf == TLS_SW)
+                tls_sw_cancel_work_tx(ctx);
+
+        lock_sock(sk);
+        free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;
+
+        if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
+                tls_sk_proto_cleanup(sk, ctx, timeo);
+
+        write_lock_bh(&sk->sk_callback_lock);
+        if (free_ctx)
+                icsk->icsk_ulp_data = NULL;
+        sk->sk_prot = ctx->sk_proto;
+        write_unlock_bh(&sk->sk_callback_lock);
         release_sock(sk);
-        sk_proto_close(sk, timeout);
-        /* free ctx for TLS_HW_RECORD, used by tcp_set_state
-         * for sk->sk_prot->unhash [tls_hw_unhash]
-         */
+        if (ctx->tx_conf == TLS_SW)
+                tls_sw_free_ctx_tx(ctx);
+        if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+                tls_sw_strparser_done(ctx);
+        if (ctx->rx_conf == TLS_SW)
+                tls_sw_free_ctx_rx(ctx);
+        ctx->sk_proto_close(sk, timeout);
+
         if (free_ctx)
                 tls_ctx_free(ctx);
 }
@@ -526,6 +529,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
                 {
 #endif
                         rc = tls_set_sw_offload(sk, ctx, 1);
+                        if (rc)
+                                goto err_crypto_info;
                         conf = TLS_SW;
                 }
         } else {
@@ -537,13 +542,13 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
                 {
 #endif
                         rc = tls_set_sw_offload(sk, ctx, 0);
+                        if (rc)
+                                goto err_crypto_info;
                         conf = TLS_SW;
                 }
+                tls_sw_strparser_arm(sk, ctx);
         }
 
-        if (rc)
-                goto err_crypto_info;
-
         if (tx)
                 ctx->tx_conf = conf;
         else
@@ -607,6 +612,7 @@ static struct tls_context *create_ctx(struct sock *sk)
         ctx->setsockopt = sk->sk_prot->setsockopt;
         ctx->getsockopt = sk->sk_prot->getsockopt;
         ctx->sk_proto_close = sk->sk_prot->close;
+        ctx->unhash = sk->sk_prot->unhash;
         return ctx;
 }
 
@@ -764,7 +770,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
         prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
         prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
         prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
-        prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
 }
 
 static int tls_init(struct sock *sk)
@@ -773,7 +778,7 @@ static int tls_init(struct sock *sk)
         int rc = 0;
 
         if (tls_hw_prot(sk))
-                goto out;
+                return 0;
 
         /* The TLS ulp is currently supported only for TCP sockets
          * in ESTABLISHED state.
@@ -784,21 +789,38 @@ static int tls_init(struct sock *sk)
         if (sk->sk_state != TCP_ESTABLISHED)
                 return -ENOTSUPP;
 
+        tls_build_proto(sk);
+
         /* allocate tls context */
+        write_lock_bh(&sk->sk_callback_lock);
         ctx = create_ctx(sk);
         if (!ctx) {
                 rc = -ENOMEM;
                 goto out;
         }
 
-        tls_build_proto(sk);
         ctx->tx_conf = TLS_BASE;
         ctx->rx_conf = TLS_BASE;
+        ctx->sk_proto = sk->sk_prot;
         update_sk_prot(sk, ctx);
 out:
+        write_unlock_bh(&sk->sk_callback_lock);
         return rc;
 }
 
+static void tls_update(struct sock *sk, struct proto *p)
+{
+        struct tls_context *ctx;
+
+        ctx = tls_get_ctx(sk);
+        if (likely(ctx)) {
+                ctx->sk_proto_close = p->close;
+                ctx->sk_proto = p;
+        } else {
+                sk->sk_prot = p;
+        }
+}
+
 void tls_register_device(struct tls_device *device)
 {
         spin_lock_bh(&device_spinlock);
@@ -819,6 +841,7 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
         .name                   = "tls",
         .owner                  = THIS_MODULE,
         .init                   = tls_init,
+        .update                 = tls_update,
 };
 
 static int __init tls_register(void)
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2054,7 +2054,16 @@ static void tls_data_ready(struct sock *sk)
         }
 }
 
-void tls_sw_free_resources_tx(struct sock *sk)
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
+{
+        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+        set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
+        set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
+        cancel_delayed_work_sync(&ctx->tx_work.work);
+}
+
+void tls_sw_release_resources_tx(struct sock *sk)
 {
         struct tls_context *tls_ctx = tls_get_ctx(sk);
         struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -2065,11 +2074,6 @@ void tls_sw_free_resources_tx(struct sock *sk)
         if (atomic_read(&ctx->encrypt_pending))
                 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
 
-        release_sock(sk);
-        cancel_delayed_work_sync(&ctx->tx_work.work);
-        lock_sock(sk);
-
-        /* Tx whatever records we can transmit and abandon the rest */
         tls_tx_records(sk, -1);
 
         /* Free up un-sent records in tx_list. First, free
@@ -2092,6 +2096,11 @@ void tls_sw_free_resources_tx(struct sock *sk)
 
         crypto_free_aead(ctx->aead_send);
         tls_free_open_rec(sk);
+}
+
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
+{
+        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 
         kfree(ctx);
 }
@@ -2110,23 +2119,38 @@ void tls_sw_release_resources_rx(struct sock *sk)
                 skb_queue_purge(&ctx->rx_list);
                 crypto_free_aead(ctx->aead_recv);
                 strp_stop(&ctx->strp);
-                write_lock_bh(&sk->sk_callback_lock);
-                sk->sk_data_ready = ctx->saved_data_ready;
-                write_unlock_bh(&sk->sk_callback_lock);
-                release_sock(sk);
-                strp_done(&ctx->strp);
-                lock_sock(sk);
+                /* If tls_sw_strparser_arm() was not called (cleanup paths)
+                 * we still want to strp_stop(), but sk->sk_data_ready was
+                 * never swapped.
+                 */
+                if (ctx->saved_data_ready) {
+                        write_lock_bh(&sk->sk_callback_lock);
+                        sk->sk_data_ready = ctx->saved_data_ready;
+                        write_unlock_bh(&sk->sk_callback_lock);
+                }
         }
 }
 
+void tls_sw_strparser_done(struct tls_context *tls_ctx)
+{
+        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+        strp_done(&ctx->strp);
+}
+
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
+{
+        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+        kfree(ctx);
+}
+
 void tls_sw_free_resources_rx(struct sock *sk)
 {
         struct tls_context *tls_ctx = tls_get_ctx(sk);
-        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 
         tls_sw_release_resources_rx(sk);
-
-        kfree(ctx);
+        tls_sw_free_ctx_rx(tls_ctx);
 }
 
 /* The work handler to transmitt the encrypted records in tx_list */
@@ -2137,11 +2161,17 @@ static void tx_work_handler(struct work_struct *work)
                                                struct tx_work, work);
         struct sock *sk = tx_work->sk;
         struct tls_context *tls_ctx = tls_get_ctx(sk);
-        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-
+        struct tls_sw_context_tx *ctx;
+
+        if (unlikely(!tls_ctx))
+                return;
+
+        ctx = tls_sw_ctx_tx(tls_ctx);
+        if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
+                return;
         if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                 return;
 
         lock_sock(sk);
         tls_tx_records(sk, -1);
         release_sock(sk);
@@ -2160,6 +2190,18 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
         }
 }
 
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
+{
+        struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
+
+        write_lock_bh(&sk->sk_callback_lock);
+        rx_ctx->saved_data_ready = sk->sk_data_ready;
+        sk->sk_data_ready = tls_data_ready;
+        write_unlock_bh(&sk->sk_callback_lock);
+
+        strp_check_rcv(&rx_ctx->strp);
+}
+
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
         struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2357,13 +2399,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                 cb.parse_msg = tls_read_size;
 
                 strp_init(&sw_ctx_rx->strp, sk, &cb);
-
-                write_lock_bh(&sk->sk_callback_lock);
-                sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
-                sk->sk_data_ready = tls_data_ready;
-                write_unlock_bh(&sk->sk_callback_lock);
-
-                strp_check_rcv(&sw_ctx_rx->strp);
         }
 
         goto out;