net/tls: Add asynchronous resync
This patch adds support for asynchronous resynchronization in tls_device.

Async resync follows two distinct stages:

1. The NIC driver indicates that it would like to resync on some TLS record
   within the received packet (P), but it does not yet know which of the TLS
   records within the packet it is. At this stage, the driver queries the
   device for the exact TCP sequence to resync on (tcpsn), but it does not
   wait for the device to respond.

2. Eventually, the device responds, and the driver provides the tcpsn within
   the resync packet to KTLS. Now, KTLS can check the tcpsn against any TLS
   record already processed within packet P, as well as against any record
   processed within packet P in the future.

The asynchronous resync path simplifies the device driver: it saves bits on
the packet completion (a 32-bit TCP sequence) and passes this information in
an asynchronous command instead.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit ed9b7646b0 (parent acb5a07aaf)
committed by Saeed Mahameed
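For readers following the shifts and masks in the diff, here is a minimal, self-contained sketch (userspace C, not kernel code) of the 64-bit request word decoded by tls_device_rx_resync_async() below. The layout is inferred from the patch itself: TCP sequence in bits 63..32, a 16-bit range length in bits 31..16, and request flags in the low bits. The flag values and helper names used here are assumptions for illustration only, not the in-tree definitions.

/* Minimal sketch (userspace C, not kernel code) of the 64-bit request word
 * decoded by tls_device_rx_resync_async() below.  Flag values and helper
 * names are assumptions for illustration only.
 */
#include <assert.h>
#include <stdint.h>

#define RESYNC_REQ       1u   /* assumed: a resync request is pending      */
#define RESYNC_REQ_ASYNC 2u   /* assumed: asynchronous (stage 1) request   */

/* Stage 1: post packet P's TCP range while the device query is in flight. */
static uint64_t pack_stage1(uint32_t pkt_seq, uint16_t pkt_len)
{
	return ((uint64_t)pkt_seq << 32) | ((uint64_t)pkt_len << 16) |
	       RESYNC_REQ | RESYNC_REQ_ASYNC;
}

/* Stage 2: the device has answered with the exact tcpsn. */
static uint64_t pack_stage2(uint32_t tcpsn)
{
	return ((uint64_t)tcpsn << 32) | RESYNC_REQ;
}

int main(void)
{
	uint64_t req = pack_stage1(0x12345678, 1400);

	/* the same shifts and masks the new kTLS helper applies */
	assert((uint32_t)(req >> 32) == 0x12345678);   /* req_seq            */
	assert(((req >> 16) & 0xffff) == 1400);        /* range length       */
	assert(req & RESYNC_REQ_ASYNC);                /* stage 1 request    */

	req = pack_stage2(0x23456789);
	assert(req != 0);                  /* nonzero req => request pending */
	assert(!(req & RESYNC_REQ_ASYNC)); /* stage 2: synchronous check     */
	return 0;
}

A request word of zero means no resync is pending, which is what the is_req_pending = resync_req check in the second hunk relies on.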
@@ -690,6 +690,47 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
 	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+			   s64 resync_req, u32 *seq)
+{
+	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+	u32 req_seq = resync_req >> 32;
+	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+
+	if (is_async) {
+		/* asynchronous stage: log all headers seq such that
+		 * req_seq <= seq <= end_seq, and wait for real resync request
+		 */
+		if (between(*seq, req_seq, req_end) &&
+		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+			resync_async->log[resync_async->loglen++] = *seq;
+
+		return false;
+	}
+
+	/* synchronous stage: check against the logged entries and
+	 * proceed to check the next entries if no match was found
+	 */
+	while (resync_async->loglen) {
+		if (req_seq == resync_async->log[resync_async->loglen - 1] &&
+		    atomic64_try_cmpxchg(&resync_async->req,
+					 &resync_req, 0)) {
+			resync_async->loglen = 0;
+			*seq = req_seq;
+			return true;
+		}
+		resync_async->loglen--;
+	}
+
+	if (req_seq == *seq &&
+	    atomic64_try_cmpxchg(&resync_async->req,
+				 &resync_req, 0))
+		return true;
+
+	return false;
+}
+
 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -736,6 +777,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 		seq += rcd_len;
 		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
 		break;
+	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+		resync_req = atomic64_read(&rx_ctx->resync_async->req);
+		is_req_pending = resync_req;
+		if (likely(!is_req_pending))
+			return;
+
+		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+						resync_req, &seq))
+			return;
+		break;
 	}
 
 	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
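To tie the two hunks back to the commit message, the following sketch (again userspace C rather than kernel code, using the same assumed bit layout as above, a simplified stand-in for struct tls_offload_resync_async, and omitting the atomic64 cmpxchg and TCP sequence-wraparound handling) walks packet P through both stages: stage 1 logs every record-header sequence that falls inside the packet's range while the device query is in flight, and stage 2 matches the device's tcpsn against that log.

/* Self-contained walk-through (userspace C, not kernel code) of the two
 * async resync stages.  The struct below is a simplified stand-in for the
 * kernel's struct tls_offload_resync_async; LOGMAX stands in for
 * TLS_DEVICE_RESYNC_ASYNC_LOGMAX, and the atomic64 cmpxchg and TCP
 * sequence wraparound handling are omitted.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESYNC_REQ       1u  /* assumed flag values, for illustration only */
#define RESYNC_REQ_ASYNC 2u
#define LOGMAX           13

struct resync_async {
	uint64_t req;               /* the kernel keeps this in an atomic64_t */
	uint16_t loglen;
	uint32_t log[LOGMAX];
};

/* Same decision logic as tls_device_rx_resync_async() in the hunk above. */
static bool rx_resync_async(struct resync_async *ra, uint64_t req, uint32_t *seq)
{
	uint32_t req_seq = req >> 32;
	uint32_t req_end = req_seq + ((req >> 16) & 0xffff);

	if (req & RESYNC_REQ_ASYNC) {
		/* stage 1: remember record headers inside the packet's range */
		if (*seq >= req_seq && *seq <= req_end && ra->loglen < LOGMAX)
			ra->log[ra->loglen++] = *seq;
		return false;
	}

	/* stage 2: the device named an exact tcpsn; search the log for it */
	while (ra->loglen) {
		if (req_seq == ra->log[ra->loglen - 1]) {
			ra->loglen = 0;
			*seq = req_seq;
			return true;
		}
		ra->loglen--;
	}
	return req_seq == *seq;
}

int main(void)
{
	struct resync_async ra = { 0 };
	uint32_t hdr1 = 1000, hdr2 = 2400, cur = 9999;

	/* stage 1: the driver posts packet P's range [1000, 4000] as async */
	ra.req = ((uint64_t)1000 << 32) | (3000u << 16) |
		 RESYNC_REQ | RESYNC_REQ_ASYNC;
	rx_resync_async(&ra, ra.req, &hdr1);   /* logs seq 1000 */
	rx_resync_async(&ra, ra.req, &hdr2);   /* logs seq 2400 */

	/* stage 2: the device answers tcpsn = 2400; it is found in the log */
	ra.req = ((uint64_t)2400 << 32) | RESYNC_REQ;
	printf("resync %s at seq %u\n",
	       rx_resync_async(&ra, ra.req, &cur) ? "ok" : "miss", (unsigned)cur);
	return 0;
}

The driver-side benefit stated in the commit message follows from this split: the per-packet completion no longer needs to carry a 32-bit TCP sequence, since the exact sequence arrives later on a separate asynchronous command.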