path: root/net/tls
author     Jakub Kicinski <kuba@kernel.org>       2022-07-14 22:22:33 -0700
committer  David S. Miller <davem@davemloft.net>  2022-07-18 11:24:11 +0100
commit     c618db2afe7c31d13ca8cf05b60f17165fbdc282 (patch)
tree       aa66b80b665d2a52d9149377e2df3de0dfd8d6f1 /net/tls
parent     6ececdc5136900bc99ef04c60c9daeab86dbeb85 (diff)
tls: rx: async: hold onto the input skb
Async crypto currently benefits from the fact that we decrypt in place.
When we allow input and output to be different skbs we will have to hang
onto the input while we move to the next record. Clone the inputs and
keep them on a list.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
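In short, the input skb of each record that goes to async decrypt is cloned (a clone shares the underlying data, so this costs only a small metadata allocation) and parked on a per-context list; once all pending async completions have been waited for, the whole list is purged. A minimal sketch of that pattern using the generic skb list API, purely for illustration (hold_list_add()/hold_list_release() are made-up names; the real helper, tls_strp_msg_hold(), and the real list, async_hold, appear in the diff below):

/* Illustrative sketch only -- hold_list_* are invented names; see the
 * actual tls_strp_msg_hold()/async_hold changes in the diff below.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static int hold_list_add(struct sock *sk, struct sk_buff *skb,
			 struct sk_buff_head *hold)
{
	struct sk_buff *clone;

	/* skb_clone() shares the underlying data, so holding the input
	 * costs only a small per-record metadata allocation.
	 */
	clone = skb_clone(skb, sk->sk_allocation);
	if (!clone)
		return -ENOMEM;
	__skb_queue_tail(hold, clone);
	return 0;
}

static void hold_list_release(struct sk_buff_head *hold)
{
	/* Once all pending async decrypts have completed, the held
	 * inputs are no longer needed and can be dropped in one go.
	 */
	__skb_queue_purge(hold);
}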
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/Makefile    |  2
-rw-r--r--  net/tls/tls.h       |  3
-rw-r--r--  net/tls/tls_strp.c  | 17
-rw-r--r--  net/tls/tls_sw.c    | 26
4 files changed, 38 insertions, 10 deletions
diff --git a/net/tls/Makefile b/net/tls/Makefile
index f1ffbfe8968d..e41c800489ac 100644
--- a/net/tls/Makefile
+++ b/net/tls/Makefile
@@ -7,7 +7,7 @@ CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_TLS) += tls.o
-tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o tls_strp.o
tls-$(CONFIG_TLS_TOE) += tls_toe.o
tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 44522b221717..c818dc68955d 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -124,6 +124,9 @@ int tls_sw_fallback_init(struct sock *sk,
 			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info);
 
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+		      struct sk_buff_head *dst);
+
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
 	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
new file mode 100644
index 000000000000..9ccab79a6e1e
--- /dev/null
+++ b/net/tls/tls_strp.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/skbuff.h>
+
+#include "tls.h"
+
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+		      struct sk_buff_head *dst)
+{
+	struct sk_buff *clone;
+
+	clone = skb_clone(skb, sk->sk_allocation);
+	if (!clone)
+		return -ENOMEM;
+	__skb_queue_tail(dst, clone);
+	return 0;
+}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 09fe2cfff51a..f767501e178d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1535,8 +1535,13 @@ fallback_to_reg_recv:
 		goto exit_free_pages;
 
 	darg->skb = tls_strp_msg(ctx);
-	if (darg->async)
-		return 0;
+
+	if (unlikely(darg->async)) {
+		err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
+		if (err)
+			__skb_queue_tail(&ctx->async_hold, darg->skb);
+		return err;
+	}
 
 	if (prot->tail_size)
 		darg->tail = dctx->tail;
@@ -1998,14 +2003,16 @@ recv_end:
 		reinit_completion(&ctx->async_wait.completion);
 		pending = atomic_read(&ctx->decrypt_pending);
 		spin_unlock_bh(&ctx->decrypt_compl_lock);
-		if (pending) {
+		ret = 0;
+		if (pending)
 			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-			if (ret) {
-				if (err >= 0 || err == -EINPROGRESS)
-					err = ret;
-				decrypted = 0;
-				goto end;
-			}
+		__skb_queue_purge(&ctx->async_hold);
+
+		if (ret) {
+			if (err >= 0 || err == -EINPROGRESS)
+				err = ret;
+			decrypted = 0;
+			goto end;
 		}
 
 		/* Drain records from the rx_list & copy if required */
@@ -2440,6 +2447,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		crypto_info = &ctx->crypto_recv.info;
 		cctx = &ctx->rx;
 		skb_queue_head_init(&sw_ctx_rx->rx_list);
+		skb_queue_head_init(&sw_ctx_rx->async_hold);
 		aead = &sw_ctx_rx->aead_recv;
 	}