author    Jakub Kicinski <kuba@kernel.org>    2022-07-07 18:03:13 -0700
committer Jakub Kicinski <kuba@kernel.org>    2022-07-08 18:38:45 -0700
commit    5879031423089b2e19b769f30fc618af742264c3
tree      a5dc1560ed7e005dedffe1589cdb183fe6d162ed /net/tls/tls_sw.c
parent    03957d84055e59235c7d57c95a37617bd3aa5646
tls: create an internal header
include/net/tls.h is getting a little long, and is probably hard for
driver authors to navigate. Split out the internals into a header which
will live under net/tls/. While at it move some static inlines with a
single user into the source files, add a few tls_ prefixes and fix
spelling of 'proccess'.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
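For a feel of the result, the sketch below is a hypothetical fragment of
the new internal header, net/tls/tls.h. Only the two renamed helpers
visible in this diff, tls_process_cmsg() and tls_xor_iv_with_seq(), are
confirmed by the patch; the guard name and the helper bodies are
assumptions modeled on the pre-rename code.

    /* net/tls/tls.h -- illustrative sketch, not the actual header */
    #ifndef _TLS_INT_H
    #define _TLS_INT_H

    #include <net/tls.h>

    /* Renamed from tls_proccess_cmsg(); parses TLS record-type cmsgs. */
    int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
                         unsigned char *record_type);

    /* Renamed from xor_iv_with_seq(). Body is an assumption: TLS 1.3
     * and ChaCha20-Poly1305 derive the per-record nonce by XORing the
     * record sequence number into the IV, while TLS 1.2 AES-GCM sends
     * an explicit IV and needs no mixing.
     */
    static inline void tls_xor_iv_with_seq(struct tls_prot_info *prot,
                                           char *iv, char *seq)
    {
            int i;

            if (prot->version == TLS_1_3_VERSION ||
                prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
                    for (i = 0; i < 8; i++)
                            iv[i + 4] ^= seq[i];
            }
    }

    #endif /* _TLS_INT_H */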
Diffstat (limited to 'net/tls/tls_sw.c')
-rw-r--r--    net/tls/tls_sw.c    22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 21c76db8f9b3..1376f866734d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -44,6 +44,8 @@
 #include <net/strparser.h>
 #include <net/tls.h>
 
+#include "tls.h"
+
 struct tls_decrypt_arg {
 	bool zc;
 	bool async;
@@ -524,7 +526,8 @@ static int tls_do_encryption(struct sock *sk,
 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
 	       prot->iv_size + prot->salt_size);
 
-	xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
+	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
+			    tls_ctx->tx.rec_seq);
 
 	sge->offset += prot->prepend_size;
 	sge->length -= prot->prepend_size;
@@ -961,7 +964,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	lock_sock(sk);
 
 	if (unlikely(msg->msg_controllen)) {
-		ret = tls_proccess_cmsg(sk, msg, &record_type);
+		ret = tls_process_cmsg(sk, msg, &record_type);
 		if (ret) {
 			if (ret == -EINPROGRESS)
 				num_async++;
@@ -1495,7 +1498,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 			goto exit_free;
 		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
 	}
-	xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
+	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
 
 	/* Prepare AAD */
 	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
@@ -2267,12 +2270,23 @@ static void tx_work_handler(struct work_struct *work)
 	mutex_unlock(&tls_ctx->tx_lock);
 }
 
+static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
+{
+	struct tls_rec *rec;
+
+	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
+	if (!rec)
+		return false;
+
+	return READ_ONCE(rec->tx_ready);
+}
+
 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
 {
 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
 
 	/* Schedule the transmission if tx list is ready */
-	if (is_tx_ready(tx_ctx) &&
+	if (tls_is_tx_ready(tx_ctx) &&
 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
 }
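A side note on the new tls_is_tx_ready() helper: list_first_entry()
computes an entry pointer from the list head itself, so on an empty
tx_list it yields a non-NULL bogus pointer rather than NULL, and the
!rec check cannot fire. The sketch below is not part of this commit; it
shows how a NULL-safe variant could be written with the
list_first_entry_or_null() accessor from <linux/list.h>.

    /* Hypothetical NULL-safe variant, for illustration only. */
    static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
    {
            struct tls_rec *rec;

            /* Returns NULL when ctx->tx_list is empty. */
            rec = list_first_entry_or_null(&ctx->tx_list,
                                           struct tls_rec, list);
            if (!rec)
                    return false;

            return READ_ONCE(rec->tx_ready);
    }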