author     Sabrina Dubroca <sd@queasysnail.net>     2023-10-09 22:50:42 +0200
committer  David S. Miller <davem@davemloft.net>    2023-10-13 11:26:09 +0100
commit     8f1d532b4a49e196696b0aa150962d7ce96985e4 (patch)
tree       ae8b7bba910851b9e560e9294d2f97f562e48c81 /net/tls
parent     3bab3ee0f95ebd2a897ac3205b4fdee50c3b5f96 (diff)
tls: drop unnecessary cipher_type checks in tls offload
We should never reach tls_device_reencrypt, tls_enc_record, or tls_enc_skb
with a cipher_type that can't be offloaded. Replace those checks with a
DEBUG_NET_WARN_ON_ONCE, and use cipher_desc instead of hard-coding
offloadable cipher types.

Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
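
For context, the change leans on the per-cipher descriptor table that net/tls already maintains: get_cipher_desc() maps a cipher_type to a descriptor recording the cipher's parameter sizes (iv, salt, tag) and whether it can be offloaded, which is exactly what the hunks below dereference. The following is a minimal, self-contained sketch of that lookup pattern; it is an illustration only, assuming a simplified struct and indexing rather than the kernel's actual struct tls_cipher_desc in net/tls, with the uapi cipher values and AES-GCM sizes filled in just so it compiles.

/*
 * Simplified stand-in for net/tls's cipher descriptor lookup.
 * Not the kernel's definitions: the struct layout, the indexing and
 * the get_cipher_desc() body here are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define TLS_CIPHER_AES_GCM_128 51   /* uapi values (include/uapi/linux/tls.h) */
#define TLS_CIPHER_AES_GCM_256 52
#define TLS_CIPHER_MIN         TLS_CIPHER_AES_GCM_128

struct cipher_desc {
        unsigned int iv;        /* explicit per-record IV bytes */
        unsigned int salt;      /* implicit nonce (salt) bytes */
        unsigned int tag;       /* authentication tag bytes */
        bool offloadable;       /* usable by the device offload path */
};

static const struct cipher_desc cipher_descs[] = {
        [TLS_CIPHER_AES_GCM_128 - TLS_CIPHER_MIN] = { .iv = 8, .salt = 4, .tag = 16, .offloadable = true },
        [TLS_CIPHER_AES_GCM_256 - TLS_CIPHER_MIN] = { .iv = 8, .salt = 4, .tag = 16, .offloadable = true },
};

static const struct cipher_desc *get_cipher_desc(unsigned int cipher_type)
{
        unsigned int idx = cipher_type - TLS_CIPHER_MIN;

        if (cipher_type < TLS_CIPHER_MIN ||
            idx >= sizeof(cipher_descs) / sizeof(cipher_descs[0]))
                return NULL;
        return &cipher_descs[idx];
}

int main(void)
{
        const struct cipher_desc *desc = get_cipher_desc(TLS_CIPHER_AES_GCM_256);

        /* Offload-path callers expect a descriptor flagged offloadable. */
        if (!desc || !desc->offloadable) {
                fprintf(stderr, "cipher cannot be offloaded\n");
                return 1;
        }
        printf("iv=%u salt=%u tag=%u\n", desc->iv, desc->salt, desc->tag);
        return 0;
}

Because the descriptor already records offloadability, the offload paths can consult desc->offloadable instead of keeping a hand-written switch over TLS_CIPHER_* values in sync with the set of offloadable ciphers.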
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_device.c            |  8
-rw-r--r--  net/tls/tls_device_fallback.c   | 17
2 files changed, 4 insertions(+), 21 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 8c94c926606a..fbd687a0c66f 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -891,14 +891,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
struct strp_msg *rxm;
char *orig_buf, *buf;
- switch (tls_ctx->crypto_recv.info.cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- case TLS_CIPHER_AES_GCM_256:
- break;
- default:
- return -EINVAL;
- }
cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
+ DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
rxm = strp_msg(tls_strp_msg(sw_ctx));
orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index b4a65f53d9c0..1d2b4d83ccab 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -62,14 +62,8 @@ static int tls_enc_record(struct aead_request *aead_req,
u16 len;
int rc;
- switch (prot->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- case TLS_CIPHER_AES_GCM_256:
- break;
- default:
- return -EINVAL;
- }
cipher_desc = get_cipher_desc(prot->cipher_type);
+ DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
len = min_t(int, *in_len, buf_size);
@@ -338,14 +332,9 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
if (!aead_req)
return NULL;
- switch (tls_ctx->crypto_send.info.cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- case TLS_CIPHER_AES_GCM_256:
- break;
- default:
- goto free_req;
- }
cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
+ DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
+
buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
sync_size + cipher_desc->tag;
buf = kmalloc(buf_len, GFP_ATOMIC);
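
A note on the replacement check used in the hunks above: DEBUG_NET_WARN_ON_ONCE() (from include/net/net_debug.h) is the networking stack's debug-only assertion. With CONFIG_DEBUG_NET it behaves like WARN_ON_ONCE(); without it, no runtime check is emitted, so the dropped -EINVAL returns are not traded for a branch in production builds. The sketch below is only a rough userspace approximation of that warn-once behavior, under the stated assumptions, and is not the kernel's definition.

/*
 * Rough approximation of a "warn once, debug builds only" check.
 * The kernel's DEBUG_NET_WARN_ON_ONCE() is built on WARN_ON_ONCE()
 * and CONFIG_DEBUG_NET; this sketch only mirrors the idea.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_DEBUG_NET
#define DEBUG_NET_WARN_ON_ONCE(cond)                                    \
        ({                                                              \
                static bool __warned;                                   \
                bool __cond = (cond);                                   \
                if (__cond && !__warned) {                              \
                        __warned = true;                                \
                        fprintf(stderr, "WARNING: %s:%d: %s\n",         \
                                __FILE__, __LINE__, #cond);             \
                }                                                       \
                __cond;                                                 \
        })
#else
/* No runtime check: the condition is only type-checked, never evaluated. */
#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)sizeof(!!(cond)))
#endif

int main(void)
{
        int offloadable = 0;

        /* Warns at most once, and only when compiled with -DCONFIG_DEBUG_NET. */
        DEBUG_NET_WARN_ON_ONCE(!offloadable);
        return 0;
}

Building with cc -DCONFIG_DEBUG_NET prints the warning once; without the define the call site costs nothing at run time, which is the point of using it for a condition that should be unreachable.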