path: root/io_uring/net.h
author	Jens Axboe <axboe@kernel.dk>	2022-07-07 14:30:09 -0600
committer	Jens Axboe <axboe@kernel.dk>	2022-07-24 18:39:17 -0600
commit	43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc (patch)
tree	36990dc646e88f2bc176bb4873fa783b09ac9c69 /io_uring/net.h
parent	9731bc9855dc169f27433fef3c4d0ff3496c512d (diff)
download	lwn-43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc.tar.gz
	lwn-43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc.zip
io_uring: add netmsg cache
For recvmsg/sendmsg, if they don't complete inline, we currently need
to allocate a struct io_async_msghdr for each request. This is a
somewhat large struct.

Hook up sendmsg/recvmsg to use the io_alloc_cache. This reduces the
alloc + free overhead considerably, yielding 4-5% of extra performance
running netbench.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
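The message describes swapping a per-request kmalloc/kfree pair for a recycling cache: completed requests park their io_async_msghdr on a list, and the next request pops it instead of allocating. Below is a minimal userspace sketch of that pattern, not the kernel's io_alloc_cache API; struct alloc_cache, cache_get() and cache_put() are hypothetical stand-ins.

	#include <assert.h>
	#include <stdlib.h>

	/* Stand-in for the kernel's io_cache_entry: a link embedded in
	 * the cached object itself, so an idle object costs no extra
	 * memory. The object must be at least this big. */
	struct cache_entry {
		struct cache_entry *next;
	};

	struct alloc_cache {
		struct cache_entry *head;	/* LIFO list of idle entries */
	};

	/* Pop an idle entry if one is cached; fall back to malloc(). */
	static void *cache_get(struct alloc_cache *c, size_t size)
	{
		struct cache_entry *e = c->head;

		if (e) {
			c->head = e->next;
			return e;
		}
		return malloc(size);
	}

	/* Instead of freeing, push the entry back for the next user. */
	static void cache_put(struct alloc_cache *c, struct cache_entry *e)
	{
		e->next = c->head;
		c->head = e;
	}

	int main(void)
	{
		struct alloc_cache c = { .head = NULL };
		struct cache_entry *e1, *e2;

		e1 = cache_get(&c, 256);	/* first request: hits malloc() */
		cache_put(&c, e1);		/* completion: entry goes back idle */
		e2 = cache_get(&c, 256);	/* reuses e1, no allocator call */
		assert(e1 == e2);
		free(e2);
		return 0;
	}

The win the commit cites (4-5% on netbench) comes from the hot path taking the pop/push branches rather than the allocator.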
Diffstat (limited to 'io_uring/net.h')
-rw-r--r--	io_uring/net.h	|  13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/io_uring/net.h b/io_uring/net.h
index 81d71d164770..178a6d8b76e0 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -3,9 +3,14 @@
 #include <linux/net.h>
 #include <linux/uio.h>
 
+#include "alloc_cache.h"
+
 #if defined(CONFIG_NET)
 struct io_async_msghdr {
-	struct iovec			fast_iov[UIO_FASTIOV];
+	union {
+		struct iovec		fast_iov[UIO_FASTIOV];
+		struct io_cache_entry	cache;
+	};
 	/* points to an allocated iov, if NULL we use fast_iov instead */
 	struct iovec			*free_iov;
 	struct sockaddr __user		*uaddr;
@@ -40,4 +45,10 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags);
 int io_connect_prep_async(struct io_kiocb *req);
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
+
+void io_netmsg_cache_free(struct io_cache_entry *entry);
+#else
+static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
+{
+}
 #endif
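
Two details of the header change are worth noting. The union overlays struct io_cache_entry on the fast_iov storage: while a msghdr sits idle in the cache its iovec contents are dead anyway, so the cache link costs no extra space. And the #else branch supplies an empty io_netmsg_cache_free() stub so callers compile unchanged when CONFIG_NET is off. The commit adds only the declaration in this header; a speculative sketch of what the free callback could look like, assuming the kernel's usual container_of() idiom for recovering the containing struct from an embedded member:

	/* Illustrative sketch, not necessarily the exact body in net.c:
	 * the cache hands back the embedded io_cache_entry, and
	 * container_of() converts it back to the io_async_msghdr that
	 * was originally allocated, which can then be kfree()d. */
	void io_netmsg_cache_free(struct io_cache_entry *entry)
	{
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		kfree(hdr);
	}

Because the entry is embedded via the union, converting between the cached form and the live msghdr is pointer arithmetic only; no separate bookkeeping allocation is needed.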