author		Pavel Begunkov <asml.silence@gmail.com>	2022-09-01 11:54:04 +0100
committer	Jens Axboe <axboe@kernel.dk>	2022-09-01 09:13:33 -0600
commit		b48c312be05e83b55a4d58bf61f80b4a3288fb7e (patch)
tree		bb07d8c2abc0d8c225d3050fb48e3a3689a120a3 /io_uring/net.c
parent		57f332246afa5929bdf2e7a5facddedb43549be4 (diff)
io_uring/net: simplify zerocopy send user API
Following user feedback, this patch simplifies the zerocopy send API. One of
the main complaints is that the current API is difficult to use, with
userspace managing notification slots, and send retries with error handling
make it even worse.

Instead of keeping notification slots, change it to a per-request
notification model, which posts both a completion and a notification CQE for
each request when any data has been sent, and only one CQE if it fails. All
notification CQEs will have IORING_CQE_F_NOTIF set, and IORING_CQE_F_MORE in
the completion CQE indicates whether to wait for a notification or not.
IOSQE_CQE_SKIP_SUCCESS is disallowed with zerocopy sends for now.

This is less flexible, but greatly simplifies the user API as well as the
kernel implementation. We reuse notif helpers in this patch, but in the
future there won't be a need to keep two requests.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/95287640ab98fc9417370afb16e310677c63e6ce.1662027856.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
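[Editor's note: a minimal userspace sketch of the new flow, not part of the
patch. It assumes liburing >= 2.3 for io_uring_prep_send_zc(), a connected
"sockfd", and that no other requests are in flight on the ring. On a failed
send with no data transferred, IORING_CQE_F_MORE is absent and only the
single completion CQE is posted, which the `more` check below handles.]

	/*
	 * Illustrative sketch only: submit one zerocopy send and reap the
	 * completion CQE plus, if flagged, the notification CQE.
	 */
	#include <liburing.h>
	#include <linux/io_uring.h>

	static int send_zc_once(struct io_uring *ring, int sockfd,
				const void *buf, size_t len)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;
		int ret, more;

		io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
		io_uring_sqe_set_data64(sqe, 1);	/* both CQEs carry user_data */
		io_uring_submit(ring);

		/* first CQE: the completion, cqe->res is the send result */
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		ret = cqe->res;
		/* IORING_CQE_F_MORE set: a notification CQE will follow */
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(ring, cqe);

		if (more) {
			/*
			 * second CQE: flagged IORING_CQE_F_NOTIF, posted once
			 * the kernel no longer references the buffer
			 */
			if (io_uring_wait_cqe(ring, &cqe))
				return -1;
			io_uring_cqe_seen(ring, cqe);
		}
		/* only now is it safe to reuse or free "buf" */
		return ret;
	}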
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--	io_uring/net.c	53
1 file changed, 33 insertions(+), 20 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index aac6997b7d88..7047c1342541 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -65,12 +65,12 @@ struct io_sendzc {
struct file *file;
void __user *buf;
size_t len;
- u16 slot_idx;
unsigned msg_flags;
unsigned flags;
unsigned addr_len;
void __user *addr;
size_t done_io;
+ struct io_kiocb *notif;
};
#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
@@ -879,12 +879,26 @@ out_free:
return ret;
}
+void io_sendzc_cleanup(struct io_kiocb *req)
+{
+ struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+
+ zc->notif->flags |= REQ_F_CQE_SKIP;
+ io_notif_flush(zc->notif);
+ zc->notif = NULL;
+}
+
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *notif;
- if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
+ if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
+ READ_ONCE(sqe->__pad3[0]))
+ return -EINVAL;
+ /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
+ if (req->flags & REQ_F_CQE_SKIP)
return -EINVAL;
zc->flags = READ_ONCE(sqe->ioprio);
@@ -900,11 +914,17 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->imu = READ_ONCE(ctx->user_bufs[idx]);
io_req_set_rsrc_node(req, ctx, 0);
}
+ notif = zc->notif = io_alloc_notif(ctx);
+ if (!notif)
+ return -ENOMEM;
+ notif->cqe.user_data = req->cqe.user_data;
+ notif->cqe.res = 0;
+ notif->cqe.flags = IORING_CQE_F_NOTIF;
+ req->flags |= REQ_F_NEED_CLEANUP;
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
- zc->slot_idx = READ_ONCE(sqe->notification_idx);
if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
@@ -976,33 +996,20 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
struct sockaddr_storage __address, *addr = NULL;
- struct io_ring_ctx *ctx = req->ctx;
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
- struct io_notif_slot *notif_slot;
- struct io_kiocb *notif;
struct msghdr msg;
struct iovec iov;
struct socket *sock;
- unsigned msg_flags;
+ unsigned msg_flags, cflags;
int ret, min_ret = 0;
if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
-
- if (issue_flags & IO_URING_F_UNLOCKED)
- return -EAGAIN;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
- if (!notif_slot)
- return -EINVAL;
- notif = io_get_notif(ctx, notif_slot);
- if (!notif)
- return -ENOMEM;
-
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
@@ -1033,7 +1040,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
&msg.msg_iter);
if (unlikely(ret))
return ret;
- ret = io_notif_account_mem(notif, zc->len);
+ ret = io_notif_account_mem(zc->notif, zc->len);
if (unlikely(ret))
return ret;
}
@@ -1045,7 +1052,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&msg.msg_iter);
msg.msg_flags = msg_flags;
- msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
+ msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
msg.sg_from_iter = io_sg_from_iter;
ret = sock_sendmsg(sock, &msg);
@@ -1060,6 +1067,8 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_addr(req, addr, issue_flags);
}
+ if (ret < 0 && !zc->done_io)
+ zc->notif->flags |= REQ_F_CQE_SKIP;
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
@@ -1069,7 +1078,11 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
ret += zc->done_io;
else if (zc->done_io)
ret = zc->done_io;
- io_req_set_res(req, ret, 0);
+
+ io_notif_flush(zc->notif);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
+ io_req_set_res(req, ret, cflags);
return IOU_OK;
}