author	Pavel Begunkov <asml.silence@gmail.com>	2022-12-07 03:53:31 +0000
committer	Jens Axboe <axboe@kernel.dk>	2022-12-07 06:47:13 -0700
commit	17add5cea2bbafea0d481f1a3ea9dea019a98ee9 (patch)
tree	55652b69cf57e7f16a461773ae78682bacf47e48 /io_uring
parent	e6aeb2721d3bad8379c43644d0380908e93b0187 (diff)
io_uring: force multishot CQEs into task context
Multishot requests post CQEs outside of the normal request completion path, which is usually done from within a task work handler. However, that might not be the case when the request has yet to be polled but has instead been punted to io-wq. Make it abide by ->task_complete and push it to the polling path when executed by io-wq.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d7714aaff583096769a0f26e8e747759e556feb1.1670384893.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
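For readers unfamiliar with multishot requests: a multishot receive is armed once and may then post many CQEs over time, which is where the out-of-band completions described above come from. A minimal, hedged userspace sketch using liburing (io_uring_prep_recv_multishot, IOSQE_BUFFER_SELECT and the buffer-group id are liburing/uapi names, not part of this patch; setup of the provided-buffer group and error handling are omitted):

/*
 * Hedged example, not part of this patch: arm a multishot recv with
 * liburing (assumes liburing >= 2.3 and an already-registered provided
 * buffer group BGID; all names below are illustrative userspace code).
 */
#include <liburing.h>

#define BGID	1	/* provided-buffer group id chosen for this example */

static int arm_multishot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	/* addr/len are NULL/0: the kernel picks a provided buffer per CQE */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	return io_uring_submit(ring);
}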
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/net.c	21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 90342dcb6b1d..f276f6dd5b09 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -67,6 +67,19 @@ struct io_sr_msg {
 	struct io_kiocb			*notif;
 };
 
+static inline bool io_check_multishot(struct io_kiocb *req,
+				      unsigned int issue_flags)
+{
+	/*
+	 * When ->locked_cq is set we only allow to post CQEs from the original
+	 * task context. Usual request completions will be handled in other
+	 * generic paths but multipoll may decide to post extra cqes.
+	 */
+	return !(issue_flags & IO_URING_F_IOWQ) ||
+		!(issue_flags & IO_URING_F_MULTISHOT) ||
+		!req->ctx->task_complete;
+}
+
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -730,6 +743,9 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return io_setup_async_msg(req, kmsg, issue_flags);
 
+	if (!io_check_multishot(req, issue_flags))
+		return io_setup_async_msg(req, kmsg, issue_flags);
+
 retry_multishot:
 	if (io_do_buffer_select(req)) {
 		void __user *buf;
@@ -829,6 +845,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
 
+	if (!io_check_multishot(req, issue_flags))
+		return -EAGAIN;
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
@@ -1280,6 +1299,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *file;
 	int ret, fd;
 
+	if (!io_check_multishot(req, issue_flags))
+		return -EAGAIN;
 retry:
 	if (!fixed) {
 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
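On the completion side, the same request keeps posting CQEs while IORING_CQE_F_MORE is set; a CQE without that flag means the multishot has terminated and must be re-armed. A hedged sketch of the matching userspace consumption loop (plain liburing calls, not part of this patch):

/*
 * Hedged example, not part of this patch: consume multishot CQEs. The
 * request keeps posting completions while IORING_CQE_F_MORE is set; a
 * CQE without it means the multishot terminated and must be re-armed.
 */
#include <liburing.h>

static int drain_multishot_cqes(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned int flags;
	int ret, res;

	for (;;) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		res = cqe->res;		/* bytes received, or -errno */
		flags = cqe->flags;	/* copy before marking the CQE seen */
		io_uring_cqe_seen(ring, cqe);
		if (res < 0)
			return res;
		if (!(flags & IORING_CQE_F_MORE))
			return 0;	/* terminated; caller should re-arm */
	}
}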