author Jens Axboe <axboe@kernel.dk> 2019-10-28 21:49:21 -0600
committer Jens Axboe <axboe@kernel.dk> 2019-11-01 08:35:31 -0600
commit 62755e35dfb2b113c52b81cd96d01c20971c8e02 (patch)
tree d8b0c898a5e1cf7ef8233af49773b18560198b83 /fs/io_uring.c
parent 975c99a570967dd48e917dd7853867fee3febabd (diff)
io_uring: support for generic async request cancel
This adds support for IORING_OP_ASYNC_CANCEL, which will attempt to cancel requests that have been punted to async context and are now in-flight. This works for regular read/write requests to files, as long as they haven't been started yet. For socket based IO (or things like accept4(2)), we can cancel work that is already running as well.

To cancel a request, the sqe must have ->addr set to the user_data of the request it wishes to cancel. If the request is cancelled successfully, the original request is completed with -ECANCELED and the cancel request is completed with a result of 0. If the request was already running, the original may or may not complete in error. The cancel request will complete with -EALREADY for that case.

And finally, if the request to cancel wasn't found, the cancel request is completed with -ENOENT.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
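These semantics are straightforward to exercise from user space. Below is a minimal, hypothetical sketch (not part of this patch): it uses liburing only for ring setup and fills in the cancel sqe by hand with exactly the fields io_async_cancel() inspects. READ_UDATA and CANCEL_UDATA are made-up tags, and it assumes a kernel and uapi header that include this patch's IORING_OP_ASYNC_CANCEL opcode.

#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define READ_UDATA   0x1234ULL
#define CANCEL_UDATA 0x5678ULL

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Queue a read from stdin; with no data pending it is punted
	 * to async context, which makes it a candidate for cancel. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, 0, &iov, 1, 0);
	sqe->user_data = READ_UDATA;

	/* The cancel request: only ->opcode, ->addr (the target's
	 * user_data), and ->user_data are set; a nonzero off, len,
	 * ioprio, or cancel_flags would draw -EINVAL. */
	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = READ_UDATA;
	sqe->user_data = CANCEL_UDATA;

	io_uring_submit(&ring);

	/* Reap both completions. Depending on timing, the cancel's
	 * res is 0 (target completed with -ECANCELED), -EALREADY
	 * (target already running), or -ENOENT (target not found). */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		printf("user_data=0x%llx res=%d\n",
		       (unsigned long long) cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

Note that the -EALREADY case is why a caller must still be prepared to reap a completion for the original request: once the work is running, cancelation is best-effort and the original may or may not complete in error.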
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  45
1 file changed, 45 insertions, 0 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 72d260520c8f..76d653085987 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2133,6 +2133,48 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ return req->user_data == (unsigned long) data;
+}
+
+static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ enum io_wq_cancel cancel_ret;
+ void *sqe_addr;
+ int ret = 0;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
+ sqe->cancel_flags)
+ return -EINVAL;
+
+ sqe_addr = (void *) (unsigned long) READ_ONCE(sqe->addr);
+ cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+ switch (cancel_ret) {
+ case IO_WQ_CANCEL_OK:
+ ret = 0;
+ break;
+ case IO_WQ_CANCEL_RUNNING:
+ ret = -EALREADY;
+ break;
+ case IO_WQ_CANCEL_NOTFOUND:
+ ret = -ENOENT;
+ break;
+ }
+
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req->ctx, sqe->user_data, ret);
+ io_put_req(req, nxt);
+ return 0;
+}
+
static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
@@ -2217,6 +2259,9 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
case IORING_OP_ACCEPT:
ret = io_accept(req, s->sqe, nxt, force_nonblock);
break;
+ case IORING_OP_ASYNC_CANCEL:
+ ret = io_async_cancel(req, s->sqe, nxt);
+ break;
default:
ret = -EINVAL;
break;