Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--  io_uring/uring_cmd.c | 92
1 file changed, 59 insertions(+), 33 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index e6701b7aa147..f2cfc371f3d0 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,10 +16,19 @@
#include "rsrc.h"
#include "uring_cmd.h"
+void io_cmd_cache_free(const void *entry)
+{
+ struct io_async_cmd *ac = (struct io_async_cmd *)entry;
+
+ io_vec_free(&ac->vec);
+ kfree(ac);
+}
+
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct io_uring_cmd_data *cache = req->async_data;
+ struct io_async_cmd *ac = req->async_data;
+ struct io_uring_cmd_data *cache = &ac->data;
if (cache->op_data) {
kfree(cache->op_data);
@@ -28,13 +37,23 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
if (issue_flags & IO_URING_F_UNLOCKED)
return;
- if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
+
+ io_alloc_cache_vec_kasan(&ac->vec);
+ if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+ io_vec_free(&ac->vec);
+
+ if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
ioucmd->sqe = NULL;
req->async_data = NULL;
- req->flags &= ~REQ_F_ASYNC_DATA;
+ req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
}
}
+void io_uring_cmd_cleanup(struct io_kiocb *req)
+{
+ io_req_uring_cleanup(req, 0);
+}
+
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
struct io_uring_task *tctx, bool cancel_all)
{
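
For context, io_cmd_cache_free() above is the destructor for entries of the new ctx->cmd_cache: cached entries are now full struct io_async_cmd objects carrying an iovec, so a plain kfree() is no longer enough. The setup/teardown side is not part of this diff; a sketch of the expected hookup, with the io_alloc_cache_init() arguments being an assumption, would be:

/* sketch only: the real registration lives in io_uring.c, outside this diff */
io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
		    sizeof(struct io_async_cmd),
		    sizeof(struct io_uring_cmd_data));
/* ... */
/* on ring teardown, cached entries must drop their cached iovec too */
io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
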
@@ -102,7 +121,7 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
-static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
unsigned int flags = IO_URING_F_COMPLETE_DEFER;
@@ -169,12 +188,15 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct io_uring_cmd_data *cache;
+ struct io_async_cmd *ac;
- cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
- if (!cache)
+ /* see io_uring_cmd_get_async_data() */
+ BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);
+
+ ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
+ if (!ac)
return -ENOMEM;
- cache->op_data = NULL;
+ ac->data.op_data = NULL;
/*
* Unconditionally cache the SQE for now - this is only needed for
@@ -183,8 +205,8 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
* that it doesn't read in per-op data, play it safe and ensure that
* any SQE data is stable beyond prep. This can later get relaxed.
*/
- memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = cache->sqes;
+ memcpy(ac->data.sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = ac->data.sqes;
return 0;
}
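
The BUILD_BUG_ON() above only works because struct io_uring_cmd_data is the first member of the new async-data container: req->async_data can then be read either as the container or as the cmd data that drivers see through io_uring_cmd_get_async_data(). A rough reconstruction of the layout this patch assumes (the actual definitions live in uring_cmd.h and include/linux/io_uring/cmd.h, not in this diff):

struct io_async_cmd {
	struct io_uring_cmd_data	data;	/* must stay at offset 0 */
	struct iou_vec			vec;	/* registered-buffer iovec cache */
};

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	/* safe only while offsetof(struct io_async_cmd, data) == 0 */
	return cmd_to_io_kiocb(cmd)->async_data;
}
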
@@ -199,21 +221,9 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ioucmd->flags & ~IORING_URING_CMD_MASK)
return -EINVAL;
- if (ioucmd->flags & IORING_URING_CMD_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- struct io_rsrc_node *node;
- u16 index = READ_ONCE(sqe->buf_index);
-
- node = io_rsrc_node_lookup(&ctx->buf_table, index);
- if (unlikely(!node))
- return -EFAULT;
- /*
- * Pin the node upfront, prior to io_uring_cmd_import_fixed()
- * being called. This prevents destruction of the mapped buffer
- * we'll need at actual import time.
- */
- io_req_assign_buf_node(req, node);
- }
+ if (ioucmd->flags & IORING_URING_CMD_FIXED)
+ req->buf_index = READ_ONCE(sqe->buf_index);
+
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
return io_uring_cmd_prep_setup(req, sqe);
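
With the hunk above, prep no longer looks up and pins the io_rsrc_node; it only records sqe->buf_index, and the registered buffer is resolved when the driver actually imports it (see io_uring_cmd_import_fixed() below, which now takes issue_flags for that reason). A hypothetical ->uring_cmd() handler under the new contract, with my_cmd and its fields being illustrative only:

static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	const struct my_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);	/* hypothetical per-op struct */
	struct iov_iter iter;
	int ret;

	/* buf_index recorded at prep is resolved here, at import time */
	ret = io_uring_cmd_import_fixed(cmd->addr, cmd->len, WRITE,
					&iter, ioucmd, issue_flags);
	if (ret < 0)
		return ret;

	/* queue the I/O against &iter; complete later via io_uring_cmd_done() */
	return -EIOCBQUEUED;
}
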
@@ -237,7 +247,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32)
issue_flags |= IO_URING_F_CQE32;
- if (ctx->compat)
+ if (io_is_compat(ctx))
issue_flags |= IO_URING_F_COMPAT;
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!file->f_op->uring_cmd_iopoll)
@@ -257,19 +267,35 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
}
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
+ struct iov_iter *iter,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- struct io_rsrc_node *node = req->buf_node;
-
- /* Must have had rsrc_node assigned at prep time */
- if (node)
- return io_import_fixed(rw, iter, node->buf, ubuf, len);
- return -EFAULT;
+ return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ struct io_async_cmd *ac = req->async_data;
+ int ret;
+
+ ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
+ if (ret)
+ return ret;
+
+ return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
+ issue_flags);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);
+
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
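
The new vectored helper follows the same deferred-resolution model: the user iovec is copied and cached in ac->vec by io_prep_reg_iovec(), then translated against the request's registered buffer by io_import_reg_vec(). A rough caller-side sketch, where the SQE fields are again illustrative:

struct iov_iter iter;
int ret;

ret = io_uring_cmd_import_fixed_vec(ioucmd,
				    u64_to_user_ptr(cmd->iovec_addr),	/* hypothetical field */
				    cmd->nr_segs, WRITE, &iter,
				    issue_flags);
if (ret)
	return ret;
/* &iter now describes the registered-buffer ranges listed in the user iovec */
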