Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  332
1 file changed, 188 insertions(+), 144 deletions(-)
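
The user-visible addition in this diff is the IORING_ENTER_NO_IOWAIT enter flag, advertised through the IORING_FEAT_NO_IOWAIT feature bit: it lets a task wait for completions without being accounted as iowait (see the ext_arg->iowait check added in __io_cqring_wait_schedule() below). A minimal consumer sketch against the raw syscall; wait_one_cqe(), ring_fd, and features are illustrative names, and it assumes uapi headers new enough to define the two new constants:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static long wait_one_cqe(int ring_fd, unsigned int features)
{
	unsigned int flags = IORING_ENTER_GETEVENTS;

	/* Opt out of iowait accounting only when the kernel advertises it. */
	if (features & IORING_FEAT_NO_IOWAIT)
		flags |= IORING_ENTER_NO_IOWAIT;

	/* to_submit = 0, min_complete = 1, no ext_arg/sigset. */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1, flags, NULL, 0);
}

On older kernels the feature bit is simply absent from io_uring_params.features, so the flag is never set and the wait behaves as before.
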
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f7acae5f7e1d..3ba49c628337 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -97,6 +97,7 @@
#include "uring_cmd.h"
#include "msg_ring.h"
#include "memmap.h"
+#include "zcrx.h"
#include "timeout.h"
#include "poll.h"
@@ -110,11 +111,13 @@
#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
+#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
+
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
REQ_F_ASYNC_DATA)
-#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
+#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | IO_REQ_LINK_FLAGS | \
REQ_F_REISSUE | IO_REQ_CLEAN_FLAGS)
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
@@ -131,7 +134,6 @@ struct io_defer_entry {
/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
-#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
/*
* No waiters. It's larger than any valid value of the tw counter
@@ -254,7 +256,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
- req->io_task_work.func(req, &ts);
+ req->io_task_work.func(req, ts);
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
@@ -282,6 +284,17 @@ static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
return 0;
}
+static void io_free_alloc_caches(struct io_ring_ctx *ctx)
+{
+ io_alloc_cache_free(&ctx->apoll_cache, kfree);
+ io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
+ io_alloc_cache_free(&ctx->msg_cache, kfree);
+ io_futex_cache_free(ctx);
+ io_rsrc_cache_free(ctx);
+}
+
static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
@@ -313,7 +326,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
init_waitqueue_head(&ctx->sqo_sq_wait);
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
- INIT_LIST_HEAD(&ctx->io_buffers_cache);
ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
sizeof(struct async_poll), 0);
ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
@@ -322,12 +334,14 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_async_rw),
offsetof(struct io_async_rw, clear));
- ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct io_uring_cmd_data), 0);
+ ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
+ sizeof(struct io_async_cmd),
+ sizeof(struct io_async_cmd));
spin_lock_init(&ctx->msg_lock);
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_kiocb), 0);
ret |= io_futex_cache_init(ctx);
+ ret |= io_rsrc_cache_init(ctx);
if (ret)
goto free_ref;
init_completion(&ctx->ref_comp);
@@ -338,7 +352,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
spin_lock_init(&ctx->completion_lock);
raw_spin_lock_init(&ctx->timeout_lock);
INIT_WQ_LIST(&ctx->iopoll_list);
- INIT_LIST_HEAD(&ctx->io_buffers_comp);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
INIT_LIST_HEAD(&ctx->ltimeout_list);
@@ -360,12 +373,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
free_ref:
percpu_ref_exit(&ctx->refs);
err:
- io_alloc_cache_free(&ctx->apoll_cache, kfree);
- io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
- io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
- io_alloc_cache_free(&ctx->uring_cache, kfree);
- io_alloc_cache_free(&ctx->msg_cache, kfree);
- io_futex_cache_free(ctx);
+ io_free_alloc_caches(ctx);
kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
@@ -393,11 +401,8 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
static void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- spin_lock(&req->ctx->completion_lock);
- io_kbuf_drop(req);
- spin_unlock(&req->ctx->completion_lock);
- }
+ if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
+ io_kbuf_drop_legacy(req);
if (req->flags & REQ_F_NEED_CLEANUP) {
const struct io_cold_def *def = &io_cold_defs[req->opcode];
@@ -542,7 +547,7 @@ static void io_queue_iowq(struct io_kiocb *req)
io_queue_linked_timeout(link);
}
-static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
{
io_queue_iowq(req);
}
@@ -829,24 +834,14 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
return false;
}
-static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
- u32 cflags)
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
bool filled;
+ io_cq_lock(ctx);
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
if (!filled)
filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
-
- return filled;
-}
-
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
-{
- bool filled;
-
- io_cq_lock(ctx);
- filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
io_cq_unlock_post(ctx);
return filled;
}
@@ -887,6 +882,7 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ bool completed = true;
/*
* All execution paths but io-wq use the deferred completions by
@@ -899,19 +895,21 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
* Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
* the submitter task context, IOPOLL protects with uring_lock.
*/
- if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
+ if (ctx->lockless_cq || (req->flags & REQ_F_REISSUE)) {
+defer_complete:
req->io_task_work.func = io_req_task_complete;
io_req_task_work_add(req);
return;
}
io_cq_lock(ctx);
- if (!(req->flags & REQ_F_CQE_SKIP)) {
- if (!io_fill_cqe_req(ctx, req))
- io_req_cqe_overflow(req);
- }
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ completed = io_fill_cqe_req(ctx, req);
io_cq_unlock_post(ctx);
+ if (!completed)
+ goto defer_complete;
+
/*
* We don't free the request here because we know it's called from
* io-wq only, which holds a reference, so it cannot be the last put.
@@ -1021,7 +1019,7 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return nxt;
}
-static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+static void ctx_flush_and_put(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
if (!ctx)
return;
@@ -1051,24 +1049,24 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
io_task_work.node);
if (req->ctx != ctx) {
- ctx_flush_and_put(ctx, &ts);
+ ctx_flush_and_put(ctx, ts);
ctx = req->ctx;
mutex_lock(&ctx->uring_lock);
percpu_ref_get(&ctx->refs);
}
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
- req, &ts);
+ req, ts);
node = next;
(*count)++;
if (unlikely(need_resched())) {
- ctx_flush_and_put(ctx, &ts);
+ ctx_flush_and_put(ctx, ts);
ctx = NULL;
cond_resched();
}
} while (node && *count < max_entries);
- ctx_flush_and_put(ctx, &ts);
+ ctx_flush_and_put(ctx, ts);
return node;
}
@@ -1157,7 +1155,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req,
* We don't know how many requests are in the link and whether
* they can even be queued lazily, so fall back to non-lazy.
*/
- if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
+ if (req->flags & IO_REQ_LINK_FLAGS)
flags &= ~IOU_F_TWQ_LAZY_WAKE;
guard(rcu)();
@@ -1276,7 +1274,7 @@ static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
}
static int __io_run_local_work_loop(struct llist_node **node,
- struct io_tw_state *ts,
+ io_tw_token_t tw,
int events)
{
int ret = 0;
@@ -1287,7 +1285,7 @@ static int __io_run_local_work_loop(struct llist_node **node,
io_task_work.node);
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
- req, ts);
+ req, tw);
*node = next;
if (++ret >= events)
break;
@@ -1296,7 +1294,7 @@ static int __io_run_local_work_loop(struct llist_node **node,
return ret;
}
-static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
+static int __io_run_local_work(struct io_ring_ctx *ctx, io_tw_token_t tw,
int min_events, int max_events)
{
struct llist_node *node;
@@ -1309,7 +1307,7 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
min_events -= ret;
- ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, max_events);
+ ret = __io_run_local_work_loop(&ctx->retry_llist.first, tw, max_events);
if (ctx->retry_llist.first)
goto retry_done;
@@ -1318,7 +1316,7 @@ again:
* running the pending items.
*/
node = llist_reverse_order(llist_del_all(&ctx->work_llist));
- ret += __io_run_local_work_loop(&node, ts, max_events - ret);
+ ret += __io_run_local_work_loop(&node, tw, max_events - ret);
ctx->retry_llist.first = node;
loops++;
@@ -1340,7 +1338,7 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
if (!io_local_work_pending(ctx))
return 0;
- return __io_run_local_work(ctx, &ts, min_events,
+ return __io_run_local_work(ctx, ts, min_events,
max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
}
@@ -1351,20 +1349,20 @@ static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
int ret;
mutex_lock(&ctx->uring_lock);
- ret = __io_run_local_work(ctx, &ts, min_events, max_events);
+ ret = __io_run_local_work(ctx, ts, min_events, max_events);
mutex_unlock(&ctx->uring_lock);
return ret;
}
-static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, ts);
+ io_tw_lock(req->ctx, tw);
io_req_defer_failed(req, req->cqe.res);
}
-void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
+void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, ts);
+ io_tw_lock(req->ctx, tw);
if (unlikely(io_should_terminate_tw()))
io_req_defer_failed(req, -EFAULT);
else if (req->flags & REQ_F_FORCE_ASYNC)
@@ -1419,8 +1417,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
if (apoll->double_poll)
kfree(apoll->double_poll);
- if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
- kfree(apoll);
+ io_cache_free(&ctx->apoll_cache, apoll);
req->flags &= ~REQ_F_POLLED;
}
if (req->flags & IO_REQ_LINK_FLAGS)
@@ -1508,11 +1505,13 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
mutex_unlock(&ctx->uring_lock);
}
-static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
{
unsigned int nr_events = 0;
unsigned long check_cq;
+ min_events = min(min_events, ctx->cq_entries);
+
lockdep_assert_held(&ctx->uring_lock);
if (!io_allowed_run_tw(ctx))
@@ -1554,7 +1553,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
io_task_work_pending(ctx)) {
u32 tail = ctx->cached_cq_tail;
- (void) io_run_local_work_locked(ctx, min);
+ (void) io_run_local_work_locked(ctx, min_events);
if (task_work_pending(current) ||
wq_list_empty(&ctx->iopoll_list)) {
@@ -1567,7 +1566,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
wq_list_empty(&ctx->iopoll_list))
break;
}
- ret = io_do_iopoll(ctx, !min);
+ ret = io_do_iopoll(ctx, !min_events);
if (unlikely(ret < 0))
return ret;
@@ -1577,12 +1576,12 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
break;
nr_events += ret;
- } while (nr_events < min);
+ } while (nr_events < min_events);
return 0;
}
-void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
+void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw)
{
io_req_complete_defer(req);
}
@@ -1719,15 +1718,13 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
return !!req->file;
}
-static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+static inline int __io_issue_sqe(struct io_kiocb *req,
+ unsigned int issue_flags,
+ const struct io_issue_def *def)
{
- const struct io_issue_def *def = &io_issue_defs[req->opcode];
const struct cred *creds = NULL;
int ret;
- if (unlikely(!io_assign_file(req, def, issue_flags)))
- return -EBADF;
-
if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
creds = override_creds(req->creds);
@@ -1742,6 +1739,19 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
if (creds)
revert_creds(creds);
+ return ret;
+}
+
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ int ret;
+
+ if (unlikely(!io_assign_file(req, def, issue_flags)))
+ return -EBADF;
+
+ ret = __io_issue_sqe(req, issue_flags, def);
+
if (ret == IOU_OK) {
if (issue_flags & IO_URING_F_COMPLETE_DEFER)
io_req_complete_defer(req);
@@ -1762,11 +1772,23 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
-int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
+int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, ts);
- return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
- IO_URING_F_COMPLETE_DEFER);
+ const unsigned int issue_flags = IO_URING_F_NONBLOCK |
+ IO_URING_F_MULTISHOT |
+ IO_URING_F_COMPLETE_DEFER;
+ int ret;
+
+ io_tw_lock(req->ctx, tw);
+
+ WARN_ON_ONCE(!req->file);
+ if (WARN_ON_ONCE(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EFAULT;
+
+ ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
+
+ WARN_ON_ONCE(ret == IOU_ISSUE_SKIP_COMPLETE);
+ return ret;
}
struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
@@ -1818,7 +1840,7 @@ fail:
* Don't allow any multishot execution from io-wq. It's more restrictive
* than necessary and also cleaner.
*/
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
+ if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
err = -EBADFD;
if (!io_file_can_poll(req))
goto fail;
@@ -1829,7 +1851,7 @@ fail:
goto fail;
return;
} else {
- req->flags &= ~REQ_F_APOLL_MULTISHOT;
+ req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
}
}
@@ -1996,9 +2018,8 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
return true;
}
-static void io_init_req_drain(struct io_kiocb *req)
+static void io_init_drain(struct io_ring_ctx *ctx)
{
- struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *head = ctx->submit_state.link.head;
ctx->drain_active = true;
@@ -2062,7 +2083,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (sqe_flags & IOSQE_IO_DRAIN) {
if (ctx->drain_disabled)
return io_init_fail_req(req, -EOPNOTSUPP);
- io_init_req_drain(req);
+ io_init_drain(ctx);
}
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
@@ -2423,7 +2444,7 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
goto out_wake;
}
- iowq->t.function = io_cqring_timer_wakeup;
+ hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup);
hrtimer_set_expires(timer, iowq->timeout);
return HRTIMER_RESTART;
out_wake:
@@ -2458,8 +2479,18 @@ static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
}
+struct ext_arg {
+ size_t argsz;
+ struct timespec64 ts;
+ const sigset_t __user *sig;
+ ktime_t min_time;
+ bool ts_set;
+ bool iowait;
+};
+
static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
+ struct ext_arg *ext_arg,
ktime_t start_time)
{
int ret = 0;
@@ -2469,7 +2500,7 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
* can take into account that the task is waiting for IO - turns out
* to be important for low QD IO.
*/
- if (current_pending_io())
+ if (ext_arg->iowait && current_pending_io())
current->in_iowait = 1;
if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
@@ -2482,6 +2513,7 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
/* If this returns > 0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
+ struct ext_arg *ext_arg,
ktime_t start_time)
{
if (unlikely(READ_ONCE(ctx->check_cq)))
@@ -2495,17 +2527,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
if (unlikely(io_should_wake(iowq)))
return 0;
- return __io_cqring_wait_schedule(ctx, iowq, start_time);
+ return __io_cqring_wait_schedule(ctx, iowq, ext_arg, start_time);
}
-struct ext_arg {
- size_t argsz;
- struct timespec64 ts;
- const sigset_t __user *sig;
- ktime_t min_time;
- bool ts_set;
-};
-
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -2518,6 +2542,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
ktime_t start_time;
int ret;
+ min_events = min_t(int, min_events, ctx->cq_entries);
+
if (!io_allowed_run_tw(ctx))
return -EEXIST;
if (io_local_work_pending(ctx))
@@ -2583,7 +2609,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
TASK_INTERRUPTIBLE);
}
- ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
+ ret = io_cqring_wait_schedule(ctx, &iowq, ext_arg, start_time);
__set_current_state(TASK_RUNNING);
atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
@@ -2702,14 +2728,10 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
mutex_lock(&ctx->uring_lock);
io_sqe_buffers_unregister(ctx);
io_sqe_files_unregister(ctx);
+ io_unregister_zcrx_ifqs(ctx);
io_cqring_overflow_kill(ctx);
io_eventfd_unregister(ctx);
- io_alloc_cache_free(&ctx->apoll_cache, kfree);
- io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
- io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
- io_alloc_cache_free(&ctx->uring_cache, kfree);
- io_alloc_cache_free(&ctx->msg_cache, kfree);
- io_futex_cache_free(ctx);
+ io_free_alloc_caches(ctx);
io_destroy_buffers(ctx);
io_free_region(ctx, &ctx->param_region);
mutex_unlock(&ctx->uring_lock);
@@ -2866,6 +2888,11 @@ static __cold void io_ring_exit_work(struct work_struct *work)
io_cqring_overflow_kill(ctx);
mutex_unlock(&ctx->uring_lock);
}
+ if (ctx->ifq) {
+ mutex_lock(&ctx->uring_lock);
+ io_shutdown_zcrx_ifqs(ctx);
+ mutex_unlock(&ctx->uring_lock);
+ }
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
io_move_task_work_from_local(ctx);
@@ -3239,6 +3266,8 @@ static int io_get_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
const struct io_uring_getevents_arg __user *uarg = argp;
struct io_uring_getevents_arg arg;
+ ext_arg->iowait = !(flags & IORING_ENTER_NO_IOWAIT);
+
/*
* If EXT_ARG isn't set, then we have no timespec and the argp pointer
* is just a pointer to the sigset_t.
@@ -3316,7 +3345,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
IORING_ENTER_REGISTERED_RING |
IORING_ENTER_ABS_TIMER |
- IORING_ENTER_EXT_ARG_REG)))
+ IORING_ENTER_EXT_ARG_REG |
+ IORING_ENTER_NO_IOWAIT)))
return -EINVAL;
/*
@@ -3400,22 +3430,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
mutex_lock(&ctx->uring_lock);
iopoll_locked:
ret2 = io_validate_ext_arg(ctx, flags, argp, argsz);
- if (likely(!ret2)) {
- min_complete = min(min_complete,
- ctx->cq_entries);
+ if (likely(!ret2))
ret2 = io_iopoll_check(ctx, min_complete);
- }
mutex_unlock(&ctx->uring_lock);
} else {
struct ext_arg ext_arg = { .argsz = argsz };
ret2 = io_get_ext_arg(ctx, flags, argp, &ext_arg);
- if (likely(!ret2)) {
- min_complete = min(min_complete,
- ctx->cq_entries);
+ if (likely(!ret2))
ret2 = io_cqring_wait(ctx, min_complete, flags,
&ext_arg);
- }
}
if (!ret) {
@@ -3537,6 +3561,44 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
O_RDWR | O_CLOEXEC, NULL);
}
+static int io_uring_sanitise_params(struct io_uring_params *p)
+{
+ unsigned flags = p->flags;
+
+ /* There is no way to mmap rings without a real fd */
+ if ((flags & IORING_SETUP_REGISTERED_FD_ONLY) &&
+ !(flags & IORING_SETUP_NO_MMAP))
+ return -EINVAL;
+
+ if (flags & IORING_SETUP_SQPOLL) {
+ /* IPI related flags don't make sense with SQPOLL */
+ if (flags & (IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_TASKRUN_FLAG |
+ IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
+ }
+
+ if (flags & IORING_SETUP_TASKRUN_FLAG) {
+ if (!(flags & (IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_DEFER_TASKRUN)))
+ return -EINVAL;
+ }
+
+ /* HYBRID_IOPOLL only valid with IOPOLL */
+ if ((flags & IORING_SETUP_HYBRID_IOPOLL) && !(flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
+ /*
+ * For DEFER_TASKRUN we require the completion task to be the same as
+ * the submission task. This implies that there is only one submitter.
+ */
+ if ((flags & IORING_SETUP_DEFER_TASKRUN) &&
+ !(flags & IORING_SETUP_SINGLE_ISSUER))
+ return -EINVAL;
+
+ return 0;
+}
+
int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
{
if (!entries)
@@ -3547,10 +3609,6 @@ int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
entries = IORING_MAX_ENTRIES;
}
- if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
- && !(p->flags & IORING_SETUP_NO_MMAP))
- return -EINVAL;
-
/*
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
@@ -3612,6 +3670,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
struct file *file;
int ret;
+ ret = io_uring_sanitise_params(p);
+ if (ret)
+ return ret;
+
ret = io_uring_fill_params(entries, p);
if (unlikely(ret))
return ret;
@@ -3659,37 +3721,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
* For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
* COOP_TASKRUN is set, then IPIs are never needed by the app.
*/
- ret = -EINVAL;
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- /* IPI related flags don't make sense with SQPOLL */
- if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
- IORING_SETUP_TASKRUN_FLAG |
- IORING_SETUP_DEFER_TASKRUN))
- goto err;
- ctx->notify_method = TWA_SIGNAL_NO_IPI;
- } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
+ if (ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_COOP_TASKRUN))
ctx->notify_method = TWA_SIGNAL_NO_IPI;
- } else {
- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
- !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
- goto err;
+ else
ctx->notify_method = TWA_SIGNAL;
- }
-
- /* HYBRID_IOPOLL only valid with IOPOLL */
- if ((ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_HYBRID_IOPOLL)) ==
- IORING_SETUP_HYBRID_IOPOLL)
- goto err;
-
- /*
- * For DEFER_TASKRUN we require the completion task to be the same as the
- * submission task. This implies that there is only one submitter, so enforce
- * that.
- */
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
- !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
- goto err;
- }
/*
* This is just grabbed for accounting purposes. When a process exits,
@@ -3719,7 +3754,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT |
- IORING_FEAT_RW_ATTR;
+ IORING_FEAT_RW_ATTR | IORING_FEAT_NO_IOWAIT;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -3793,29 +3828,36 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
return io_uring_create(entries, &p, params);
}
-static inline bool io_uring_allowed(void)
+static inline int io_uring_allowed(void)
{
int disabled = READ_ONCE(sysctl_io_uring_disabled);
kgid_t io_uring_group;
if (disabled == 2)
- return false;
+ return -EPERM;
if (disabled == 0 || capable(CAP_SYS_ADMIN))
- return true;
+ goto allowed_lsm;
io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
if (!gid_valid(io_uring_group))
- return false;
+ return -EPERM;
- return in_group_p(io_uring_group);
+ if (!in_group_p(io_uring_group))
+ return -EPERM;
+
+allowed_lsm:
+ return security_uring_allowed();
}
SYSCALL_DEFINE2(io_uring_setup, u32, entries,
struct io_uring_params __user *, params)
{
- if (!io_uring_allowed())
- return -EPERM;
+ int ret;
+
+ ret = io_uring_allowed();
+ if (ret)
+ return ret;
return io_uring_setup(entries, params);
}
@@ -3908,6 +3950,9 @@ static int __init io_uring_init(void)
io_uring_optable_init();
+ /* imu->dir is u8 */
+ BUILD_BUG_ON((IO_IMU_DEST | IO_IMU_SOURCE) > U8_MAX);
+
/*
* Allow user copy in the per-command field, which starts after the
* file in io_kiocb and until the opcode field. The openat2 handling
@@ -3918,10 +3963,9 @@ static int __init io_uring_init(void)
req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
- io_buf_cachep = KMEM_CACHE(io_buffer,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
+ BUG_ON(!iou_wq);
#ifdef CONFIG_SYSCTL
register_sysctl_init("kernel", kernel_io_uring_disabled_table);