Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/kbuf.c    2
-rw-r--r--   io_uring/rsrc.c   17
-rw-r--r--   io_uring/zcrx.c   19
-rw-r--r--   io_uring/zcrx.h    5
4 files changed, 31 insertions, 12 deletions
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 098109259671..953d5e742569 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -504,6 +504,8 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
p->nbufs = tmp;
p->addr = READ_ONCE(sqe->addr);
p->len = READ_ONCE(sqe->len);
+ if (!p->len)
+ return -EINVAL;
if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
&size))
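
Note: the new check makes io_provide_buffers_prep() reject zero-sized provided buffers up front rather than letting len == 0 slide through the later multiply-overflow test, which it would pass trivially. A minimal userspace sketch of the same validate-then-multiply pattern (the function name below is illustrative, not kernel API; the kernel's check_mul_overflow() resolves to the same compiler builtin on recent toolchains):

#include <errno.h>

/* Illustrative analog of io_provide_buffers_prep()'s size validation. */
static int validate_provided_buffers(unsigned long len, unsigned long nbufs)
{
	unsigned long size;

	if (!len)		/* zero-sized buffers are rejected outright */
		return -EINVAL;
	if (__builtin_mul_overflow(len, nbufs, &size))
		return -EOVERFLOW;
	return 0;
}
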
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 5e64a8bb30a4..b36c8825550e 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -175,6 +175,18 @@ void io_rsrc_cache_free(struct io_ring_ctx *ctx)
io_alloc_cache_free(&ctx->imu_cache, kfree);
}
+static void io_clear_table_tags(struct io_rsrc_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ struct io_rsrc_node *node = data->nodes[i];
+
+ if (node)
+ node->tag = 0;
+ }
+}
+
__cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
struct io_rsrc_data *data)
{
@@ -583,6 +595,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
return 0;
fail:
+ io_clear_table_tags(&ctx->file_table.data);
io_sqe_files_unregister(ctx);
return ret;
}
@@ -902,8 +915,10 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
}
ctx->buf_table = data;
- if (ret)
+ if (ret) {
+ io_clear_table_tags(&ctx->buf_table);
io_sqe_buffers_unregister(ctx);
+ }
return ret;
}
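
The new io_clear_table_tags() helper zeroes every resource node's tag before the failure paths fall into the unregister helpers. The motivation suggested by the diff: unregistering posts a completion (CQE) for each node carrying a non-zero user tag, and a registration that failed partway through should tear down silently rather than surface tag CQEs for resources the caller never saw registered. A self-contained sketch of that interaction, with post_tag_cqe() as a hypothetical stand-in for the real completion posting:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel structures; illustrative only. */
struct node  { uint64_t tag; };
struct table { struct node **nodes; int nr; };

/* Hypothetical stand-in for posting a tag CQE back to userspace. */
static void post_tag_cqe(uint64_t tag)
{
	printf("CQE posted, tag=%llu\n", (unsigned long long)tag);
}

/* Mirrors io_clear_table_tags(): zero tags so teardown stays silent. */
static void clear_table_tags(struct table *t)
{
	for (int i = 0; i < t->nr; i++)
		if (t->nodes[i])
			t->nodes[i]->tag = 0;
}

/* Simplified unregister: a CQE is posted per surviving non-zero tag,
 * which is why the failure path clears tags before calling this. */
static void unregister_table(struct table *t)
{
	for (int i = 0; i < t->nr; i++) {
		struct node *n = t->nodes[i];

		if (n && n->tag)
			post_tag_cqe(n->tag);
		free(n);
	}
}
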
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 80d4a6f71d29..0f46e0404c04 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -181,7 +181,7 @@ static void io_zcrx_free_area(struct io_zcrx_area *area)
kvfree(area->nia.niovs);
kvfree(area->user_refs);
if (area->pages) {
- unpin_user_pages(area->pages, area->nia.num_niovs);
+ unpin_user_pages(area->pages, area->nr_folios);
kvfree(area->pages);
}
kfree(area);
@@ -192,7 +192,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
struct io_uring_zcrx_area_reg *area_reg)
{
struct io_zcrx_area *area;
- int i, ret, nr_pages;
+ int i, ret, nr_pages, nr_iovs;
struct iovec iov;
if (area_reg->flags || area_reg->rq_area_token)
@@ -220,27 +220,28 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
area->pages = NULL;
goto err;
}
- area->nia.num_niovs = nr_pages;
+ area->nr_folios = nr_iovs = nr_pages;
+ area->nia.num_niovs = nr_iovs;
- area->nia.niovs = kvmalloc_array(nr_pages, sizeof(area->nia.niovs[0]),
+ area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
GFP_KERNEL | __GFP_ZERO);
if (!area->nia.niovs)
goto err;
- area->freelist = kvmalloc_array(nr_pages, sizeof(area->freelist[0]),
+ area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
GFP_KERNEL | __GFP_ZERO);
if (!area->freelist)
goto err;
- for (i = 0; i < nr_pages; i++)
+ for (i = 0; i < nr_iovs; i++)
area->freelist[i] = i;
- area->user_refs = kvmalloc_array(nr_pages, sizeof(area->user_refs[0]),
+ area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
GFP_KERNEL | __GFP_ZERO);
if (!area->user_refs)
goto err;
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < nr_iovs; i++) {
struct net_iov *niov = &area->nia.niovs[i];
niov->owner = &area->nia;
@@ -248,7 +249,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
atomic_set(&area->user_refs[i], 0);
}
- area->free_count = nr_pages;
+ area->free_count = nr_iovs;
area->ifq = ifq;
/* we're only supporting one area per ifq for now */
area->area_id = 0;
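
In zcrx, the number of pinned page entries and the number of net_iovs exposed to the page pool are now tracked separately: io_zcrx_create_area() records area->nr_folios (still equal to nr_pages and nr_iovs for now), and io_zcrx_free_area() unpins by that count instead of nia.num_niovs. Keying the unpin off what was actually pinned keeps the release path correct if the two counts ever diverge, e.g. one large folio backing several niovs. A small sketch of the bookkeeping pattern, with release_page_ref() as a hypothetical stand-in for unpin_user_pages():

#include <stdlib.h>

/* Illustrative area bookkeeping: what was pinned vs. what is exposed. */
struct area {
	void **pages;
	unsigned long nr_folios;  /* entries actually pinned */
	unsigned int num_niovs;   /* net_iov slots handed to the page pool */
};

/* Hypothetical stand-in for dropping one pinned page reference. */
static void release_page_ref(void *page) { (void)page; }

static void free_area(struct area *a)
{
	/* Unpin by the pinned count, not the niov count: equal today,
	 * but only nr_folios is guaranteed to match the pin side. */
	for (unsigned long i = 0; i < a->nr_folios; i++)
		release_page_ref(a->pages[i]);
	free(a->pages);
}
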
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
index 706cc7300780..47f1c0e8c197 100644
--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h
@@ -15,6 +15,7 @@ struct io_zcrx_area {
bool is_mapped;
u16 area_id;
struct page **pages;
+ unsigned long nr_folios;
/* freelist */
spinlock_t freelist_lock ____cacheline_aligned_in_smp;
@@ -26,11 +27,11 @@ struct io_zcrx_ifq {
struct io_ring_ctx *ctx;
struct io_zcrx_area *area;
+ spinlock_t rq_lock ____cacheline_aligned_in_smp;
struct io_uring *rq_ring;
struct io_uring_zcrx_rqe *rqes;
- u32 rq_entries;
u32 cached_rq_head;
- spinlock_t rq_lock;
+ u32 rq_entries;
u32 if_rxq;
struct device *dev;
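
Finally, the io_zcrx_ifq layout change groups the refill-queue fields (rq_ring, rqes, cached_rq_head, rq_entries) behind rq_lock and starts that cluster on its own cache line via ____cacheline_aligned_in_smp, apparently so writers bouncing the lock and cached head don't invalidate the read-mostly fields above them. A userspace C11 analog of the same pattern (the 64-byte line size is an assumption; the kernel macro derives it per-architecture):

#include <stdalign.h>
#include <stdint.h>

#define CACHE_LINE 64	/* assumption: common x86-64 line size */

/* Analog of ____cacheline_aligned_in_smp field grouping: the fields
 * written under the lock start on a fresh cache line, away from the
 * read-mostly fields, limiting false sharing between readers and the
 * refill-queue writers. */
struct ifq_analog {
	/* read-mostly */
	const void *ctx;
	const void *area;

	/* refill-queue cluster, modified together under the lock */
	alignas(CACHE_LINE) _Atomic int rq_lock;  /* spinlock_t stand-in */
	uint32_t *rq_ring;
	uint32_t cached_rq_head;
	uint32_t rq_entries;
};
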