author     Jens Axboe <axboe@kernel.dk>  2021-02-19 12:33:30 -0700
committer  Jens Axboe <axboe@kernel.dk>  2021-02-25 09:23:47 -0700
commit     e941894eae31b52f0fd9bdb3ce20620afa152f45 (patch)
tree       d8705880ec0fa73973f5c243b91986ca23e6ad09 /fs/io-wq.h
parent     eb2de9418d56b5e6ebf27bad51dbce3e22ee109b (diff)
io-wq: make buffered file write hashed work map per-ctx
Before the io-wq thread change, we maintained a hash work map and lock per-node per-ring. That wasn't ideal, as we really wanted it to be per ring. But now that we have per-task workers, the hash map ends up being just per-task. That'll work just fine for the normal case of having one task use a ring, but if you share the ring between tasks, then it's considerably worse than it was before.

Make the hash map per ctx instead, which provides full per-ctx buffered write serialization on hashed writes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
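To illustrate the intent, here is a minimal sketch (not taken from the patch) of how a ring context could allocate one refcounted io_wq_hash and hand the same pointer to every task's io-wq through io_wq_data. The example_*() helper names are assumptions made for this sketch; only struct io_wq_hash, struct io_wq_data, and io_wq_put_hash() come from the patch below.

#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/wait.h>
#include "io-wq.h"

/* Sketch: allocate the per-ring hash once; the ring holds the first reference.
 * kzalloc() zeroes the bucket map. */
static struct io_wq_hash *example_alloc_ring_hash(void)
{
	struct io_wq_hash *hash;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;
	refcount_set(&hash->refs, 1);
	init_waitqueue_head(&hash->wait);
	return hash;
}

/* Sketch: each task attaching to the ring gets its own io-wq, but all of them
 * are handed the same hash pointer, so hashed buffered writes are serialized
 * ring-wide instead of per task. */
static void example_fill_wq_data(struct io_wq_hash *hash, struct io_wq_data *data)
{
	refcount_inc(&hash->refs);	/* paired with io_wq_put_hash() at io-wq exit */
	data->hash = hash;
}

The refcount is what lets the shared hash outlive any single task's io-wq: whichever user drops the last reference via io_wq_put_hash() frees it.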
Diffstat (limited to 'fs/io-wq.h')
-rw-r--r--  fs/io-wq.h | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 86825673be08..3677b39db015 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -1,6 +1,7 @@
 #ifndef INTERNAL_IO_WQ_H
 #define INTERNAL_IO_WQ_H
 
+#include <linux/refcount.h>
 #include <linux/io_uring.h>
 
 struct io_wq;
@@ -93,7 +94,20 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
 typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
 typedef void (io_wq_work_fn)(struct io_wq_work *);
 
+struct io_wq_hash {
+	refcount_t refs;
+	unsigned long map;
+	struct wait_queue_head wait;
+};
+
+static inline void io_wq_put_hash(struct io_wq_hash *hash)
+{
+	if (refcount_dec_and_test(&hash->refs))
+		kfree(hash);
+}
+
 struct io_wq_data {
+	struct io_wq_hash *hash;
 	io_wq_work_fn *do_work;
 	free_work_fn *free_work;
 };
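On the teardown side, again only a sketch with an assumed example_*() helper rather than the actual io_uring code: each io-wq that received the hash pointer drops its reference when it exits, and the ring context drops its own reference when the ring goes away; whoever turns out to be last frees the shared map through the helper added above.

/* Sketch: drop one reference to the shared per-ring hash; the final put
 * (from a worker wq or from the ring itself) frees it. */
static void example_drop_ring_hash(struct io_wq_hash *hash)
{
	io_wq_put_hash(hash);
}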