author     Tejun Heo <tj@kernel.org>       2011-12-14 00:33:38 +0100
committer  Jens Axboe <axboe@kernel.dk>    2011-12-14 00:33:38 +0100
commit     6e736be7f282fff705db7c34a15313281b372a76 (patch)
tree       1683e00e073ee4bd3027798f92ae2d368404d44b /block
parent     42ec57a8f68311bbbf4ff96a5d33c8a2e90b9d05 (diff)
block: make ioc get/put interface more conventional and fix race on allocation
Ignoring copy_io() during fork, io_context can be allocated from two places - current_io_context() and set_task_ioprio(). The former is always called from the local task while the latter can be called from a different task. The synchronization between them is peculiar and dubious.

* current_io_context() doesn't grab task_lock() and assumes that if it saw %NULL ->io_context, it would stay that way until allocation and assignment are complete. It has smp_wmb() between alloc/init and assignment.

* set_task_ioprio() grabs task_lock() for assignment and does smp_read_barrier_depends() between "ioc = task->io_context" and "if (ioc)". Unfortunately, this doesn't achieve anything - the latter is not a dependent load of the former. That is, if ioc itself were being dereferenced ("ioc->xxx") it would mean something, but as the code currently stands the dependent read barrier is a noop.

As only one of the two test-assignment sequences is task_lock() protected, task_lock() can't do much about the race between the two. Nothing prevents current_io_context() and set_task_ioprio() from each allocating its own ioc for the same task and overwriting the other's.

Also, set_task_ioprio() can race with an exiting task and create a new ioc after exit_io_context() is finished.

ioc get/put doesn't have any reason to be complex. The only hot path is accessing the existing ioc of %current, which is simple to achieve given that ->io_context is never destroyed as long as the task is alive. All other paths can happily go through task_lock() like all other task substructures without impacting anything.

This patch updates ioc get/put so that it becomes more conventional.

* alloc_io_context() is replaced with get_task_io_context(). This is the only interface which can acquire access to the ioc of another task. On return, the caller has an explicit reference to the object which should be put using put_io_context() afterwards.

* The functionality of current_io_context() remains the same but, when creating a new ioc, it shares the code path with get_task_io_context() and always goes through task_lock().

* get_io_context() now means incrementing the reference count on an ioc which the caller already has access to (be that an explicit refcount or the implicit %current one).

* PF_EXITING inhibits creation of new io_contexts and, once exit_io_context() is finished, it's guaranteed that both ioc acquisition functions return %NULL.

* All users are updated. Most are trivial but the smp_read_barrier_depends() removal from cfq_get_io_context() needs a bit of explanation. Presumably the original intention was to ensure ioc->ioprio is visible when set_task_ioprio() allocates a new io_context and installs it; however, this wouldn't have worked because set_task_ioprio() doesn't have a wmb between init and install. There are other problems with this which will be fixed in another patch.

* While at it, use NUMA_NO_NODE instead of -1 for wildcard node specification.

-v2: Vivek spotted contamination from a debug patch. Removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
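To make the resulting calling convention concrete, here is a minimal caller-side sketch (not part of the patch; the GFP flags, the task pointer, and the do_something() helper are illustrative only):

	struct io_context *ioc;

	/*
	 * Accessing another task's ioc: the only way in is
	 * get_task_io_context(), which returns the ioc with an explicit
	 * reference, or NULL if @task is already PF_EXITING or the
	 * allocation fails.
	 */
	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
	if (ioc) {
		do_something(ioc);	/* hypothetical user of the ioc */
		put_io_context(ioc);	/* drop the explicit reference */
	}

	/*
	 * Hot path for %current: ->io_context is never destroyed while the
	 * task is alive, so no reference needs to be taken just to use it
	 * from the owning task ...
	 */
	ioc = current_io_context(GFP_NOIO, NUMA_NO_NODE);

	/*
	 * ... and get_io_context() only pins an ioc the caller can already
	 * see, e.g. when the ioc must outlive the current context.
	 */
	if (ioc)
		get_io_context(ioc);	/* pair with put_io_context() later */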
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |  9
-rw-r--r--  block/blk-ioc.c      | 99
-rw-r--r--  block/blk.h          |  1
-rw-r--r--  block/cfq-iosched.c  | 18
4 files changed, 81 insertions(+), 46 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8f630cec906e..4b001dcd85b0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1645,11 +1645,12 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
struct io_context *ioc;
- task_lock(tsk);
- ioc = tsk->io_context;
- if (ioc)
+ /* we don't lose anything even if ioc allocation fails */
+ ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
+ if (ioc) {
ioc->cgroup_changed = 1;
- task_unlock(tsk);
+ put_io_context(ioc);
+ }
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 8bebf06bac76..b13ed96776c2 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -16,6 +16,19 @@
*/
static struct kmem_cache *iocontext_cachep;
+/**
+ * get_io_context - increment reference count to io_context
+ * @ioc: io_context to get
+ *
+ * Increment reference count to @ioc.
+ */
+void get_io_context(struct io_context *ioc)
+{
+ BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+ atomic_long_inc(&ioc->refcount);
+}
+EXPORT_SYMBOL(get_io_context);
+
static void cfq_dtor(struct io_context *ioc)
{
if (!hlist_empty(&ioc->cic_list)) {
@@ -71,6 +84,9 @@ void exit_io_context(struct task_struct *task)
{
struct io_context *ioc;
+ /* PF_EXITING prevents new io_context from being attached to @task */
+ WARN_ON_ONCE(!(current->flags & PF_EXITING));
+
task_lock(task);
ioc = task->io_context;
task->io_context = NULL;
@@ -82,7 +98,9 @@ void exit_io_context(struct task_struct *task)
put_io_context(ioc);
}
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+static struct io_context *create_task_io_context(struct task_struct *task,
+ gfp_t gfp_flags, int node,
+ bool take_ref)
{
struct io_context *ioc;
@@ -98,6 +116,20 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->cic_list);
+ /* try to install, somebody might already have beaten us to it */
+ task_lock(task);
+
+ if (!task->io_context && !(task->flags & PF_EXITING)) {
+ task->io_context = ioc;
+ } else {
+ kmem_cache_free(iocontext_cachep, ioc);
+ ioc = task->io_context;
+ }
+
+ if (ioc && take_ref)
+ get_io_context(ioc);
+
+ task_unlock(task);
return ioc;
}
@@ -114,46 +146,47 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
*/
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
- struct task_struct *tsk = current;
- struct io_context *ret;
-
- ret = tsk->io_context;
- if (likely(ret))
- return ret;
-
- ret = alloc_io_context(gfp_flags, node);
- if (ret) {
- /* make sure set_task_ioprio() sees the settings above */
- smp_wmb();
- tsk->io_context = ret;
- }
+ might_sleep_if(gfp_flags & __GFP_WAIT);
- return ret;
+ if (current->io_context)
+ return current->io_context;
+
+ return create_task_io_context(current, gfp_flags, node, false);
}
+EXPORT_SYMBOL(current_io_context);
-/*
- * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+/**
+ * get_task_io_context - get io_context of a task
+ * @task: task of interest
+ * @gfp_flags: allocation flags, used if allocation is necessary
+ * @node: allocation node, used if allocation is necessary
+ *
+ * Return io_context of @task. If it doesn't exist, it is created with
+ * @gfp_flags and @node. The returned io_context has its reference count
+ * incremented.
*
- * This is always called in the context of the task which submitted the I/O.
+ * This function always goes through task_lock() and it's better to use
+ * current_io_context() + get_io_context() for %current.
*/
-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+struct io_context *get_task_io_context(struct task_struct *task,
+ gfp_t gfp_flags, int node)
{
- struct io_context *ioc = NULL;
-
- /*
- * Check for unlikely race with exiting task. ioc ref count is
- * zero when ioc is being detached.
- */
- do {
- ioc = current_io_context(gfp_flags, node);
- if (unlikely(!ioc))
- break;
- } while (!atomic_long_inc_not_zero(&ioc->refcount));
+ struct io_context *ioc;
- return ioc;
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+
+ task_lock(task);
+ ioc = task->io_context;
+ if (likely(ioc)) {
+ get_io_context(ioc);
+ task_unlock(task);
+ return ioc;
+ }
+ task_unlock(task);
+
+ return create_task_io_context(task, gfp_flags, node, true);
}
-EXPORT_SYMBOL(get_io_context);
+EXPORT_SYMBOL(get_task_io_context);
static int __init blk_ioc_init(void)
{
diff --git a/block/blk.h b/block/blk.h
index aae4d88fc523..fc3c41b2fd24 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -122,6 +122,7 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
}
#endif
+void get_io_context(struct io_context *ioc);
struct io_context *current_io_context(gfp_t gfp_flags, int node);
int ll_back_merge_fn(struct request_queue *q, struct request *req,
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ec3f5e8ba564..d42d89ccce1b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,6 +14,7 @@
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
+#include "blk.h"
#include "cfq.h"
/*
@@ -3194,13 +3195,13 @@ static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
- struct cfq_io_context *cic;
+ struct cfq_io_context *cic = NULL;
might_sleep_if(gfp_mask & __GFP_WAIT);
- ioc = get_io_context(gfp_mask, cfqd->queue->node);
+ ioc = current_io_context(gfp_mask, cfqd->queue->node);
if (!ioc)
- return NULL;
+ goto err;
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
@@ -3211,10 +3212,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
goto err;
if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
- goto err_free;
-
+ goto err;
out:
- smp_read_barrier_depends();
+ get_io_context(ioc);
+
if (unlikely(ioc->ioprio_changed))
cfq_ioc_set_ioprio(ioc);
@@ -3223,10 +3224,9 @@ out:
cfq_ioc_set_cgroup(ioc);
#endif
return cic;
-err_free:
- cfq_cic_free(cic);
err:
- put_io_context(ioc);
+ if (cic)
+ cfq_cic_free(cic);
return NULL;
}