| author | Jens Axboe <jens.axboe@oracle.com> | 2009-09-13 20:07:36 +0200 |
| --- | --- | --- |
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-09-16 15:18:52 +0200 |
| commit | bcddc3f01c9122882c8b9f12ab94a934e55aef97 (patch) | |
| tree | db642f05ec083f49c4e32cf8d93c9f14028d8ec5 | |
| parent | cfc4ba5365449cb6b5c9f68d755a142f17da1e47 (diff) | |
| download | lwn-bcddc3f01c9122882c8b9f12ab94a934e55aef97.tar.gz lwn-bcddc3f01c9122882c8b9f12ab94a934e55aef97.zip | |
writeback: inline allocation failure handling in bdi_alloc_queue_work()
This gets rid of the work == NULL special case in bdi_queue_work() and
puts the OOM handling where it belongs: at the allocation site.
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
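
For readers outside the kernel tree, the shape of the change is easy to see in a standalone sketch before reading the diff below. This is a hedged illustration, not the kernel API: work_item, queue_work(), alloc_queue_work(), and wake_worker() are hypothetical stand-ins for struct bdi_work, bdi_queue_work(), bdi_alloc_queue_work(), and waking the flusher thread. The point is that after the change the queueing function may assume a non-NULL work item, because the allocation site handles its own failure:

```c
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int payload;
	struct work_item *next;
};

static struct work_item *queue_head;

/* After the change: callers guarantee work != NULL, so no branch here. */
static void queue_work(struct work_item *work)
{
	work->next = queue_head;
	queue_head = work;
}

/* Stand-in for waking the flusher thread when no work item exists. */
static void wake_worker(void)
{
	puts("allocation failed: waking the worker to flush old data anyway");
}

/* The OOM handling now lives next to the allocation that can fail. */
static void alloc_queue_work(int payload)
{
	struct work_item *work = malloc(sizeof(*work));

	if (work) {
		work->payload = payload;
		queue_work(work);
	} else {
		wake_worker();
	}
}

int main(void)
{
	alloc_queue_work(42);
	if (queue_head)
		printf("queued payload: %d\n", queue_head->payload);
	return 0;
}
```

The design win is locality: the only caller that can produce a NULL pointer is the one that allocated it, so the fallback sits next to the kmalloc() instead of being rediscovered behind a work == NULL check downstream.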
-rw-r--r-- | fs/fs-writeback.c | 49 |
1 file changed, 27 insertions, 22 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f8cd7a97f5b7..59b3ee63b624 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -149,21 +149,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 
 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 {
-	if (work) {
-		work->seen = bdi->wb_mask;
-		BUG_ON(!work->seen);
-		atomic_set(&work->pending, bdi->wb_cnt);
-		BUG_ON(!bdi->wb_cnt);
+	work->seen = bdi->wb_mask;
+	BUG_ON(!work->seen);
+	atomic_set(&work->pending, bdi->wb_cnt);
+	BUG_ON(!bdi->wb_cnt);
 
-		/*
-		 * Make sure stores are seen before it appears on the list
-		 */
-		smp_mb();
+	/*
+	 * Make sure stores are seen before it appears on the list
+	 */
+	smp_mb();
 
-		spin_lock(&bdi->wb_lock);
-		list_add_tail_rcu(&work->list, &bdi->work_list);
-		spin_unlock(&bdi->wb_lock);
-	}
+	spin_lock(&bdi->wb_lock);
+	list_add_tail_rcu(&work->list, &bdi->work_list);
+	spin_unlock(&bdi->wb_lock);
 
 	/*
 	 * If the default thread isn't there, make sure we add it. When
@@ -175,14 +173,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 		struct bdi_writeback *wb = &bdi->wb;
 
 		/*
-		 * If we failed allocating the bdi work item, wake up the wb
-		 * thread always. As a safety precaution, it'll flush out
-		 * everything
+		 * End work now if this wb has no dirty IO pending. Otherwise
+		 * wakeup the handling thread
 		 */
-		if (!wb_has_dirty_io(wb)) {
-			if (work)
-				wb_clear_pending(wb, work);
-		} else if (wb->task)
+		if (!wb_has_dirty_io(wb))
+			wb_clear_pending(wb, work);
+		else if (wb->task)
 			wake_up_process(wb->task);
 	}
 }
@@ -202,11 +198,20 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 {
 	struct bdi_work *work;
 
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work)
+	if (work) {
 		bdi_work_init(work, wbc);
+		bdi_queue_work(bdi, work);
+	} else {
+		struct bdi_writeback *wb = &bdi->wb;
 
-	bdi_queue_work(bdi, work);
+		if (wb->task)
+			wake_up_process(wb->task);
+	}
 }
 
 void bdi_start_writeback(struct writeback_control *wbc)
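
One detail the diff preserves rather than introduces: bdi_queue_work() fully initializes the work item and issues smp_mb() before linking it into bdi->work_list with list_add_tail_rcu(), so a flusher thread that finds the item on the list also sees its fields. As a rough userspace analogy (hedged: this is a single-writer sketch using C11 release/acquire semantics, not the kernel's smp_mb()/RCU machinery, and all names are illustrative), the publish side looks like:

```c
#include <stdatomic.h>

struct node {
	int payload;
	struct node *next;
};

static _Atomic(struct node *) head;

/*
 * Single-writer publish: initialize the node completely, then make it
 * reachable with a release store. A reader that loads head with acquire
 * ordering and sees this node is guaranteed to see payload as well,
 * which is the role smp_mb() plays before list_add_tail_rcu() in the
 * kernel code above.
 */
static void publish(struct node *n, int payload)
{
	n->payload = payload;
	n->next = atomic_load_explicit(&head, memory_order_relaxed);
	atomic_store_explicit(&head, n, memory_order_release);
}

static struct node *peek(void)
{
	/* Acquire pairs with the release store in publish(). */
	return atomic_load_explicit(&head, memory_order_acquire);
}

int main(void)
{
	static struct node n;

	publish(&n, 42);
	return peek()->payload == 42 ? 0 : 1;
}
```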