author     Alan Stern <stern@rowland.harvard.edu>   2012-03-02 10:51:00 +0100
committer  Jens Axboe <axboe@kernel.dk>             2012-03-02 10:51:00 +0100
commit     62d3c5439c534b0e6c653fc63e6d8c67be3a57b1 (patch)
tree       d335d0e449ef2d61d52921e3f210cdd403bb025a /block
parent     cecd353a02fb1405c8a72a324b26b5acf97e7411 (diff)
Block: use a freezable workqueue for disk-event polling
This patch (as1519) fixes a bug in the block layer's disk-events polling. The polling is done by a work routine queued on the system_nrt_wq workqueue. Since that workqueue isn't freezable, the polling continues even in the middle of a system sleep transition.

Obviously, polling a suspended drive for media changes and such isn't a good thing to do; in the case of USB mass-storage devices it can lead to real problems requiring device resets and even re-enumeration.

The patch fixes things by creating a new system-wide, non-reentrant, freezable workqueue and using it for disk-events polling.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
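The new system_nrt_freezable_wq itself is created by a companion change outside block/, so its allocation does not appear in this diff. As a rough sketch of what such an allocation looks like, assuming the alloc_workqueue() interface and the WQ_FREEZABLE and WQ_NON_REENTRANT flags available in kernels of this era (the init hook and its placement here are illustrative, not the actual patch):

#include <linux/workqueue.h>

struct workqueue_struct *system_nrt_freezable_wq;

static int __init example_wq_init(void)		/* hypothetical init hook */
{
	/*
	 * WQ_FREEZABLE: work items are frozen along with user space during
	 * a system sleep transition, so queued work stops running while the
	 * system is suspended and resumes after thaw.
	 * WQ_NON_REENTRANT: a given work item never runs concurrently on
	 * more than one CPU.
	 */
	system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
					WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
	return system_nrt_freezable_wq ? 0 : -ENOMEM;
}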
Diffstat (limited to 'block')
-rw-r--r--  block/genhd.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/block/genhd.c b/block/genhd.c
index b26c4085590d..df9816ede75b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1478,9 +1478,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1524,7 +1524,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	ev->clearing |= mask;
 	if (!ev->block) {
 		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	}
 	spin_unlock_irq(&ev->lock);
 }
@@ -1561,7 +1561,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
@@ -1598,7 +1598,7 @@ static void disk_events_workfn(struct work_struct *work)
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 	spin_unlock_irq(&ev->lock);
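For context, the polling these hunks touch follows the usual self-rescheduling delayed-work pattern: the work function checks the drive and then re-queues itself for the next interval. A minimal sketch of that pattern on a freezable workqueue, using purely hypothetical names (poll_wq, poll_dwork, poll_workfn) rather than the real genhd.c code:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *poll_wq;	/* stands in for system_nrt_freezable_wq */
static struct delayed_work poll_dwork;		/* stands in for ev->dwork */

static void poll_workfn(struct work_struct *work)
{
	/* ... check the device for media-change events here ... */

	/*
	 * Re-arm the poll.  Because poll_wq is freezable, work queued here
	 * stops running while the system is suspended and resumes after thaw.
	 */
	queue_delayed_work(poll_wq, &poll_dwork, msecs_to_jiffies(2000));
}

static int __init poll_example_init(void)	/* hypothetical */
{
	poll_wq = alloc_workqueue("poll_example", WQ_FREEZABLE, 0);
	if (!poll_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&poll_dwork, poll_workfn);
	queue_delayed_work(poll_wq, &poll_dwork, msecs_to_jiffies(2000));
	return 0;
}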