author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-02 09:54:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-02 09:54:49 -0700
commit     033d9959ed2dc1029217d4165f80a71702dc578e (patch)
tree       3d306316e44bdabce2e0bf2ef7e466e525f90b4c /fs
parent     974a847e00cf3ff1695e62b276892137893706ab (diff)
parent     7c6e72e46c9ea4a88f3f8ba96edce9db4bd48726 (diff)
Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
"This is workqueue updates for v3.7-rc1. A lot of activities this
round including considerable API and behavior cleanups.
* delayed_work combines a timer and a work item. The handling of the
timer part has always been a bit clunky, leading to a confusing
cancelation API with weird corner-case behaviors. delayed_work is
updated to use the new IRQ-safe timer, and cancelation now works as
expected.
* Another deficiency of delayed_work was the lack of a counterpart to
mod_timer(), which led to cancel+queue combinations or open-coded
timer+work usages. mod_delayed_work[_on]() are added.
Together, these two changes give delayed_work a timer-like interface
and make it behave like a timer whose callback runs in process context.
* A work item could be executed concurrently on multiple CPUs, which
is rather unintuitive and made flush_work() behavior confusing and
half-broken under certain circumstances. This problem doesn't
exist for non-reentrant workqueues. While the non-reentrancy check
isn't free, the overhead is incurred only when a work item bounces
across different CPUs, and even in a simulated pathological scenario
the overhead isn't too high.
All workqueues are made non-reentrant. This removes the
distinction between flush_[delayed_]work() and
flush_[delayed_]work_sync(). The former is now as strong as the
latter, and the specified work item is guaranteed to have finished
execution of any previous queueing on return.
* In addition to the various bug fixes, Lai redid and simplified CPU
hotplug handling significantly.
* Joonsoo introduced system_highpri_wq and used it during CPU
hotplug.
There are two merge commits - one to pull in IRQ safe timer from
tip/timers/core and the other to pull in CPU hotplug fixes from
wq/for-3.6-fixes as Lai's hotplug restructuring depended on them."
Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where the deprecated interfaces had been used by new
code in the merge window, and thus didn't cause any real data conflicts.
Tejun pointed out a few of them, and I fixed a couple more.
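The recurring conversion in the fs/ changes below replaces open-coded
cancel+queue sequences with mod_delayed_work(). A minimal sketch of the
before/after pattern, using only the workqueue API described above; the
foo_dev structure and function names are hypothetical, not taken from this
merge:

#include <linux/workqueue.h>

/* Hypothetical driver state; names are illustrative only. */
struct foo_dev {
	struct delayed_work refresh_work;
};

static void foo_refresh_fn(struct work_struct *work)
{
	/* the periodic refresh runs here, in process context */
}

static void foo_init(struct foo_dev *dev)
{
	INIT_DELAYED_WORK(&dev->refresh_work, foo_refresh_fn);
}

/*
 * Old pattern: queue_delayed_work() fails if the work is already
 * pending, so callers open-coded cancel + requeue to move the timer.
 */
static void foo_kick_old(struct foo_dev *dev, unsigned long delay)
{
	if (!queue_delayed_work(system_wq, &dev->refresh_work, delay)) {
		cancel_delayed_work(&dev->refresh_work);
		queue_delayed_work(system_wq, &dev->refresh_work, delay);
	}
}

/*
 * New pattern: mod_delayed_work() is the mod_timer() counterpart --
 * it (re)schedules the work for @delay regardless of pending state.
 */
static void foo_kick(struct foo_dev *dev, unsigned long delay)
{
	mod_delayed_work(system_wq, &dev->refresh_work, delay);
}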
* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
workqueue: remove @delayed from cwq_dec_nr_in_flight()
workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
workqueue: use __cpuinit instead of __devinit for cpu callbacks
workqueue: rename manager_mutex to assoc_mutex
workqueue: WORKER_REBIND is no longer necessary for idle rebinding
workqueue: WORKER_REBIND is no longer necessary for busy rebinding
workqueue: reimplement idle worker rebinding
workqueue: deprecate __cancel_delayed_work()
workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
workqueue: use mod_delayed_work() instead of __cancel + queue
workqueue: use irqsafe timer for delayed_work
workqueue: clean up delayed_work initializers and add missing one
workqueue: make deferrable delayed_work initializer names consistent
workqueue: cosmetic whitespace updates for macro definitions
workqueue: deprecate system_nrt[_freezable]_wq
workqueue: deprecate flush[_delayed]_work_sync()
...
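The other recurring change in the fs/ diff below is dropping the _sync()
flush variants. A rough sketch of what that means for callers, continuing
the hypothetical foo_dev example above:

#include <linux/workqueue.h>

/* Same hypothetical device as in the sketch above. */
struct foo_dev {
	struct delayed_work refresh_work;
};

static void foo_shutdown(struct foo_dev *dev)
{
	/*
	 * Before this series, flush_delayed_work() could miss an instance
	 * of the work item running on a different CPU; only the _sync()
	 * variant guaranteed completion.  With all workqueues now
	 * non-reentrant, the plain flush is just as strong, so the _sync()
	 * variants are deprecated and their callers converted below.
	 */
	flush_delayed_work(&dev->refresh_work);
}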
Diffstat (limited to 'fs')
-rw-r--r--  fs/affs/super.c            2
-rw-r--r--  fs/afs/callback.c          4
-rw-r--r--  fs/afs/server.c           10
-rw-r--r--  fs/afs/vlocation.c        14
-rw-r--r--  fs/gfs2/lock_dlm.c         2
-rw-r--r--  fs/gfs2/super.c            2
-rw-r--r--  fs/hfs/inode.c             2
-rw-r--r--  fs/ncpfs/inode.c           6
-rw-r--r--  fs/nfs/nfs4renewd.c        3
-rw-r--r--  fs/ocfs2/cluster/quorum.c  2
-rw-r--r--  fs/xfs/xfs_super.c         2
-rw-r--r--  fs/xfs/xfs_sync.c          2
12 files changed, 17 insertions, 34 deletions
diff --git a/fs/affs/super.c b/fs/affs/super.c
index c70f1e5fc024..022cecb0757d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -551,7 +551,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
 		return -EINVAL;
 	}
-	flush_delayed_work_sync(&sbi->sb_work);
+	flush_delayed_work(&sbi->sb_work);
 	replace_mount_options(sb, new_opts);
 	sbi->s_flags = mount_flags;
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 587ef5123cd8..7ef637d7f3a5 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -351,9 +351,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
  */
 void afs_flush_callback_breaks(struct afs_server *server)
 {
-	cancel_delayed_work(&server->cb_break_work);
-	queue_delayed_work(afs_callback_update_worker,
-			   &server->cb_break_work, 0);
+	mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
 }
 #if 0
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d59b7516e943..f342acf3547d 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -285,12 +285,7 @@ static void afs_reap_server(struct work_struct *work)
 		expiry = server->time_of_death + afs_server_timeout;
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
-			if (!queue_delayed_work(afs_wq, &afs_server_reaper,
-						delay)) {
-				cancel_delayed_work(&afs_server_reaper);
-				queue_delayed_work(afs_wq, &afs_server_reaper,
-						   delay);
-			}
+			mod_delayed_work(afs_wq, &afs_server_reaper, delay);
 			break;
 		}
@@ -323,6 +318,5 @@ static void afs_reap_server(struct work_struct *work)
 void __exit afs_purge_servers(void)
 {
 	afs_server_timeout = 0;
-	cancel_delayed_work(&afs_server_reaper);
-	queue_delayed_work(afs_wq, &afs_server_reaper, 0);
+	mod_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 431984d2e372..57bcb1596530 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -561,12 +561,7 @@ static void afs_vlocation_reaper(struct work_struct *work)
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
 			_debug("delay %lu", delay);
-			if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
-						delay)) {
-				cancel_delayed_work(&afs_vlocation_reap);
-				queue_delayed_work(afs_wq, &afs_vlocation_reap,
-						   delay);
-			}
+			mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
 			break;
 		}
@@ -614,13 +609,10 @@ void afs_vlocation_purge(void)
 	spin_lock(&afs_vlocation_updates_lock);
 	list_del_init(&afs_vlocation_updates);
 	spin_unlock(&afs_vlocation_updates_lock);
-	cancel_delayed_work(&afs_vlocation_update);
-	queue_delayed_work(afs_vlocation_update_worker,
-			   &afs_vlocation_update, 0);
+	mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
 	destroy_workqueue(afs_vlocation_update_worker);
-	cancel_delayed_work(&afs_vlocation_reap);
-	queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+	mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 /*
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 4a38db739ca0..0fb6539b0c8c 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1289,7 +1289,7 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
 	spin_lock(&ls->ls_recover_spin);
 	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
 	spin_unlock(&ls->ls_recover_spin);
-	flush_delayed_work_sync(&sdp->sd_control_work);
+	flush_delayed_work(&sdp->sd_control_work);
 	/* mounted_lock and control_lock will be purged in dlm recovery */
 release:
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a8d90f2f576c..bc737261f234 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1579,7 +1579,7 @@ out:
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
 	ip->i_gl->gl_object = NULL;
-	flush_delayed_work_sync(&ip->i_gl->gl_work);
+	flush_delayed_work(&ip->i_gl->gl_work);
 	gfs2_glock_add_to_lru(ip->i_gl);
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index ee1bc55677f1..553909395270 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -644,7 +644,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 	/* sync the superblock to buffers */
 	sb = inode->i_sb;
-	flush_delayed_work_sync(&HFS_SB(sb)->mdb_work);
+	flush_delayed_work(&HFS_SB(sb)->mdb_work);
 	/* .. finally sync the buffers to disk */
 	err = sync_blockdev(sb->s_bdev);
 	if (!ret)
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 333df07ae3bd..eaa74323663a 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -314,11 +314,11 @@ static void ncp_stop_tasks(struct ncp_server *server) {
 	release_sock(sk);
 	del_timer_sync(&server->timeout_tm);
-	flush_work_sync(&server->rcv.tq);
+	flush_work(&server->rcv.tq);
 	if (sk->sk_socket->type == SOCK_STREAM)
-		flush_work_sync(&server->tx.tq);
+		flush_work(&server->tx.tq);
 	else
-		flush_work_sync(&server->timeout_tq);
+		flush_work(&server->timeout_tq);
 }
 static int ncp_show_options(struct seq_file *seq, struct dentry *root)
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 6930bec91bca..1720d32ffa54 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -117,8 +117,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 		timeout = 5 * HZ;
 	dprintk("%s: requeueing work. Lease period = %ld\n",
 		__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
+	mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
 	set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
 	spin_unlock(&clp->cl_lock);
 }
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 8f9cea1597af..c19897d0fe14 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -327,5 +327,5 @@ void o2quo_exit(void)
 {
 	struct o2quo_state *qs = &o2quo_state;
-	flush_work_sync(&qs->qs_work);
+	flush_work(&qs->qs_work);
 }
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 19e2380fb867..001537f92caf 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -954,7 +954,7 @@ xfs_fs_sync_fs(
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
-		flush_delayed_work_sync(&mp->m_sync_work);
+		flush_delayed_work(&mp->m_sync_work);
 	}
 	return 0;
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 96548176db80..9500caf15acf 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -475,7 +475,7 @@ xfs_flush_inodes(
 	struct xfs_mount *mp = ip->i_mount;
 	queue_work(xfs_syncd_wq, &mp->m_flush_work);
-	flush_work_sync(&mp->m_flush_work);
+	flush_work(&mp->m_flush_work);
 }
 STATIC void