summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Stultz <johnstul@us.ibm.com>2010-02-22 18:09:56 -0800
committerThomas Gleixner <tglx@linutronix.de>2010-04-27 17:32:59 +0200
commit070976b5b038218900648ea4cc88786d5dfcd58d (patch)
tree1c5ffc0d3ba5006981a8e328cee270e96183a584
parent23695896abe0d1c813c2c88f0dd165d435d08689 (diff)
downloadlwn-070976b5b038218900648ea4cc88786d5dfcd58d.tar.gz
lwn-070976b5b038218900648ea4cc88786d5dfcd58d.zip
Fix inc/dec_mnt_count for -rt
With Nick's vfs patches, inc/dec_mnt_count use per-cpu counters, so this patch makes sure we disable preemption before calling. It's not a great fix, but works because count_mnt_count() sums all the percpu values, so each one individually doesn't need to be 0'ed out. I suspect the better fix for -rt is to revert the mnt_count back to an atomic counter. Signed-off-by: John Stultz <johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--fs/namespace.c8
1 file changed, 7 insertions, 1 deletion
diff --git a/fs/namespace.c b/fs/namespace.c
index 7fb7fe3da44e..c1632069b06c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -757,7 +757,9 @@ void mntput_no_expire(struct vfsmount *mnt)
vfsmount_read_unlock(cpu);
goto repeat;
}
+ preempt_disable();
dec_mnt_count(mnt);
+ preempt_enable();
vfsmount_read_unlock(cpu);
return;
@@ -766,7 +768,9 @@ void mntput_no_expire(struct vfsmount *mnt)
repeat:
vfsmount_write_lock();
BUG_ON(mnt->mnt_flags & MNT_MOUNTED);
+ preempt_disable();
dec_mnt_count(mnt);
+ preempt_enable();
if (count_mnt_count(mnt)) {
vfsmount_write_unlock();
return;
@@ -819,7 +823,9 @@ void mnt_unpin(struct vfsmount *mnt)
{
vfsmount_write_lock();
if (mnt->mnt_pinned) {
- inc_mnt_count(mnt);
+ preempt_disable();
+ inc_mnt_count(mnt);
+ preempt_enable();
mnt->mnt_pinned--;
}
vfsmount_write_unlock();