author     Wanpeng Li <liwp@linux.vnet.ibm.com>     2012-06-09 11:10:55 +0800
committer  Fengguang Wu <fengguang.wu@intel.com>    2012-06-09 19:54:47 +0800
commit     331cbdeedeb2f4ef01ccb761513708af0fe77098 (patch)
tree       d7177689ddee2137dc54dead6aa306ac2821deb5
parent     eb608e3a344b3af21300360fcf868f8b4e808a8e (diff)
writeback: Fix some comment errors
Signed-off-by: Wanpeng Li <liwp@linux.vnet.ibm.com>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
-rw-r--r--  fs/fs-writeback.c   | 4
-rw-r--r--  fs/super.c          | 2
-rw-r--r--  fs/sync.c           | 2
-rw-r--r--  mm/page-writeback.c | 4
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 41a3ccff18d8..0b2c87e08e90 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -628,8 +628,8 @@ static long writeback_sb_inodes(struct super_block *sb,
}
/*
- * Don't bother with new inodes or inodes beeing freed, first
- * kind does not need peridic writeout yet, and for the latter
+ * Don't bother with new inodes or inodes being freed, first
+ * kind does not need periodic writeout yet, and for the latter
* kind writeout is handled by the freer.
*/
spin_lock(&inode->i_lock);
diff --git a/fs/super.c b/fs/super.c
index cf001775617f..3d65443aea8c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -318,7 +318,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
/*
* grab_super_passive - acquire a passive reference
- * @s: reference we are trying to grab
+ * @sb: reference we are trying to grab
*
* Tries to acquire a passive reference. This is used in places where we
* cannot take an active reference but we need to ensure that the
diff --git a/fs/sync.c b/fs/sync.c
index 11e3d1c44901..1830704df071 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -92,7 +92,7 @@ static void sync_filesystems(int wait)
}
/*
- * sync everything. Start out by waking pdflush, because that writes back
+ * sync everything. Start out by waking flusher, because that writes back
* all queues in parallel.
*/
SYSCALL_DEFINE0(sync)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ec14419e53b5..e5363f34e025 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -946,7 +946,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
* bdi->dirty_ratelimit = balanced_dirty_ratelimit;
*
* However to get a more stable dirty_ratelimit, the below elaborated
- * code makes use of task_ratelimit to filter out sigular points and
+ * code makes use of task_ratelimit to filter out singular points and
* limit the step size.
*
* The below code essentially only uses the relative value of
@@ -969,7 +969,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
* feel and care are stable dirty rate and small position error.
*
* |task_ratelimit - dirty_ratelimit| is used to limit the step size
- * and filter out the sigular points of balanced_dirty_ratelimit. Which
+ * and filter out the singular points of balanced_dirty_ratelimit. Which
* keeps jumping around randomly and can even leap far away at times
* due to the small 200ms estimation period of dirty_rate (we want to
* keep that period small to reduce time lags).