author    David Sterba <dsterba@suse.com>  2017-03-29 20:48:44 +0200
committer David Sterba <dsterba@suse.com>  2017-04-18 14:07:26 +0200
commit    619a974292387343c817f5a36e9df6daeb3ccc60 (patch)
tree      84054ab9ab18ebbbca66634fd07976b0f7f6f782 /fs/btrfs/scrub.c
parent    e501bfe323356ea3f7ef79d4b0d95389b70a7193 (diff)
btrfs: use clear_page where appropriate
There's a helper to clear a whole page, with arch-specific optimized code. The replaced cases do not seem to be in performance-critical code, but we might still get a small percentage gain.

Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--   fs/btrfs/scrub.c   2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c4d1e60e831e..48dd6f170c36 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1643,7 +1643,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
if (spage->io_error) {
void *mapped_buffer = kmap_atomic(spage->page);
- memset(mapped_buffer, 0, PAGE_SIZE);
+ clear_page(mapped_buffer);
flush_dcache_page(spage->page);
kunmap_atomic(mapped_buffer);
}
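For context, a minimal standalone sketch of the pattern this hunk changes is shown below. The helper name zero_whole_page_example is hypothetical and not part of the patch; clear_page() is the kernel's per-architecture full-page clear (on x86 it dispatches to optimized variants such as rep-stos based routines), whereas memset(..., 0, PAGE_SIZE) goes through the generic memset.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Illustrative only: same before/after pattern as the hunk above. */
static void zero_whole_page_example(struct page *page)
{
	void *mapped = kmap_atomic(page);

	/* Before the patch: generic byte-wise clear of one full page. */
	/* memset(mapped, 0, PAGE_SIZE); */

	/* After the patch: arch-optimized clear of exactly one page. */
	clear_page(mapped);

	flush_dcache_page(page);
	kunmap_atomic(mapped);
}

clear_page() requires a page-aligned, page-sized buffer, which is exactly what a kmap_atomic() mapping of a single page provides, so the substitution is safe here.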