author     Joel Becker <joel.becker@oracle.com>    2010-08-16 12:10:17 -0700
committer  Joel Becker <joel.becker@oracle.com>    2010-09-10 08:42:48 -0700
commit     a33f13efe05192e7a805018a2ce2b2afddd04057 (patch)
tree       eb2f8404c82f1464086c4bd3ddef82dcc1db71c8 /fs/libfs.c
parent     3bdb8efd94a73bb137e3315cd831cbc874052b4b (diff)
libfs: Fix shift bug in generic_check_addressable()
generic_check_addressable() erroneously shifts pages down by a block
factor when it should be shifting up. To prevent overflow, we shift
blocks down to pages.
Signed-off-by: Joel Becker <joel.becker@oracle.com>
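To make the shift direction and the overflow argument concrete, here is a small user-space sketch (not part of the commit) that contrasts the old and new checks. It uses assumed values for illustration: a 32-bit pgoff_t, 4KB pages (PAGE_CACHE_SHIFT = 12), 1KB blocks (blocksize_bits = 10), and a hypothetical 2TB filesystem. Shifting the maximum page index down by the page-to-block factor makes the limit 16x too small, so the old check rejects a filesystem the page cache can actually address; shifting the block index down to a page index keeps the arithmetic within 64 bits and accepts it.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only; not taken from a real kernel build. */
#define PAGE_CACHE_SHIFT 12			/* 4KB pages */

int main(void)
{
	unsigned blocksize_bits = 10;		/* 1KB blocks */
	uint32_t pgoff_max = ~(uint32_t)0;	/* pretend pgoff_t is 32 bits */
	uint64_t last_fs_block = 1ULL << 31;	/* last block of a 2TB filesystem */

	/*
	 * Old check: shifts the maximum page index *down* by the page-to-block
	 * factor, leaving a limit 16x smaller than what the page cache can
	 * really address, so this large-but-valid filesystem is rejected.
	 */
	int old_rejects = last_fs_block >
		((uint64_t)pgoff_max >> (PAGE_CACHE_SHIFT - blocksize_bits));

	/*
	 * New check: shift the block index down to a page index instead, then
	 * compare directly against the maximum page index. No shift-up is
	 * needed, so nothing can overflow, and the filesystem is accepted.
	 */
	uint64_t last_fs_page = last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
	int new_rejects = last_fs_page > pgoff_max;

	printf("old check rejects: %d, new check rejects: %d\n",
	       old_rejects, new_rejects);	/* prints 1, 0 */
	return 0;
}
```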
Diffstat (limited to 'fs/libfs.c')
-rw-r--r--  fs/libfs.c  8
1 file changed, 4 insertions, 4 deletions
diff --git a/fs/libfs.c b/fs/libfs.c
index 8debe7b33769..62baa0387d6e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -925,6 +925,8 @@ EXPORT_SYMBOL(generic_file_fsync);
 int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
 {
 	u64 last_fs_block = num_blocks - 1;
+	u64 last_fs_page =
+		last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
 
 	if (unlikely(num_blocks == 0))
 		return 0;
@@ -932,10 +934,8 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
 	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
 		return -EINVAL;
 
-	if ((last_fs_block >
-	     (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
-	    (last_fs_block >
-	     (pgoff_t)(~0ULL) >> (PAGE_CACHE_SHIFT - blocksize_bits))) {
+	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
+	    (last_fs_page > (pgoff_t)(~0ULL))) {
 		return -EFBIG;
 	}
 	return 0;