summaryrefslogtreecommitdiff
path: root/fs/mbcache.c
diff options
context:
space:
mode:
authorEric Biggers <ebiggers@google.com>2018-01-07 16:35:20 -0500
committerTheodore Ts'o <tytso@mit.edu>2018-01-07 16:35:20 -0500
commitbbe45d2460da98785cb9453fb0b42d9b2e79dd99 (patch)
treeea4ef3b21c70cada9c70dc00ac18d702de7b9acc /fs/mbcache.c
parent3876bbe27d04b848750d5310a37d6b76b593f648 (diff)
downloadlwn-bbe45d2460da98785cb9453fb0b42d9b2e79dd99.tar.gz
lwn-bbe45d2460da98785cb9453fb0b42d9b2e79dd99.zip
mbcache: revert "fs/mbcache.c: make count_objects() more robust"
This reverts commit d5dabd633922ac5ee5bcc67748f7defb8b211469. This patch did absolutely nothing, because ->c_entry_count is unsigned. In addition if there is a bug in how mbcache maintains its entry count, it needs to be fixed, not just hacked around. (There is no obvious bug, though.) Cc: Jan Kara <jack@suse.cz> Cc: Jiang Biao <jiang.biao2@zte.com.cn> Signed-off-by: Eric Biggers <ebiggers@google.com> Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--fs/mbcache.c3
1 file changed, 0 insertions, 3 deletions
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 46b23bb432fe..49c5b25bfa8c 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -270,9 +270,6 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
struct mb_cache *cache = container_of(shrink, struct mb_cache,
c_shrink);
- /* Unlikely, but not impossible */
- if (unlikely(cache->c_entry_count < 0))
- return 0;
return cache->c_entry_count;
}