From 1568ee7e3c6305d9fbb2414bfd4b56e52853d42d Mon Sep 17 00:00:00 2001 From: Guoju Fang Date: Thu, 25 Apr 2019 00:48:26 +0800 Subject: bcache: fix crashes stopping bcache device before read miss done The bio from upper layer is considered completed when bio_complete() returns. In most scenarios bio_complete() is called in search_free(), but when read miss happens, the bio_compete() is called when backing device reading completed, while the struct search is still in use until cache inserting finished. If someone stops the bcache device just then, the device may be closed and released, but after cache inserting finished the struct search will access a freed struct cached_dev. This patch add the reference of bcache device before bio_complete() when read miss happens, and put it after the search is not used. Signed-off-by: Guoju Fang Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index f101bfe8657a..f11123079fe0 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -706,14 +706,14 @@ static void search_free(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); - atomic_dec(&s->d->c->search_inflight); + atomic_dec(&s->iop.c->search_inflight); if (s->iop.bio) bio_put(s->iop.bio); bio_complete(s); closure_debug_destroy(cl); - mempool_free(s, &s->d->c->search); + mempool_free(s, &s->iop.c->search); } static inline struct search *search_alloc(struct bio *bio, @@ -756,13 +756,13 @@ static void cached_dev_bio_complete(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - search_free(cl); cached_dev_put(dc); + search_free(cl); } /* Process reads */ -static void cached_dev_cache_miss_done(struct closure *cl) +static void cached_dev_read_error_done(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); @@ -800,7 +800,22 @@ static void cached_dev_read_error(struct closure *cl) closure_bio_submit(s->iop.c, bio, cl); } - continue_at(cl, cached_dev_cache_miss_done, NULL); + continue_at(cl, cached_dev_read_error_done, NULL); +} + +static void cached_dev_cache_miss_done(struct closure *cl) +{ + struct search *s = container_of(cl, struct search, cl); + struct bcache_device *d = s->d; + + if (s->iop.replace_collision) + bch_mark_cache_miss_collision(s->iop.c, s->d); + + if (s->iop.bio) + bio_free_pages(s->iop.bio); + + cached_dev_bio_complete(cl); + closure_put(&d->cl); } static void cached_dev_read_done(struct closure *cl) @@ -833,6 +848,7 @@ static void cached_dev_read_done(struct closure *cl) if (verify(dc) && s->recoverable && !s->read_dirty_data) bch_data_verify(dc, s->orig_bio); + closure_get(&dc->disk.cl); bio_complete(s); if (s->iop.bio && -- cgit v1.2.3 From 4e0c04ec3a304490a83d5c0355e64176acc9b4ba Mon Sep 17 00:00:00 2001 From: Guoju Fang Date: Thu, 25 Apr 2019 00:48:27 +0800 Subject: bcache: fix inaccurate result of unused buckets To get the amount of unused buckets in sysfs_priority_stats, the code count the buckets which GC_SECTORS_USED is zero. It's correct and should not be overwritten by the count of buckets which prio is zero. 
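To make the distinction concrete, here is a small userspace sketch of the counting the sysfs code keeps; the struct and field names below are illustrative stand-ins, not the kernel's bucket layout:

#include <stdio.h>

/* A bucket is "unused" when it holds no cached sectors, regardless of
 * what its priority field says; deriving "unused" from the prio-sorted
 * array conflates the two cases below. */
struct bucket { unsigned prio; unsigned sectors_used; };

int main(void)
{
        struct bucket buckets[] = {
                { 0, 0 },       /* never allocated: unused */
                { 0, 128 },     /* prio 0 but holds data: not unused */
                { 42, 512 },    /* in use */
        };
        unsigned nbuckets = sizeof(buckets) / sizeof(buckets[0]);
        unsigned unused = 0, i;

        for (i = 0; i < nbuckets; i++)
                if (buckets[i].sectors_used == 0)
                        unused++;

        printf("unused buckets: %u of %u\n", unused, nbuckets);
        return 0;
}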
Signed-off-by: Guoju Fang Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/sysfs.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 17bae9c14ca0..6cd44d3cf906 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -996,8 +996,6 @@ SHOW(__bch_cache) !cached[n - 1]) --n; - unused = ca->sb.nbuckets - n; - while (cached < p + n && *cached == BTREE_PRIO) cached++, n--; -- cgit v1.2.3 From 78d4eb8ad9e1d413449d1b7a060f50b6efa81ebd Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 25 Apr 2019 00:48:28 +0800 Subject: bcache: avoid clang -Wunintialized warning clang has identified a code path in which it thinks a variable may be unused: drivers/md/bcache/alloc.c:333:4: error: variable 'bucket' is used uninitialized whenever 'if' condition is false [-Werror,-Wsometimes-uninitialized] fifo_pop(&ca->free_inc, bucket); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/md/bcache/util.h:219:27: note: expanded from macro 'fifo_pop' #define fifo_pop(fifo, i) fifo_pop_front(fifo, (i)) ^~~~~~~~~~~~~~~~~~~~~~~~~ drivers/md/bcache/util.h:189:6: note: expanded from macro 'fifo_pop_front' if (_r) { \ ^~ drivers/md/bcache/alloc.c:343:46: note: uninitialized use occurs here allocator_wait(ca, bch_allocator_push(ca, bucket)); ^~~~~~ drivers/md/bcache/alloc.c:287:7: note: expanded from macro 'allocator_wait' if (cond) \ ^~~~ drivers/md/bcache/alloc.c:333:4: note: remove the 'if' if its condition is always true fifo_pop(&ca->free_inc, bucket); ^ drivers/md/bcache/util.h:219:27: note: expanded from macro 'fifo_pop' #define fifo_pop(fifo, i) fifo_pop_front(fifo, (i)) ^ drivers/md/bcache/util.h:189:2: note: expanded from macro 'fifo_pop_front' if (_r) { \ ^ drivers/md/bcache/alloc.c:331:15: note: initialize the variable 'bucket' to silence this warning long bucket; ^ This cannot happen in practice because we only enter the loop if there is at least one element in the list. Slightly rearranging the code makes this clearer to both the reader and the compiler, which avoids the warning. Signed-off-by: Arnd Bergmann Reviewed-by: Nathan Chancellor Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/alloc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 5002838ea476..f8986effcb50 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg) * possibly issue discards to them, then we add the bucket to * the free list: */ - while (!fifo_empty(&ca->free_inc)) { + while (1) { long bucket; - fifo_pop(&ca->free_inc, bucket); + if (!fifo_pop(&ca->free_inc, bucket)) + break; if (ca->discard) { mutex_unlock(&ca->set->bucket_lock); -- cgit v1.2.3 From 792732d9852c0e4505aceff4631ea2168fd02480 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 25 Apr 2019 00:48:29 +0800 Subject: bcache: use kmemdup_nul for CACHED_LABEL buffer This patch uses kmemdup_nul to create a NUL-terminated string from dc->sb.label. This is better than open coding it. With this, we can move env[2] initialization into env[] array to make code more elegant. 
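The open-coded pattern being replaced is easy to get subtly wrong, since a fixed-size label field is not guaranteed to be NUL-terminated. Here is a userspace sketch of what kmemdup_nul provides; dup_nul below is a stand-in for the kernel helper, not its implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for kmemdup_nul(): copy len bytes and append a terminating
 * NUL so the result is always a valid C string. */
static char *dup_nul(const char *src, size_t len)
{
        char *dst = malloc(len + 1);

        if (!dst)
                return NULL;
        memcpy(dst, src, len);
        dst[len] = '\0';
        return dst;
}

int main(void)
{
        /* The label field is fixed-size and may lack a trailing NUL. */
        char label[8] = { 'm', 'y', 'c', 'a', 'c', 'h', 'e', '!' };
        char *buf = dup_nul(label, sizeof(label));

        printf("CACHED_LABEL=%s\n", buf ? buf : "");
        free(buf);
        return 0;
}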
Signed-off-by: Geliang Tang Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index a697a3a923cd..6e618cb6126c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -906,21 +906,18 @@ static int cached_dev_status_update(void *arg) void bch_cached_dev_run(struct cached_dev *dc) { struct bcache_device *d = &dc->disk; - char buf[SB_LABEL_SIZE + 1]; + char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL); char *env[] = { "DRIVER=bcache", kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), - NULL, + kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""), NULL, }; - memcpy(buf, dc->sb.label, SB_LABEL_SIZE); - buf[SB_LABEL_SIZE] = '\0'; - env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); - if (atomic_xchg(&dc->running, 1)) { kfree(env[1]); kfree(env[2]); + kfree(buf); return; } @@ -944,6 +941,7 @@ void bch_cached_dev_run(struct cached_dev *dc) kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); kfree(env[1]); kfree(env[2]); + kfree(buf); if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) -- cgit v1.2.3 From 3a3947271cd6ce05c5b30c1250fa99de57410500 Mon Sep 17 00:00:00 2001 From: George Spelvin Date: Thu, 25 Apr 2019 00:48:30 +0800 Subject: bcache: Clean up bch_get_congested() There are a few nits in this function. They could in theory all be separate patches, but that's probably taking small commits too far. 1) I added a brief comment saying what it does. 2) I like to declare pointer parameters "const" where possible for documentation reasons. 3) It uses bitmap_weight(&rand, BITS_PER_LONG) to compute the Hamming weight of a 32-bit random number (giving a random integer with mean 16 and variance 8). Passing by reference in a 64-bit variable is silly; just use hweight32(). 4) Its helper function fract_exp_two is unnecessarily tangled. Gcc can optimize the multiply by (1 << x) to a shift, but it can be written in a much more straightforward way at the cost of one more bit of internal precision. Some analysis reveals that this bit is always available. This shrinks the object code for fract_exp_two(x, 6) from 23 bytes: 0000000000000000 : 0: 89 f9 mov %edi,%ecx 2: c1 e9 06 shr $0x6,%ecx 5: b8 01 00 00 00 mov $0x1,%eax a: d3 e0 shl %cl,%eax c: 83 e7 3f and $0x3f,%edi f: d3 e7 shl %cl,%edi 11: c1 ef 06 shr $0x6,%edi 14: 01 f8 add %edi,%eax 16: c3 retq To 19: 0000000000000017 : 17: 89 f8 mov %edi,%eax 19: 83 e0 3f and $0x3f,%eax 1c: 83 c0 40 add $0x40,%eax 1f: 89 f9 mov %edi,%ecx 21: c1 e9 06 shr $0x6,%ecx 24: d3 e0 shl %cl,%eax 26: c1 e8 06 shr $0x6,%eax 29: c3 retq (Verified with 0 <= frac_bits <= 8, 0 <= x < 16< Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 15 ++++++++------- drivers/md/bcache/request.h | 2 +- drivers/md/bcache/util.h | 26 +++++++++++++++++++------- 3 files changed, 28 insertions(+), 15 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index f11123079fe0..41adcd1546f1 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -329,12 +329,13 @@ void bch_data_insert(struct closure *cl) bch_data_insert_start(cl); } -/* Congested? */ - -unsigned int bch_get_congested(struct cache_set *c) +/* + * Congested? 
Return 0 (not congested) or the limit (in sectors) + * beyond which we should bypass the cache due to congestion. + */ +unsigned int bch_get_congested(const struct cache_set *c) { int i; - long rand; if (!c->congested_read_threshold_us && !c->congested_write_threshold_us) @@ -353,8 +354,7 @@ unsigned int bch_get_congested(struct cache_set *c) if (i > 0) i = fract_exp_two(i, 6); - rand = get_random_int(); - i -= bitmap_weight(&rand, BITS_PER_LONG); + i -= hweight32(get_random_u32()); return i > 0 ? i : 1; } @@ -376,7 +376,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) { struct cache_set *c = dc->disk.c; unsigned int mode = cache_mode(dc); - unsigned int sectors, congested = bch_get_congested(c); + unsigned int sectors, congested; struct task_struct *task = current; struct io *i; @@ -412,6 +412,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) goto rescale; } + congested = bch_get_congested(c); if (!congested && !dc->sequential_cutoff) goto rescale; diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 721bf336ed1a..c64dbd7a91aa 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -33,7 +33,7 @@ struct data_insert_op { BKEY_PADDED(replace_key); }; -unsigned int bch_get_congested(struct cache_set *c); +unsigned int bch_get_congested(const struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 00aab6abcfe4..1fbced94e4cc 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -560,17 +560,29 @@ static inline uint64_t bch_crc64_update(uint64_t crc, return crc; } -/* Does linear interpolation between powers of two */ +/* + * A stepwise-linear pseudo-exponential. This returns 1 << (x >> + * frac_bits), with the less-significant bits filled in by linear + * interpolation. + * + * This can also be interpreted as a floating-point number format, + * where the low frac_bits are the mantissa (with implicit leading + * 1 bit), and the more significant bits are the exponent. + * The return value is 1.mantissa * 2^exponent. + * + * The way this is used, fract_bits is 6 and the largest possible + * input is CONGESTED_MAX-1 = 1023 (exponent 16, mantissa 0x1.fc), + * so the maximum output is 0x1fc00. + */ static inline unsigned int fract_exp_two(unsigned int x, unsigned int fract_bits) { - unsigned int fract = x & ~(~0 << fract_bits); - - x >>= fract_bits; - x = 1 << x; - x += (x * fract) >> fract_bits; + unsigned int mantissa = 1 << fract_bits; /* Implicit bit */ - return x; + mantissa += x & (mantissa - 1); + x >>= fract_bits; /* The exponent */ + /* Largest intermediate value 0x7f0000 */ + return mantissa << x >> fract_bits; } void bch_bio_map(struct bio *bio, void *base); -- cgit v1.2.3 From a4b732a248d12cbdb46999daf0bf288c011335eb Mon Sep 17 00:00:00 2001 From: Liang Chen Date: Thu, 25 Apr 2019 00:48:31 +0800 Subject: bcache: fix a race between cache register and cacheset unregister There is a race between cache device register and cache set unregister. For an already registered cache device, register_bcache will call bch_is_open to iterate through all cachesets and check every cache there. The race occurs if cache_set_free executes at the same time and clears the caches right before ca is dereferenced in bch_is_open_cache. To close the race, let's make sure the clean up work is protected by the bch_register_lock as well. 
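As a generic illustration of that rule (the reproduction steps follow below), here is a minimal pthread sketch in which teardown and lookup serialize on the same lock; the names are illustrative and this is a toy model, not the bcache code path itself:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static int *cache;                      /* stands in for ca in c->cache[] */

static void *lookup_path(void *arg)     /* models the bch_is_open() walk */
{
        pthread_mutex_lock(&register_lock);
        if (cache)
                printf("lookup sees %d\n", *cache);
        pthread_mutex_unlock(&register_lock);
        return NULL;
}

static void *teardown_path(void *arg)   /* models cache_set_free() */
{
        pthread_mutex_lock(&register_lock);     /* the locking the fix adds */
        free(cache);
        cache = NULL;
        pthread_mutex_unlock(&register_lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        cache = malloc(sizeof(*cache));
        if (!cache)
                return 1;
        *cache = 42;
        pthread_create(&a, NULL, lookup_path, NULL);
        pthread_create(&b, NULL, teardown_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}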
This issue can be reproduced as follows, while true; do echo /dev/XXX> /sys/fs/bcache/register ; done& while true; do echo 1> /sys/block/XXX/bcache/set/unregister ; done & and results in the following oops, [ +0.000053] BUG: unable to handle kernel NULL pointer dereference at 0000000000000998 [ +0.000457] #PF error: [normal kernel read fault] [ +0.000464] PGD 800000003ca9d067 P4D 800000003ca9d067 PUD 3ca9c067 PMD 0 [ +0.000388] Oops: 0000 [#1] SMP PTI [ +0.000269] CPU: 1 PID: 3266 Comm: bash Not tainted 5.0.0+ #6 [ +0.000346] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-2.fc28 04/01/2014 [ +0.000472] RIP: 0010:register_bcache+0x1829/0x1990 [bcache] [ +0.000344] Code: b0 48 83 e8 50 48 81 fa e0 e1 10 c0 0f 84 a9 00 00 00 48 89 c6 48 89 ca 0f b7 ba 54 04 00 00 4c 8b 82 60 0c 00 00 85 ff 74 2f <49> 3b a8 98 09 00 00 74 4e 44 8d 47 ff 31 ff 49 c1 e0 03 eb 0d [ +0.000839] RSP: 0018:ffff92ee804cbd88 EFLAGS: 00010202 [ +0.000328] RAX: ffffffffc010e190 RBX: ffff918b5c6b5000 RCX: ffff918b7d8e0000 [ +0.000399] RDX: ffff918b7d8e0000 RSI: ffffffffc010e190 RDI: 0000000000000001 [ +0.000398] RBP: ffff918b7d318340 R08: 0000000000000000 R09: ffffffffb9bd2d7a [ +0.000385] R10: ffff918b7eb253c0 R11: ffffb95980f51200 R12: ffffffffc010e1a0 [ +0.000411] R13: fffffffffffffff2 R14: 000000000000000b R15: ffff918b7e232620 [ +0.000384] FS: 00007f955bec2740(0000) GS:ffff918b7eb00000(0000) knlGS:0000000000000000 [ +0.000420] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ +0.000801] CR2: 0000000000000998 CR3: 000000003cad6000 CR4: 00000000001406e0 [ +0.000837] Call Trace: [ +0.000682] ? _cond_resched+0x10/0x20 [ +0.000691] ? __kmalloc+0x131/0x1b0 [ +0.000710] kernfs_fop_write+0xfa/0x170 [ +0.000733] __vfs_write+0x2e/0x190 [ +0.000688] ? inode_security+0x10/0x30 [ +0.000698] ? selinux_file_permission+0xd2/0x120 [ +0.000752] ? security_file_permission+0x2b/0x100 [ +0.000753] vfs_write+0xa8/0x1a0 [ +0.000676] ksys_write+0x4d/0xb0 [ +0.000699] do_syscall_64+0x3a/0xf0 [ +0.000692] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Signed-off-by: Liang Chen Cc: stable@vger.kernel.org Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 6e618cb6126c..53c5e3e0ac22 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1514,6 +1514,7 @@ static void cache_set_free(struct closure *cl) bch_btree_cache_free(c); bch_journal_free(c); + mutex_lock(&bch_register_lock); for_each_cache(ca, c, i) if (ca) { ca->set = NULL; @@ -1532,7 +1533,6 @@ static void cache_set_free(struct closure *cl) mempool_exit(&c->search); kfree(c->devices); - mutex_lock(&bch_register_lock); list_del(&c->list); mutex_unlock(&bch_register_lock); -- cgit v1.2.3 From 14215ee01f6377c81c25c2cecda729e8811d2826 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:32 +0800 Subject: bcache: move definition of 'int ret' out of macro read_bucket() 'int ret' is defined as a local variable inside macro read_bucket(). Since this macro is called multiple times, and following patches will use a 'int ret' variable in bch_journal_read(), this patch moves definition of 'int ret' from macro read_bucket() to range of function bch_journal_read(). 
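A compact illustration of the scoping issue, using the same GNU statement-expression form as the kernel macro: a 'ret' declared inside the macro shadows the function's 'ret', so the enclosing function can never reuse it. The macros below are toy stand-ins, not read_bucket() itself:

#include <stdio.h>

#define READ_BUCKET_OLD(b)                      \
({                                              \
        int ret = (b) * 2;  /* macro-local, invisible to the caller */ \
        ret;                                    \
})

#define READ_BUCKET_NEW(b)                      \
({                                              \
        ret = (b) * 2;      /* uses the function-scope ret */          \
        ret;                                    \
})

int main(void)
{
        int ret = 0;
        int v;

        v = READ_BUCKET_OLD(3);
        printf("old: value=%d, outer ret=%d\n", v, ret);  /* ret still 0 */

        v = READ_BUCKET_NEW(3);
        printf("new: value=%d, outer ret=%d\n", v, ret);  /* ret now 6 */
        return 0;
}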
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/journal.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index b2fd412715b1..6e18057d1d82 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -147,7 +147,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) { #define read_bucket(b) \ ({ \ - int ret = journal_read_bucket(ca, list, b); \ + ret = journal_read_bucket(ca, list, b); \ __set_bit(b, bitmap); \ if (ret < 0) \ return ret; \ @@ -156,6 +156,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) struct cache *ca; unsigned int iter; + int ret = 0; for_each_cache(ca, c, iter) { struct journal_device *ja = &ca->journal; @@ -267,7 +268,7 @@ bsearch: struct journal_replay, list)->j.seq; - return 0; + return ret; #undef read_bucket } -- cgit v1.2.3 From 1bee2addc0c8470c8aaa65ef0599eeae96dd88bc Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:33 +0800 Subject: bcache: never set KEY_PTRS of journal key to 0 in journal_reclaim() In journal_reclaim() ja->cur_idx of each cache will be update to reclaim available journal buckets. Variable 'int n' is used to count how many cache is successfully reclaimed, then n is set to c->journal.key by SET_KEY_PTRS(). Later in journal_write_unlocked(), a for_each_cache() loop will write the jset data onto each cache. The problem is, if all jouranl buckets on each cache is full, the following code in journal_reclaim(), 529 for_each_cache(ca, c, iter) { 530 struct journal_device *ja = &ca->journal; 531 unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; 532 533 /* No space available on this device */ 534 if (next == ja->discard_idx) 535 continue; 536 537 ja->cur_idx = next; 538 k->ptr[n++] = MAKE_PTR(0, 539 bucket_to_sector(c, ca->sb.d[ja->cur_idx]), 540 ca->sb.nr_this_dev); 541 } 542 543 bkey_init(k); 544 SET_KEY_PTRS(k, n); If there is no available bucket to reclaim, the if() condition at line 534 will always true, and n remains 0. Then at line 544, SET_KEY_PTRS() will set KEY_PTRS field of c->journal.key to 0. Setting KEY_PTRS field of c->journal.key to 0 is wrong. Because in journal_write_unlocked() the journal data is written in following loop, 649 for (i = 0; i < KEY_PTRS(k); i++) { 650-671 submit journal data to cache device 672 } If KEY_PTRS field is set to 0 in jouranl_reclaim(), the journal data won't be written to cache device here. If system crahed or rebooted before bkeys of the lost journal entries written into btree nodes, data corruption will be reported during bcache reload after rebooting the system. Indeed there is only one cache in a cache set, there is no need to set KEY_PTRS field in journal_reclaim() at all. But in order to keep the for_each_cache() logic consistent for now, this patch fixes the above problem by not setting 0 KEY_PTRS of journal key, if there is no bucket available to reclaim. 
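A toy illustration of why a zero KEY_PTRS silently drops the write: the submit loop runs KEY_PTRS(k) times, so zero pointers means zero journal writes are issued. The struct and function below are simplified stand-ins, not the kernel structures:

#include <stdio.h>

struct toy_key { unsigned nr_ptrs; };     /* stands in for KEY_PTRS(k) */

static void write_journal(const struct toy_key *k)
{
        unsigned i, submitted = 0;

        for (i = 0; i < k->nr_ptrs; i++)
                submitted++;              /* stands in for one bio per cache */

        printf("ptrs=%u -> %u journal writes issued\n", k->nr_ptrs, submitted);
}

int main(void)
{
        struct toy_key reclaimed = { .nr_ptrs = 1 };
        struct toy_key starved   = { .nr_ptrs = 0 };  /* nothing reclaimed */

        write_journal(&reclaimed);        /* 1 write */
        write_journal(&starved);          /* 0 writes: the jset is lost */
        return 0;
}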
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe --- drivers/md/bcache/journal.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6e18057d1d82..5180bed911ef 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -541,11 +541,11 @@ static void journal_reclaim(struct cache_set *c) ca->sb.nr_this_dev); } - bkey_init(k); - SET_KEY_PTRS(k, n); - - if (n) + if (n) { + bkey_init(k); + SET_KEY_PTRS(k, n); c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; + } out: if (!journal_full(&c->journal)) __closure_wake_up(&c->journal.wait); @@ -672,6 +672,9 @@ static void journal_write_unlocked(struct closure *cl) ca->journal.seq[ca->journal.cur_idx] = w->data->seq; } + /* If KEY_PTRS(k) == 0, this jset gets lost in air */ + BUG_ON(i == 0); + atomic_dec_bug(&fifo_back(&c->journal.pin)); bch_journal_next(&c->journal); journal_reclaim(c); -- cgit v1.2.3 From ce3e4cfb59cb382f8e5ce359238aa580d4ae7778 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:34 +0800 Subject: bcache: add failure check to run_cache_set() for journal replay Currently run_cache_set() has no return value, if there is failure in bch_journal_replay(), the caller of run_cache_set() has no idea about such failure and just continue to execute following code after run_cache_set(). The internal failure is triggered inside bch_journal_replay() and being handled in async way. This behavior is inefficient, while failure handling inside bch_journal_replay(), cache register code is still running to start the cache set. Registering and unregistering code running as same time may introduce some rare race condition, and make the code to be more hard to be understood. This patch adds return value to run_cache_set(), and returns -EIO if bch_journal_rreplay() fails. Then caller of run_cache_set() may detect such failure and stop registering code flow immedidately inside register_cache_set(). If journal replay fails, run_cache_set() can report error immediately to register_cache_set(). This patch makes the failure handling for bch_journal_replay() be in synchronized way, easier to understand and debug, and avoid poetential race condition for register-and-unregister in same time. 
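A minimal sketch of the resulting control flow, in the kernel's usual set-err-then-goto style: each step that can fail sets a message and jumps to one label, and the negative return lets the caller stop registration at once. Toy code under those assumptions, not the real run_cache_set():

#include <errno.h>
#include <stdio.h>

static int run_cache_set_sketch(int journal_ok)
{
        const char *err;

        err = "replay journal failed";
        if (!journal_ok)
                goto err;

        /* ... remaining bring-up steps, each updating 'err' first ... */
        return 0;
err:
        fprintf(stderr, "cache set error: %s\n", err);
        return -EIO;
}

int main(void)
{
        if (run_cache_set_sketch(0) < 0)
                printf("caller stops registration immediately\n");
        return run_cache_set_sketch(1);
}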
Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 53c5e3e0ac22..8c7fdada0acf 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1773,7 +1773,7 @@ err: return NULL; } -static void run_cache_set(struct cache_set *c) +static int run_cache_set(struct cache_set *c) { const char *err = "cannot allocate memory"; struct cached_dev *dc, *t; @@ -1867,7 +1867,9 @@ static void run_cache_set(struct cache_set *c) if (j->version < BCACHE_JSET_VERSION_UUID) __uuid_write(c); - bch_journal_replay(c, &journal); + err = "bcache: replay journal failed"; + if (bch_journal_replay(c, &journal)) + goto err; } else { pr_notice("invalidating existing data"); @@ -1935,11 +1937,13 @@ static void run_cache_set(struct cache_set *c) flash_devs_run(c); set_bit(CACHE_SET_RUNNING, &c->flags); - return; + return 0; err: closure_sync(&cl); /* XXX: test this, it's broken */ bch_cache_set_error(c, "%s", err); + + return -EIO; } static bool can_attach_cache(struct cache *ca, struct cache_set *c) @@ -2003,8 +2007,11 @@ found: ca->set->cache[ca->sb.nr_this_dev] = ca; c->cache_by_alloc[c->caches_loaded++] = ca; - if (c->caches_loaded == c->sb.nr_in_set) - run_cache_set(c); + if (c->caches_loaded == c->sb.nr_in_set) { + err = "failed to run cache set"; + if (run_cache_set(c) < 0) + goto err; + } return NULL; err: -- cgit v1.2.3 From 2d17456eb1cc78803b999fdd503c2dbd42a7d3da Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:35 +0800 Subject: bcache: add comments for kobj release callback routine Bcache has several routines to release resources in implicit way, they are called when the associated kobj released. This patch adds code comments to notice when and which release callback will be called, - When dc->disk.kobj released: void bch_cached_dev_release(struct kobject *kobj) - When d->kobj released: void bch_flash_dev_release(struct kobject *kobj) - When c->kobj released: void bch_cache_set_release(struct kobject *kobj) - When ca->kobj released void bch_cache_release(struct kobject *kobj) Signed-off-by: Coly Li Reviewed-by: Chaitanya Kulkarni Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 8c7fdada0acf..f8d80adcafec 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1172,6 +1172,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, return 0; } +/* when dc->disk.kobj released */ void bch_cached_dev_release(struct kobject *kobj) { struct cached_dev *dc = container_of(kobj, struct cached_dev, @@ -1324,6 +1325,7 @@ err: /* Flash only volumes */ +/* When d->kobj released */ void bch_flash_dev_release(struct kobject *kobj) { struct bcache_device *d = container_of(kobj, struct bcache_device, @@ -1494,6 +1496,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 
return true; } +/* When c->kobj released */ void bch_cache_set_release(struct kobject *kobj) { struct cache_set *c = container_of(kobj, struct cache_set, kobj); @@ -2021,6 +2024,7 @@ err: /* Cache device */ +/* When ca->kobj released */ void bch_cache_release(struct kobject *kobj) { struct cache *ca = container_of(kobj, struct cache, kobj); -- cgit v1.2.3 From 68d10e6979a3b59e3cd2e90bfcafed79c4cf180a Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:36 +0800 Subject: bcache: return error immediately in bch_journal_replay() When failure happens inside bch_journal_replay(), calling cache_set_err_on() and handling the failure in async way is not a good idea. Because after bch_journal_replay() returns, registering code will continue to execute following steps, and unregistering code triggered by cache_set_err_on() is running in same time. First it is unnecessary to handle failure and unregister cache set in an async way, second there might be potential race condition to run register and unregister code for same cache set. So in this patch, if failure happens in bch_journal_replay(), we don't call cache_set_err_on(), and just print out the same error message to kernel message buffer, then return -EIO immediately caller. Then caller can detect such failure and handle it in synchrnozied way. Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/journal.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 5180bed911ef..828ab474696a 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -331,9 +331,12 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) list_for_each_entry(i, list, list) { BUG_ON(i->pin && atomic_read(i->pin) != 1); - cache_set_err_on(n != i->j.seq, s, -"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", - n, i->j.seq - 1, start, end); + if (n != i->j.seq) { + pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + ret = -EIO; + goto err; + } for (k = i->j.start; k < bset_bkey_last(&i->j); -- cgit v1.2.3 From 88c12d42d2bb6e05deb3cfd24d12f6fe80544575 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:37 +0800 Subject: bcache: add error check for calling register_bdev() This patch adds return value to register_bdev(). Then if failure happens inside register_bdev(), its caller register_bcache() may detect and handle the failure more properly. 
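For reference, a userspace sketch of the sysfs store convention the register handler follows: return the number of bytes consumed on success, or a negative errno on failure, which is why ret now starts at -EINVAL and is only set to size once registration succeeds. Toy parsing logic, not the kernel handler:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static long register_store_sketch(const char *buf, size_t size)
{
        long ret = -EINVAL;

        if (strncmp(buf, "/dev/", 5) != 0)
                goto out;               /* failure: ret stays -EINVAL */

        ret = (long)size;               /* success: whole write consumed */
out:
        return ret;
}

int main(void)
{
        printf("good path -> %ld\n", register_store_sketch("/dev/sdb", 8));
        printf("bad path  -> %ld\n", register_store_sketch("garbage", 7));
        return 0;
}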
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f8d80adcafec..fde334939545 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1279,7 +1279,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) /* Cached device - bcache superblock */ -static void register_bdev(struct cache_sb *sb, struct page *sb_page, +static int register_bdev(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cached_dev *dc) { @@ -1317,10 +1317,11 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) bch_cached_dev_run(dc); - return; + return 0; err: pr_notice("error %s: %s", dc->backing_dev_name, err); bcache_device_stop(&dc->disk); + return -EIO; } /* Flash only volumes */ @@ -2271,7 +2272,7 @@ static bool bch_is_open(struct block_device *bdev) static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, const char *buffer, size_t size) { - ssize_t ret = size; + ssize_t ret = -EINVAL; const char *err = "cannot allocate memory"; char *path = NULL; struct cache_sb *sb = NULL; @@ -2305,7 +2306,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!IS_ERR(bdev)) bdput(bdev); if (attr == &ksysfs_register_quiet) - goto out; + goto quiet_out; } goto err; } @@ -2326,8 +2327,10 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, goto err_close; mutex_lock(&bch_register_lock); - register_bdev(sb, sb_page, bdev, dc); + ret = register_bdev(sb, sb_page, bdev, dc); mutex_unlock(&bch_register_lock); + if (ret < 0) + goto err; } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); @@ -2337,6 +2340,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (register_cache(sb, sb_page, bdev, ca) != 0) goto err; } +quiet_out: + ret = size; out: if (sb_page) put_page(sb_page); @@ -2349,7 +2354,6 @@ err_close: blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); err: pr_info("error %s: %s", path, err); - ret = -EINVAL; goto out; } -- cgit v1.2.3 From bb6d355c2aff42d4075a8e7428dd72cb009d6143 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:38 +0800 Subject: bcache: Add comments for blkdev_put() in registration code path Add comments to explain why in register_bcache() blkdev_put() won't be called in two location. Add comments to explain why blkdev_put() must be called in register_cache() when cache_alloc() failed. Signed-off-by: Coly Li Reviewed-by: Chaitanya Kulkarni Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fde334939545..fa856b2ca7af 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2189,6 +2189,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, ret = cache_alloc(ca); if (ret != 0) { + /* + * If we failed here, it means ca->kobj is not initialized yet, + * kobject_put() won't be called and there is no chance to + * call blkdev_put() to bdev in bch_cache_release(). So we + * explicitly call blkdev_put() here. 
+ */ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); if (ret == -ENOMEM) err = "cache_alloc(): -ENOMEM"; @@ -2329,6 +2335,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, mutex_lock(&bch_register_lock); ret = register_bdev(sb, sb_page, bdev, dc); mutex_unlock(&bch_register_lock); + /* blkdev_put() will be called in cached_dev_free() */ if (ret < 0) goto err; } else { @@ -2337,6 +2344,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!ca) goto err_close; + /* blkdev_put() will be called in bch_cache_release() */ if (register_cache(sb, sb_page, bdev, ca) != 0) goto err; } -- cgit v1.2.3 From 63d63b51d70fb5155754dcf0baa2c1700bcafcb0 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:39 +0800 Subject: bcache: add comments for closure_fn to be called in closure_queue() Add code comments to explain which call back function might be called for the closure_queue(). This is an effort to make code to be more understandable for readers. Signed-off-by: Coly Li Reviewed-by: Chaitanya Kulkarni Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fa856b2ca7af..0363ab534c8e 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -662,6 +662,11 @@ static const struct block_device_operations bcache_ops = { void bcache_device_stop(struct bcache_device *d) { if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) + /* + * closure_fn set to + * - cached device: cached_dev_flush() + * - flash dev: flash_dev_flush() + */ closure_queue(&d->cl); } @@ -1675,6 +1680,7 @@ static void __cache_set_unregister(struct closure *cl) void bch_cache_set_stop(struct cache_set *c) { if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) + /* closure_fn set to __cache_set_unregister() */ closure_queue(&c->caching); } -- cgit v1.2.3 From eb8cbb6df38f6e5124a3d5f1f8a3dbf519537c60 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Thu, 25 Apr 2019 00:48:40 +0800 Subject: bcache: improve bcache_reboot() This patch tries to release mutex bch_register_lock early, to give chance to stop cache set and bcache device early. This patch also expends time out of stopping all bcache device from 2 seconds to 10 seconds, because stopping writeback rate update worker may delay for 5 seconds, 2 seconds is not enough. After this patch applied, stopping bcache devices during system reboot or shutdown is very hard to be observed any more. Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 0363ab534c8e..3f34b96ebbc3 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2397,10 +2397,19 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) list_for_each_entry_safe(dc, tdc, &uncached_devices, list) bcache_device_stop(&dc->disk); + mutex_unlock(&bch_register_lock); + + /* + * Give an early chance for other kthreads and + * kworkers to stop themselves + */ + schedule(); + /* What's a condition variable? 
*/ while (1) { - long timeout = start + 2 * HZ - jiffies; + long timeout = start + 10 * HZ - jiffies; + mutex_lock(&bch_register_lock); stopped = list_empty(&bch_cache_sets) && list_empty(&uncached_devices); @@ -2412,7 +2421,6 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) mutex_unlock(&bch_register_lock); schedule_timeout(timeout); - mutex_lock(&bch_register_lock); } finish_wait(&unregister_wait, &wait); -- cgit v1.2.3 From 631207314d88e9091be02fbdd1fdadb1ae2ed79a Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Thu, 25 Apr 2019 00:48:41 +0800 Subject: bcache: fix failure in journal relplay journal replay failed with messages: Sep 10 19:10:43 ceph kernel: bcache: error on bb379a64-e44e-4812-b91d-a5599871a3b1: bcache: journal entries 2057493-2057567 missing! (replaying 2057493-2076601), disabling caching The reason is in journal_reclaim(), when discard is enabled, we send discard command and reclaim those journal buckets whose seq is old than the last_seq_now, but before we write a journal with last_seq_now, the machine is restarted, so the journal with the last_seq_now is not written to the journal bucket, and the last_seq_wrote in the newest journal is old than last_seq_now which we expect to be, so when we doing replay, journals from last_seq_wrote to last_seq_now are missing. It's hard to write a journal immediately after journal_reclaim(), and it harmless if those missed journal are caused by discarding since those journals are already wrote to btree node. So, if miss seqs are started from the beginning journal, we treat it as normal, and only print a message to show the miss journal, and point out it maybe caused by discarding. Patch v2 add a judgement condition to ignore the missed journal only when discard enabled as Coly suggested. (Coly Li: rebase the patch with other changes in bch_journal_replay()) Signed-off-by: Tang Junhui Tested-by: Dennis Schridde Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/journal.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 828ab474696a..f9afb164b887 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -318,6 +318,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) } } +bool is_discard_enabled(struct cache_set *s) +{ + struct cache *ca; + unsigned int i; + + for_each_cache(ca, s, i) + if (ca->discard) + return true; + + return false; +} + int bch_journal_replay(struct cache_set *s, struct list_head *list) { int ret = 0, keys = 0, entries = 0; @@ -332,10 +344,15 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) BUG_ON(i->pin && atomic_read(i->pin) != 1); if (n != i->j.seq) { - pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", - n, i->j.seq - 1, start, end); - ret = -EIO; - goto err; + if (n == start && is_discard_enabled(s)) + pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + else { + pr_err("bcache: journal entries %llu-%llu missing! 
(replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + ret = -EIO; + goto err; + } } for (k = i->j.start; -- cgit v1.2.3 From f16277ca20acf2c213fcd4b645f4c1cffcadf533 Mon Sep 17 00:00:00 2001 From: Shenghui Wang Date: Thu, 25 Apr 2019 00:48:42 +0800 Subject: bcache: fix wrong usage use-after-freed on keylist in out_nocoalesce branch of btree_gc_coalesce Elements of keylist should be accessed before the list is freed. Move bch_keylist_free() calling after the while loop to avoid wrong content accessed. Signed-off-by: Shenghui Wang Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 64def336f053..b139858b0802 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1476,11 +1476,11 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, out_nocoalesce: closure_sync(&cl); - bch_keylist_free(&keylist); while ((k = bch_keylist_pop(&keylist))) if (!bkey_cmp(k, &ZERO_KEY)) atomic_dec(&b->c->prio_blocked); + bch_keylist_free(&keylist); for (i = 0; i < nodes; i++) if (!IS_ERR_OR_NULL(new_nodes[i])) { -- cgit v1.2.3 From 95f18c9d1310730d075499a75aaf13bcd60405a7 Mon Sep 17 00:00:00 2001 From: Shenghui Wang Date: Thu, 25 Apr 2019 00:48:43 +0800 Subject: bcache: avoid potential memleak of list of journal_replay(s) in the CACHE_SYNC branch of run_cache_set In the CACHE_SYNC branch of run_cache_set(), LIST_HEAD(journal) is used to collect journal_replay(s) and filled by bch_journal_read(). If all goes well, bch_journal_replay() will release the list of jounal_replay(s) at the end of the branch. If something goes wrong, code flow will jump to the label "err:" and leave the list unreleased. This patch will release the list of journal_replay(s) in the case of error detected. v1 -> v2: * Move the release code to the location after label 'err:' to simply the change. Signed-off-by: Shenghui Wang Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 3f34b96ebbc3..0ffe9acee9d8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1790,6 +1790,8 @@ static int run_cache_set(struct cache_set *c) struct cache *ca; struct closure cl; unsigned int i; + LIST_HEAD(journal); + struct journal_replay *l; closure_init_stack(&cl); @@ -1949,6 +1951,12 @@ static int run_cache_set(struct cache_set *c) set_bit(CACHE_SET_RUNNING, &c->flags); return 0; err: + while (!list_empty(&journal)) { + l = list_first_entry(&journal, struct journal_replay, list); + list_del(&l->list); + kfree(l); + } + closure_sync(&cl); /* XXX: test this, it's broken */ bch_cache_set_error(c, "%s", err); -- cgit v1.2.3 From cdca22bcbc64fc83dadb8d927df400a8d86ddabb Mon Sep 17 00:00:00 2001 From: Coly Li Date: Tue, 30 Apr 2019 22:02:25 +0800 Subject: bcache: remove redundant LIST_HEAD(journal) from run_cache_set() Commit 95f18c9d1310 ("bcache: avoid potential memleak of list of journal_replay(s) in the CACHE_SYNC branch of run_cache_set") forgets to remove the original define of LIST_HEAD(journal), which makes the change no take effect. This patch removes redundant variable LIST_HEAD(journal) from run_cache_set(), to make Shenghui's fix working. 
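A tiny sketch of the shadowing effect described above, with plain ints standing in for the list heads: the inner declaration hides the function-scope one, so the cleanup path only ever sees the (empty) outer variable:

#include <stdio.h>

int main(void)
{
        int journal = 0;                /* function-scope "list" */

        {
                int journal = 0;        /* redundant inner declaration */

                journal = 3;            /* entries collected here */
                (void)journal;
        }

        /* error path: the outer 'journal' never saw the entries */
        printf("entries visible to cleanup: %d\n", journal);   /* prints 0 */
        return 0;
}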
Fixes: 95f18c9d1310 ("bcache: avoid potential memleak of list of journal_replay(s) in the CACHE_SYNC branch of run_cache_set") Reported-by: Juha Aatrokoski Cc: Shenghui Wang Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 0ffe9acee9d8..1b63ac876169 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1800,7 +1800,6 @@ static int run_cache_set(struct cache_set *c) set_gc_sectors(c); if (CACHE_SYNC(&c->sb)) { - LIST_HEAD(journal); struct bkey *k; struct jset *j; -- cgit v1.2.3 From f936b06ae53815a7633b30ffd8cf5661ac826b3a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 25 Apr 2019 09:02:59 +0200 Subject: bcache: clean up do_btree_node_write a bit Use a variable containing the buffer address instead of the to be removed integer iterator from bio_for_each_segment_all. Suggested-by: Matthew Wilcox Reviewed-by: Hannes Reinecke Acked-by: Coly Li Reviewed-by: Matthew Wilcox Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/md/bcache') diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index b139858b0802..3a9f8ed437de 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -431,12 +431,13 @@ static void do_btree_node_write(struct btree *b) if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { int j; struct bio_vec *bv; - void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); + void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); struct bvec_iter_all iter_all; - bio_for_each_segment_all(bv, b->bio, j, iter_all) - memcpy(page_address(bv->bv_page), - base + j * PAGE_SIZE, PAGE_SIZE); + bio_for_each_segment_all(bv, b->bio, j, iter_all) { + memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); + addr += PAGE_SIZE; + } bch_submit_bbio(b->bio, b->c, &k.key, 0); -- cgit v1.2.3 From 2b070cfe582b8e99fec6ada57d2e59e194aae202 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 25 Apr 2019 09:03:00 +0200 Subject: block: remove the i argument to bio_for_each_segment_all We only have two callers that need the integer loop iterator, and they can easily maintain it themselves. 
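A userspace analog of the interface change: the iteration macro no longer hands back an integer index, so a caller that still needs one keeps its own counter. The toy types and macro below are illustrative, not the real bio/bvec iterator:

#include <stdio.h>

struct toy_vec { int len; };
struct toy_iter { int idx; };

#define toy_for_each_vec(bv, arr, n, it)                        \
        for ((it).idx = 0;                                      \
             (it).idx < (n) && ((bv) = &(arr)[(it).idx], 1);    \
             (it).idx++)

int main(void)
{
        struct toy_vec vecs[] = { { 512 }, { 1024 }, { 2048 } };
        struct toy_vec *bv;
        struct toy_iter iter;
        int page_len[3];
        int j = 0;              /* caller-maintained index, as in the patch */

        toy_for_each_vec(bv, vecs, 3, iter)
                page_len[j++] = bv->len;

        for (j = 0; j < 3; j++)
                printf("page_len[%d] = %d\n", j, page_len[j]);
        return 0;
}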
Suggested-by: Matthew Wilcox Reviewed-by: Johannes Thumshirn Acked-by: David Sterba Reviewed-by: Hannes Reinecke Acked-by: Coly Li Reviewed-by: Matthew Wilcox Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 29 ++++++++++------------------- block/bounce.c | 3 +-- drivers/md/bcache/btree.c | 3 +-- drivers/md/dm-crypt.c | 3 +-- drivers/md/raid1.c | 6 +++--- drivers/staging/erofs/data.c | 3 +-- drivers/staging/erofs/unzip_vle.c | 3 +-- fs/block_dev.c | 6 ++---- fs/btrfs/compression.c | 3 +-- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/extent_io.c | 10 ++++------ fs/btrfs/inode.c | 8 ++++---- fs/btrfs/raid56.c | 3 +-- fs/crypto/bio.c | 3 +-- fs/direct-io.c | 3 +-- fs/ext4/page-io.c | 3 +-- fs/ext4/readpage.c | 3 +-- fs/f2fs/data.c | 9 +++------ fs/gfs2/lops.c | 3 +-- fs/gfs2/meta_io.c | 3 +-- fs/iomap.c | 6 ++---- fs/mpage.c | 3 +-- fs/xfs/xfs_aops.c | 3 +-- include/linux/bio.h | 5 ++--- 24 files changed, 47 insertions(+), 81 deletions(-) (limited to 'drivers/md/bcache') diff --git a/block/bio.c b/block/bio.c index 662d45752ec5..9ad0d00cdc9b 100644 --- a/block/bio.c +++ b/block/bio.c @@ -874,9 +874,8 @@ static void bio_get_pages(struct bio *bio) { struct bvec_iter_all iter_all; struct bio_vec *bvec; - int i; - bio_for_each_segment_all(bvec, bio, i, iter_all) + bio_for_each_segment_all(bvec, bio, iter_all) get_page(bvec->bv_page); } @@ -884,9 +883,8 @@ static void bio_release_pages(struct bio *bio) { struct bvec_iter_all iter_all; struct bio_vec *bvec; - int i; - bio_for_each_segment_all(bvec, bio, i, iter_all) + bio_for_each_segment_all(bvec, bio, iter_all) put_page(bvec->bv_page); } @@ -1166,11 +1164,10 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data, */ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) { - int i; struct bio_vec *bvec; struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { ssize_t ret; ret = copy_page_from_iter(bvec->bv_page, @@ -1198,11 +1195,10 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) */ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) { - int i; struct bio_vec *bvec; struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { ssize_t ret; ret = copy_page_to_iter(bvec->bv_page, @@ -1223,10 +1219,9 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) void bio_free_pages(struct bio *bio) { struct bio_vec *bvec; - int i; struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i, iter_all) + bio_for_each_segment_all(bvec, bio, iter_all) __free_page(bvec->bv_page); } EXPORT_SYMBOL(bio_free_pages); @@ -1464,7 +1459,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, return bio; out_unmap: - bio_for_each_segment_all(bvec, bio, j, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { put_page(bvec->bv_page); } bio_put(bio); @@ -1474,13 +1469,12 @@ struct bio *bio_map_user_iov(struct request_queue *q, static void __bio_unmap_user(struct bio *bio) { struct bio_vec *bvec; - int i; struct bvec_iter_all iter_all; /* * make sure we dirty pages we wrote to */ - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { if (bio_data_dir(bio) == READ) set_page_dirty_lock(bvec->bv_page); @@ -1571,10 +1565,9 @@ static void bio_copy_kern_endio_read(struct bio *bio) { char *p = bio->bi_private; struct bio_vec *bvec; - int i; struct bvec_iter_all 
iter_all; - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { memcpy(p, page_address(bvec->bv_page), bvec->bv_len); p += bvec->bv_len; } @@ -1682,10 +1675,9 @@ cleanup: void bio_set_pages_dirty(struct bio *bio) { struct bio_vec *bvec; - int i; struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { if (!PageCompound(bvec->bv_page)) set_page_dirty_lock(bvec->bv_page); } @@ -1734,10 +1726,9 @@ void bio_check_pages_dirty(struct bio *bio) { struct bio_vec *bvec; unsigned long flags; - int i; struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) goto defer; } diff --git a/block/bounce.c b/block/bounce.c index 47eb7e936e22..f8ed677a1bf7 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -163,14 +163,13 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool) { struct bio *bio_orig = bio->bi_private; struct bio_vec *bvec, orig_vec; - int i; struct bvec_iter orig_iter = bio_orig->bi_iter; struct bvec_iter_all iter_all; /* * free up bounce indirect pages used */ - bio_for_each_segment_all(bvec, bio, i, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { orig_vec = bio_iter_iovec(bio_orig, orig_iter); if (bvec->bv_page != orig_vec.bv_page) { dec_zone_page_state(bvec->bv_page, NR_BOUNCE); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 3a9f8ed437de..773f5fdad25f 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -429,12 +429,11 @@ static void do_btree_node_write(struct btree *b) bset_sector_offset(&b->keys, i)); if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { - int j; struct bio_vec *bv; void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); struct bvec_iter_all iter_all; - bio_for_each_segment_all(bv, b->bio, j, iter_all) { + bio_for_each_segment_all(bv, b->bio, iter_all) { memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); addr += PAGE_SIZE; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index dd6565798778..cca68546945b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1445,11 +1445,10 @@ out: static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) { - unsigned int i; struct bio_vec *bv; struct bvec_iter_all iter_all; - bio_for_each_segment_all(bv, clone, i, iter_all) { + bio_for_each_segment_all(bv, clone, iter_all) { BUG_ON(!bv->bv_page); mempool_free(bv->bv_page, &cc->page_pool); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fdf451aac369..0c8a098d220e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2110,7 +2110,7 @@ static void process_checks(struct r1bio *r1_bio) } r1_bio->read_disk = primary; for (i = 0; i < conf->raid_disks * 2; i++) { - int j; + int j = 0; struct bio *pbio = r1_bio->bios[primary]; struct bio *sbio = r1_bio->bios[i]; blk_status_t status = sbio->bi_status; @@ -2125,8 +2125,8 @@ static void process_checks(struct r1bio *r1_bio) /* Now we can 'fixup' the error value */ sbio->bi_status = 0; - bio_for_each_segment_all(bi, sbio, j, iter_all) - page_len[j] = bi->bv_len; + bio_for_each_segment_all(bi, sbio, iter_all) + page_len[j++] = bi->bv_len; if (!status) { for (j = vcnt; j-- ; ) { diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c index 81af768e7248..9f04d7466c55 100644 --- a/drivers/staging/erofs/data.c +++ 
b/drivers/staging/erofs/data.c
@@ -17,12 +17,11 @@
 
 static inline void read_endio(struct bio *bio)
 {
-	int i;
 	struct bio_vec *bvec;
 	const blk_status_t err = bio->bi_status;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 
 		/* page is already locked */
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 31eef8395774..59b9f37d5c00 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -844,14 +844,13 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
 
 static inline void z_erofs_vle_read_endio(struct bio *bio)
 {
 	const blk_status_t err = bio->bi_status;
-	unsigned int i;
 	struct bio_vec *bvec;
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
 	struct address_space *mc = NULL;
 #endif
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 		bool cachemngd = false;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 24615c76c1d0..8abc6570d29f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -210,7 +210,6 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	struct bio bio;
 	ssize_t ret;
 	blk_qc_t qc;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	if ((pos | iov_iter_alignment(iter)) &
@@ -261,7 +260,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	}
 	__set_current_state(TASK_RUNNING);
 
-	bio_for_each_segment_all(bvec, &bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, &bio, iter_all) {
 		if (should_dirty && !PageCompound(bvec->bv_page))
 			set_page_dirty_lock(bvec->bv_page);
 		put_page(bvec->bv_page);
@@ -339,9 +338,8 @@ static void blkdev_bio_end_io(struct bio *bio)
 	if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
 		struct bvec_iter_all iter_all;
 		struct bio_vec *bvec;
-		int i;
 
-		bio_for_each_segment_all(bvec, bio, i, iter_all)
+		bio_for_each_segment_all(bvec, bio, iter_all)
 			put_page(bvec->bv_page);
 	}
 	bio_put(bio);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 4f2a8ae0aa42..6313dc65209e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -160,7 +160,6 @@ csum_failed:
 	if (cb->errors) {
 		bio_io_error(cb->orig_bio);
 	} else {
-		int i;
 		struct bio_vec *bvec;
 		struct bvec_iter_all iter_all;
 
@@ -169,7 +168,7 @@
 		 * checked so the end_io handlers know about it
 		 */
 		ASSERT(!bio_flagged(bio, BIO_CLONED));
-		bio_for_each_segment_all(bvec, cb->orig_bio, i, iter_all)
+		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
 			SetPageChecked(bvec->bv_page);
 
 		bio_endio(cb->orig_bio);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6fe9197f6ee4..c333e79408ff 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -832,11 +832,11 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	struct btrfs_root *root;
-	int i, ret = 0;
+	int ret = 0;
 	struct bvec_iter_all iter_all;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
 		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
 		if (ret)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ca8b8e785cf3..c85505c36fa6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2451,11 +2451,10 @@ static void end_bio_extent_writepage(struct bio *bio)
 	struct bio_vec *bvec;
 	u64 start;
 	u64 end;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2523,11 +2522,10 @@ static void end_bio_extent_readpage(struct bio *bio)
 	u64 extent_len = 0;
 	int mirror;
 	int ret;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -3643,11 +3641,11 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	struct extent_buffer *eb;
-	int i, done;
+	int done;
 	struct bvec_iter_all iter_all;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 
 		eb = (struct extent_buffer *)page->private;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 82fdda8ff5ab..10a8d08d3d29 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7828,7 +7828,6 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 	struct inode *inode = done->inode;
 	struct bio_vec *bvec;
 	struct extent_io_tree *io_tree, *failure_tree;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	if (bio->bi_status)
@@ -7841,7 +7840,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 
 	done->uptodate = 1;
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
-	bio_for_each_segment_all(bvec, bio, i, iter_all)
+	bio_for_each_segment_all(bvec, bio, iter_all)
 		clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
 				 io_tree, done->start, bvec->bv_page,
 				 btrfs_ino(BTRFS_I(inode)), 0);
@@ -7919,7 +7918,7 @@ static void btrfs_retry_endio(struct bio *bio)
 	struct bio_vec *bvec;
 	int uptodate;
 	int ret;
-	int i;
+	int i = 0;
 	struct bvec_iter_all iter_all;
 
 	if (bio->bi_status)
@@ -7934,7 +7933,7 @@ static void btrfs_retry_endio(struct bio *bio)
 	failure_tree = &BTRFS_I(inode)->io_failure_tree;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
 					     bvec->bv_offset, done->start,
 					     bvec->bv_len);
@@ -7946,6 +7945,7 @@
 					bvec->bv_offset);
 		else
 			uptodate = 0;
+		i++;
 	}
 
 	done->uptodate = uptodate;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 67a6f7d47402..f3d0576dd327 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1442,12 +1442,11 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
 
 static void set_bio_pages_uptodate(struct bio *bio)
 {
 	struct bio_vec *bvec;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all)
+	bio_for_each_segment_all(bvec, bio, iter_all)
 		SetPageUptodate(bvec->bv_page);
 }
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 5759bcd018cd..8f3a8bc15d98 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -29,10 +29,9 @@
 static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 {
 	struct bio_vec *bv;
-	int i;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bv, bio, i, iter_all) {
+	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 		int ret = fscrypt_decrypt_page(page->mapping->host, page,
 				PAGE_SIZE, 0, page->index);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 9bb015bc4a83..fbe885d68035 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -538,7 +538,6 @@ static struct bio *dio_await_one(struct dio *dio)
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 {
 	struct bio_vec *bvec;
-	unsigned i;
 	blk_status_t err = bio->bi_status;
 
 	if (err) {
@@ -553,7 +552,7 @@ static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 	} else {
 		struct bvec_iter_all iter_all;
 
-		bio_for_each_segment_all(bvec, bio, i, iter_all) {
+		bio_for_each_segment_all(bvec, bio, iter_all) {
 			struct page *page = bvec->bv_page;
 
 			if (dio->op == REQ_OP_READ && !PageCompound(page) &&
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 3e9298e6a705..4690618a92e9 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -61,11 +61,10 @@ static void buffer_io_error(struct buffer_head *bh)
 
 static void ext4_finish_bio(struct bio *bio)
 {
-	int i;
 	struct bio_vec *bvec;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 #ifdef CONFIG_FS_ENCRYPTION
 		struct page *data_page = NULL;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3adadf461825..3629a74b7f94 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -71,7 +71,6 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
 static void mpage_end_io(struct bio *bio)
 {
 	struct bio_vec *bv;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	if (ext4_bio_encrypted(bio)) {
@@ -82,7 +81,7 @@ static void mpage_end_io(struct bio *bio)
 			return;
 		}
 	}
-	bio_for_each_segment_all(bv, bio, i, iter_all) {
+	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 
 		if (!bio->bi_status) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9727944139f2..64040e998439 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -86,10 +86,9 @@ static void __read_end_io(struct bio *bio)
 {
 	struct page *page;
 	struct bio_vec *bv;
-	int i;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bv, bio, i, iter_all) {
+	bio_for_each_segment_all(bv, bio, iter_all) {
 		page = bv->bv_page;
 
 		/* PG_error was set if any post_read step failed */
@@ -164,7 +163,6 @@ static void f2fs_write_end_io(struct bio *bio)
 {
 	struct f2fs_sb_info *sbi = bio->bi_private;
 	struct bio_vec *bvec;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
@@ -172,7 +170,7 @@ static void f2fs_write_end_io(struct bio *bio)
 		bio->bi_status = BLK_STS_IOERR;
 	}
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 		enum count_type type = WB_DATA_TYPE(page);
 
@@ -349,7 +347,6 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 {
 	struct bio_vec *bvec;
 	struct page *target;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	if (!io->bio)
@@ -358,7 +355,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 	if (!inode && !page && !ino)
 		return true;
 
-	bio_for_each_segment_all(bvec, io->bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, io->bio, iter_all) {
 		if (bvec->bv_page->mapping)
 			target = bvec->bv_page;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 8722c60b11fe..6f09b5e3dd6e 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -207,7 +207,6 @@ static void gfs2_end_log_write(struct bio *bio)
 	struct gfs2_sbd *sdp = bio->bi_private;
 	struct bio_vec *bvec;
 	struct page *page;
-	int i;
 	struct bvec_iter_all iter_all;
 
 	if (bio->bi_status) {
@@ -216,7 +215,7 @@ static void gfs2_end_log_write(struct bio *bio)
 		wake_up(&sdp->sd_logd_waitq);
 	}
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		page = bvec->bv_page;
 		if (page_has_buffers(page))
 			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3201342404a7..ff86e1d4f8ff 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -189,10 +189,9 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 static void gfs2_meta_read_endio(struct bio *bio)
 {
 	struct bio_vec *bvec;
-	int i;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
+	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
 		struct buffer_head *bh = page_buffers(page);
 		unsigned int len = bvec->bv_len;
diff --git a/fs/iomap.c b/fs/iomap.c
index abdd18e404f8..12a656271076 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -273,10 +273,9 @@ iomap_read_end_io(struct bio *bio)
 {
 	int error = blk_status_to_errno(bio->bi_status);
 	struct bio_vec *bvec;
-	int i;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, i, iter_all)
+	bio_for_each_segment_all(bvec, bio, iter_all)
 		iomap_read_page_end_io(bvec, error);
 	bio_put(bio);
 }
@@ -1592,9 +1591,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
 	if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
 		struct bvec_iter_all iter_all;
 		struct bio_vec *bvec;
-		int i;
 
-		bio_for_each_segment_all(bvec, bio, i, iter_all)
+		bio_for_each_segment_all(bvec, bio, iter_all)
 			put_page(bvec->bv_page);
 	}
 	bio_put(bio);
diff --git a/fs/mpage.c b/fs/mpage.c
index 3f19da75178b..436a85260394 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -47,10 +47,9 @@
 static void mpage_end_io(struct bio *bio)
 {
 	struct bio_vec *bv;
-	int i;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bv, bio, i, iter_all) {
+	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 		page_endio(page, bio_op(bio),
 			   blk_status_to_errno(bio->bi_status));
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3619e9e8d359..47941bbaac02 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -98,7 +98,6 @@ xfs_destroy_ioend(
 
 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
 		struct bio_vec *bvec;
-		int i;
 		struct bvec_iter_all iter_all;
 
 		/*
@@ -111,7 +110,7 @@ xfs_destroy_ioend(
 		next = bio->bi_private;
 
 		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bvec, bio, i, iter_all)
+		bio_for_each_segment_all(bvec, bio, iter_all)
 			xfs_finish_page_writeback(inode, bvec, error);
 		bio_put(bio);
 	}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 9577ad8f6e28..186b2723c61b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -134,9 +134,8 @@ static inline bool bio_next_segment(const struct bio *bio,
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i, iter)	\
-	for (i = 0, bvl = bvec_init_iter_all(&iter);	\
-	     bio_next_segment((bio), &iter); i++)
+#define bio_for_each_segment_all(bvl, bio, iter) \
+	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
 
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
 				    unsigned bytes)
-- cgit v1.2.3
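For readers tracking the interface change that closes the series above, here is a minimal, hypothetical ->bi_end_io handler written against the new three-argument bio_for_each_segment_all(). It is an illustration only, not code from any of the patches: the handler name and the page-state policy are invented, and only the iterator usage reflects the macro as redefined in include/linux/bio.h above.

#include <linux/bio.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

/* Hypothetical example handler; not part of the patch series above. */
static void example_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;	/* iteration state lives here now, not in an int index */

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_status)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	bio_put(bio);
}

Callers that still need a running position keep a local counter of their own, as btrfs_retry_endio does above with its "int i = 0" and "i++" inside the loop; every other caller simply drops the index variable.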
From 2d5abb9a1e8e92b25e781f0c3537a5b3b4b2f033 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 1 May 2019 06:34:09 -0600
Subject: bcache: make is_discard_enabled() static

It's not used outside this file.

Fixes: 631207314d88 ("bcache: fix failure in journal relplay")
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/journal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/md/bcache')

diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index f9afb164b887..12dae9348147 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -318,7 +318,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 	}
 }
 
-bool is_discard_enabled(struct cache_set *s)
+static bool is_discard_enabled(struct cache_set *s)
 {
 	struct cache *ca;
 	unsigned int i;
-- cgit v1.2.3
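As an aside for context: the hunk above shows only the storage-class change plus the first two locals of is_discard_enabled(). A file-local helper of this shape would typically read as sketched below once it is static; the loop body here is an assumption for illustration and is not quoted from the patch.

/* Sketch only: the body below is assumed, not taken from the patch. */
static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, s, i)	/* bcache's per-cache iterator */
		if (ca->discard)
			return true;

	return false;
}

Marking the helper static keeps the symbol out of the global namespace and lets the compiler warn if it ever becomes genuinely unused.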