author		Jens Axboe <axboe@kernel.dk>	2024-06-19 08:02:34 -0600
committer	Jens Axboe <axboe@kernel.dk>	2024-06-19 08:14:49 -0600
commit		69c34f07e45f2d52d9a41991867a1e5a85bbaaf3 (patch)
tree		dc4779c7ed7a5f17ddf67182399111815da27100 /block
parent		465478bb00168a7620788990b1679c5067d421f2 (diff)
parent		339d3948c07b4aa2940aeb874294a7d6782cec16 (diff)
Merge branch 'for-6.11/block-limits' into for-6.11/block
Merge in last round of queue limits changes from Christoph.
* for-6.11/block-limits: (26 commits)
block: move the bounce flag into the features field
block: move the skip_tagset_quiesce flag to queue_limits
block: move the pci_p2pdma flag to queue_limits
block: move the zone_resetall flag to queue_limits
block: move the zoned flag into the features field
block: move the poll flag to queue_limits
block: move the dax flag to queue_limits
block: move the nowait flag to queue_limits
block: move the synchronous flag to queue_limits
block: move the stable_writes flag to queue_limits
block: move the io_stat flag setting to queue_limits
block: move the add_random flag to queue_limits
block: move the nonrot flag to queue_limits
block: move cache control settings out of queue->flags
block: remove blk_flush_policy
block: freeze the queue in queue_attr_store
nbd: move setting the cache control flags to __nbd_set_size
virtio_blk: remove virtblk_update_cache_mode
loop: fold loop_update_rotational into loop_reconfigure_limits
loop: also use the default block size from an underlying block device
...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
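
Taken together, the branch replaces most per-queue feature bits with a features field in struct queue_limits that a driver fills in before the queue is created. As a minimal sketch of the resulting driver-side pattern (the tag set and the limit values are hypothetical; the BLK_FEAT_* names and the blk_mq_alloc_queue() signature are taken from the diff below):

	struct queue_limits lim = {
		/* advertise a volatile write cache with FUA support */
		.features	= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
		/* hypothetical transfer limit for this example */
		.max_hw_sectors	= 255,
	};
	struct request_queue *q;

	q = blk_mq_alloc_queue(&my_tag_set, &lim, NULL);
	if (IS_ERR(q))
		return PTR_ERR(q);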
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	|   7
-rw-r--r--	block/blk-flush.c	|  36
-rw-r--r--	block/blk-mq-debugfs.c	|  13
-rw-r--r--	block/blk-mq.c		|  42
-rw-r--r--	block/blk-settings.c	|  46
-rw-r--r--	block/blk-sysfs.c	| 118
-rw-r--r--	block/blk-wbt.c		|   4
-rw-r--r--	block/blk.h		|   2
8 files changed, 130 insertions, 138 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 82c3ae22d76d..8d9fbd353fc7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -782,7 +782,7 @@ void submit_bio_noacct(struct bio *bio)
 		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
 				 bio_op(bio) != REQ_OP_ZONE_APPEND))
 			goto end_io;
-		if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+		if (!bdev_write_cache(bdev)) {
 			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 			if (!bio_sectors(bio)) {
 				status = BLK_STS_OK;
@@ -791,7 +791,7 @@ void submit_bio_noacct(struct bio *bio)
 		}
 	}
 
-	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+	if (!(q->limits.features & BLK_FEAT_POLL))
 		bio_clear_polled(bio);
 
 	switch (bio_op(bio)) {
@@ -915,8 +915,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 		return 0;
 
 	q = bdev_get_queue(bdev);
-	if (cookie == BLK_QC_T_NONE ||
-	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+	if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
 		return 0;
 
 	blk_flush_plug(current->plug, false);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c17cf8ed8113..30b9d5033a2b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -100,23 +100,6 @@ blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
 }
 
-static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
-{
-	unsigned int policy = 0;
-
-	if (blk_rq_sectors(rq))
-		policy |= REQ_FSEQ_DATA;
-
-	if (fflags & (1UL << QUEUE_FLAG_WC)) {
-		if (rq->cmd_flags & REQ_PREFLUSH)
-			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
-		    (rq->cmd_flags & REQ_FUA))
-			policy |= REQ_FSEQ_POSTFLUSH;
-	}
-	return policy;
-}
-
 static unsigned int blk_flush_cur_seq(struct request *rq)
 {
 	return 1 << ffz(rq->flush.seq);
@@ -398,19 +381,32 @@ static void blk_rq_init_flush(struct request *rq)
 bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned long fflags = q->queue_flags;	/* may change, cache */
-	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+	bool supports_fua = q->limits.features & BLK_FEAT_FUA;
+	unsigned int policy = 0;
 
 	/* FLUSH/FUA request must never be merged */
 	WARN_ON_ONCE(rq->bio != rq->biotail);
 
+	if (blk_rq_sectors(rq))
+		policy |= REQ_FSEQ_DATA;
+
+	/*
+	 * Check which flushes we need to sequence for this operation.
+	 */
+	if (blk_queue_write_cache(q)) {
+		if (rq->cmd_flags & REQ_PREFLUSH)
+			policy |= REQ_FSEQ_PREFLUSH;
+		if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
+			policy |= REQ_FSEQ_POSTFLUSH;
+	}
+
 	/*
 	 * @policy now records what operations need to be done. Adjust
 	 * REQ_PREFLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_PREFLUSH;
-	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
+	if (!supports_fua)
		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
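
The inlined policy computation above preserves the old blk_flush_policy() semantics. As a worked example derived directly from the code: a data-carrying REQ_PREFLUSH | REQ_FUA write to a device with BLK_FEAT_WRITE_CACHE set but BLK_FEAT_FUA clear resolves to all three sequence steps:

	/* blk_rq_sectors(rq) != 0                     -> REQ_FSEQ_DATA      */
	/* REQ_PREFLUSH requested, write cache present -> REQ_FSEQ_PREFLUSH  */
	/* REQ_FUA requested but !supports_fua         -> REQ_FSEQ_POSTFLUSH */
	policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH;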
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 770c0c2b72fa..344f9e503bdb 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -84,28 +84,15 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(NOMERGES),
 	QUEUE_FLAG_NAME(SAME_COMP),
 	QUEUE_FLAG_NAME(FAIL_IO),
-	QUEUE_FLAG_NAME(NONROT),
-	QUEUE_FLAG_NAME(IO_STAT),
 	QUEUE_FLAG_NAME(NOXMERGES),
-	QUEUE_FLAG_NAME(ADD_RANDOM),
-	QUEUE_FLAG_NAME(SYNCHRONOUS),
 	QUEUE_FLAG_NAME(SAME_FORCE),
 	QUEUE_FLAG_NAME(INIT_DONE),
-	QUEUE_FLAG_NAME(STABLE_WRITES),
-	QUEUE_FLAG_NAME(POLL),
-	QUEUE_FLAG_NAME(WC),
-	QUEUE_FLAG_NAME(FUA),
-	QUEUE_FLAG_NAME(DAX),
 	QUEUE_FLAG_NAME(STATS),
 	QUEUE_FLAG_NAME(REGISTERED),
 	QUEUE_FLAG_NAME(QUIESCED),
-	QUEUE_FLAG_NAME(PCI_P2PDMA),
-	QUEUE_FLAG_NAME(ZONE_RESETALL),
 	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
 	QUEUE_FLAG_NAME(HCTX_ACTIVE),
-	QUEUE_FLAG_NAME(NOWAIT),
 	QUEUE_FLAG_NAME(SQ_SCHED),
-	QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE),
 };
 #undef QUEUE_FLAG_NAME
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0d4cd39c3d25..e2b9710ddc5a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4109,6 +4109,12 @@ void blk_mq_release(struct request_queue *q)
 	blk_mq_sysfs_deinit(q);
 }
 
+static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
+{
+	return set->nr_maps > HCTX_TYPE_POLL &&
+	       set->map[HCTX_TYPE_POLL].nr_queues;
+}
+
 struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 		struct queue_limits *lim, void *queuedata)
 {
@@ -4116,7 +4122,13 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 	struct request_queue *q;
 	int ret;
 
-	q = blk_alloc_queue(lim ? lim : &default_lim, set->numa_node);
+	if (!lim)
+		lim = &default_lim;
+	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+	if (blk_mq_can_poll(set))
+		lim->features |= BLK_FEAT_POLL;
+
+	q = blk_alloc_queue(lim, set->numa_node);
 	if (IS_ERR(q))
 		return q;
 	q->queuedata = queuedata;
@@ -4269,17 +4281,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	mutex_unlock(&q->sysfs_lock);
 }
 
-static void blk_mq_update_poll_flag(struct request_queue *q)
-{
-	struct blk_mq_tag_set *set = q->tag_set;
-
-	if (set->nr_maps > HCTX_TYPE_POLL &&
-	    set->map[HCTX_TYPE_POLL].nr_queues)
-		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
-	else
-		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
-}
-
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		struct request_queue *q)
 {
@@ -4307,7 +4308,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	q->tag_set = set;
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
-	blk_mq_update_poll_flag(q);
 
 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->flush_list);
@@ -4631,13 +4631,15 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	int ret;
 	unsigned long i;
 
+	if (WARN_ON_ONCE(!q->mq_freeze_depth))
+		return -EINVAL;
+
 	if (!set)
 		return -EINVAL;
 
 	if (q->nr_requests == nr)
 		return 0;
 
-	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
 	ret = 0;
@@ -4671,7 +4673,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	}
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 
 	return ret;
 }
@@ -4793,8 +4794,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 fallback:
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		struct queue_limits lim;
+
 		blk_mq_realloc_hw_ctxs(set, q);
-		blk_mq_update_poll_flag(q);
+
 		if (q->nr_hw_queues != set->nr_hw_queues) {
 			int i = prev_nr_hw_queues;
 
@@ -4806,6 +4809,13 @@ fallback:
 			set->nr_hw_queues = prev_nr_hw_queues;
 			goto fallback;
 		}
+		lim = queue_limits_start_update(q);
+		if (blk_mq_can_poll(set))
+			lim.features |= BLK_FEAT_POLL;
+		else
+			lim.features &= ~BLK_FEAT_POLL;
+		if (queue_limits_commit_update(q, &lim) < 0)
+			pr_warn("updating the poll flag failed\n");
 		blk_mq_map_swqueue(q);
 	}
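
blk_mq_can_poll() keys BLK_FEAT_POLL off the tag set's queue map, so a driver opts in simply by providing a populated HCTX_TYPE_POLL map before calling blk_mq_alloc_queue(). A hedged sketch of that setup (the poll_queues count and the surrounding driver are hypothetical; the nr_maps/map fields are the real blk_mq_tag_set members used above):

	/* in a hypothetical driver's tag set configuration */
	set->nr_maps = HCTX_TYPE_POLL + 1;
	set->map[HCTX_TYPE_POLL].nr_queues = poll_queues;
	/* blk_mq_alloc_queue() then sets BLK_FEAT_POLL via blk_mq_can_poll() */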
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 377a86fe0fcc..9c7f101aaf52 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -68,7 +68,7 @@ static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
 
 static int blk_validate_zoned_limits(struct queue_limits *lim)
 {
-	if (!lim->zoned) {
+	if (!(lim->features & BLK_FEAT_ZONED)) {
 		if (WARN_ON_ONCE(lim->max_open_zones) ||
 		    WARN_ON_ONCE(lim->max_active_zones) ||
 		    WARN_ON_ONCE(lim->zone_write_granularity) ||
@@ -269,6 +269,9 @@ static int blk_validate_limits(struct queue_limits *lim)
 		lim->misaligned = 0;
 	}
 
+	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
+		lim->features &= ~BLK_FEAT_FUA;
+
 	err = blk_validate_integrity_limits(lim);
 	if (err)
 		return err;
@@ -461,6 +464,19 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 {
 	unsigned int top, bottom, alignment, ret = 0;
 
+	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
+
+	/*
+	 * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
+	 * stacking driver and all underlying devices.  The stacking driver
+	 * sets the flags before stacking the limits, and this will clear the
+	 * flags if any of the underlying devices does not support it.
+	 */
+	if (!(b->features & BLK_FEAT_NOWAIT))
+		t->features &= ~BLK_FEAT_NOWAIT;
+	if (!(b->features & BLK_FEAT_POLL))
+		t->features &= ~BLK_FEAT_POLL;
+
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_user_sectors = min_not_zero(t->max_user_sectors,
 			b->max_user_sectors);
@@ -470,7 +486,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			b->max_write_zeroes_sectors);
 	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
 					 queue_limits_max_zone_append_sectors(b));
-	t->bounce = max(t->bounce, b->bounce);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
@@ -593,8 +608,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		b->max_secure_erase_sectors);
 	t->zone_write_granularity = max(t->zone_write_granularity,
 					b->zone_write_granularity);
-	t->zoned = max(t->zoned, b->zoned);
-	if (!t->zoned) {
+	if (!(t->features & BLK_FEAT_ZONED)) {
 		t->zone_write_granularity = 0;
 		t->max_zone_append_sectors = 0;
 	}
@@ -718,30 +732,6 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 }
 EXPORT_SYMBOL(blk_set_queue_depth);
 
-/**
- * blk_queue_write_cache - configure queue's write cache
- * @q:		the request queue for the device
- * @wc:		write back cache on or off
- * @fua:	device supports FUA writes, if true
- *
- * Tell the block layer about the write cache of @q.
- */
-void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
-{
-	if (wc) {
-		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
-		blk_queue_flag_set(QUEUE_FLAG_WC, q);
-	} else {
-		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
-		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
-	}
-	if (fua)
-		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
-	else
-		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
-}
-EXPORT_SYMBOL_GPL(blk_queue_write_cache);
-
 int bdev_alignment_offset(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
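
The comment added to blk_stack_limits() implies a calling convention for stacked devices, sketched here as a hypothetical dm/md-style table walk (blk_stack_limits() and the feature bits are from the diff; the target_dev structure and list are illustrative only):

	struct queue_limits t = {
		/* start from what the stacking driver itself could support */
		.features = BLK_FEAT_NOWAIT | BLK_FEAT_POLL,
	};
	struct target_dev *dev;	/* hypothetical per-device structure */

	list_for_each_entry(dev, &targets, list)
		/* clears NOWAIT/POLL if any bottom device lacks them */
		blk_stack_limits(&t, &bdev_get_queue(dev->bdev)->limits,
				 get_start_sect(dev->bdev));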
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f0f9314ab65c..da4e96d686f9 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -189,12 +189,9 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
 		return -EINVAL;
 
-	blk_mq_freeze_queue(q);
 	lim = queue_limits_start_update(q);
 	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
 	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
-
 	if (err)
 		return err;
 	return ret;
@@ -241,11 +238,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	blk_mq_freeze_queue(q);
 	lim = queue_limits_start_update(q);
 	lim.max_user_sectors = max_sectors_kb << 1;
 	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
 	if (err)
 		return err;
 	return ret;
@@ -268,37 +263,43 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
 	return queue_var_show(queue_dma_alignment(q), page);
 }
 
-#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
-static ssize_t								\
-queue_##name##_show(struct request_queue *q, char *page)		\
-{									\
-	int bit;							\
-	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
-	return queue_var_show(neg ? !bit : bit, page);			\
-}									\
-static ssize_t								\
-queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
-{									\
-	unsigned long val;						\
-	ssize_t ret;							\
-	ret = queue_var_store(&val, page, count);			\
-	if (ret < 0)							\
-		return ret;						\
-	if (neg)							\
-		val = !val;						\
-									\
-	if (val)							\
-		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
-	else								\
-		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
-	return ret;							\
-}
-
-QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-#undef QUEUE_SYSFS_BIT_FNS
+static ssize_t queue_feature_store(struct request_queue *q, const char *page,
+		size_t count, unsigned int feature)
+{
+	struct queue_limits lim;
+	unsigned long val;
+	ssize_t ret;
+
+	ret = queue_var_store(&val, page, count);
+	if (ret < 0)
+		return ret;
+
+	lim = queue_limits_start_update(q);
+	if (val)
+		lim.features |= feature;
+	else
+		lim.features &= ~feature;
+	ret = queue_limits_commit_update(q, &lim);
+	if (ret)
+		return ret;
+	return count;
+}
+
+#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
+static ssize_t queue_##_name##_show(struct request_queue *q, char *page) \
+{									\
+	return sprintf(page, "%u\n", !!(q->limits.features & _feature)); \
+}									\
+static ssize_t queue_##_name##_store(struct request_queue *q,		\
+		const char *page, size_t count)				\
+{									\
+	return queue_feature_store(q, page, count, _feature);		\
+}
+
+QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
+QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
+QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
+QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
 
 static ssize_t queue_zoned_show(struct request_queue *q, char *page)
 {
@@ -393,13 +394,13 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_poll_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
+	return queue_var_show(q->limits.features & BLK_FEAT_POLL, page);
 }
 
 static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 				size_t count)
 {
-	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+	if (!(q->limits.features & BLK_FEAT_POLL))
 		return -EINVAL;
 	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
 	pr_info_ratelimited("please use driver specific parameters instead.\n");
@@ -428,32 +429,41 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-		return sprintf(page, "write back\n");
-
-	return sprintf(page, "write through\n");
+	if (q->limits.features & BLK_FLAGS_WRITE_CACHE_DISABLED)
+		return sprintf(page, "write through\n");
+	return sprintf(page, "write back\n");
 }
 
 static ssize_t queue_wc_store(struct request_queue *q, const char *page,
 			      size_t count)
 {
+	struct queue_limits lim;
+	bool disable;
+	int err;
+
 	if (!strncmp(page, "write back", 10)) {
-		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
-			return -EINVAL;
-		blk_queue_flag_set(QUEUE_FLAG_WC, q);
+		disable = false;
 	} else if (!strncmp(page, "write through", 13) ||
-		 !strncmp(page, "none", 4)) {
-		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
+		   !strncmp(page, "none", 4)) {
+		disable = true;
 	} else {
 		return -EINVAL;
 	}
 
+	lim = queue_limits_start_update(q);
+	if (disable)
+		lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED;
+	else
+		lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED;
+	err = queue_limits_commit_update(q, &lim);
+	if (err)
+		return err;
 	return count;
 }
 
 static ssize_t queue_fua_show(struct request_queue *q, char *page)
 {
-	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
+	return sprintf(page, "%u\n", !!(q->limits.features & BLK_FEAT_FUA));
 }
 
 static ssize_t queue_dax_show(struct request_queue *q, char *page)
@@ -522,9 +532,9 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_logical_block_size_show,
 };
 
-QUEUE_RW_ENTRY(queue_nonrot, "rotational");
+QUEUE_RW_ENTRY(queue_rotational, "rotational");
 QUEUE_RW_ENTRY(queue_iostats, "iostats");
-QUEUE_RW_ENTRY(queue_random, "add_random");
+QUEUE_RW_ENTRY(queue_add_random, "add_random");
 QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 
 #ifdef CONFIG_BLK_WBT
@@ -585,13 +595,11 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	 * ends up either enabling or disabling wbt completely. We can't
 	 * have IO inflight if that happens.
 	 */
-	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
 	wbt_set_min_lat(q, val);
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 
 	return count;
 }
@@ -622,7 +630,7 @@ static struct attribute *queue_attrs[] = {
 	&queue_write_zeroes_max_entry.attr,
 	&queue_zone_append_max_entry.attr,
 	&queue_zone_write_granularity_entry.attr,
-	&queue_nonrot_entry.attr,
+	&queue_rotational_entry.attr,
 	&queue_zoned_entry.attr,
 	&queue_nr_zones_entry.attr,
 	&queue_max_open_zones_entry.attr,
@@ -630,7 +638,7 @@ static struct attribute *queue_attrs[] = {
 	&queue_nomerges_entry.attr,
 	&queue_iostats_entry.attr,
 	&queue_stable_writes_entry.attr,
-	&queue_random_entry.attr,
+	&queue_add_random_entry.attr,
 	&queue_poll_entry.attr,
 	&queue_wc_entry.attr,
 	&queue_fua_entry.attr,
@@ -722,9 +730,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (!entry->store)
 		return -EIO;
 
+	blk_mq_freeze_queue(q);
 	mutex_lock(&q->sysfs_lock);
 	res = entry->store(q, page, length);
 	mutex_unlock(&q->sysfs_lock);
+	blk_mq_unfreeze_queue(q);
 	return res;
 }
 
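
For reference, each QUEUE_SYSFS_FEATURE() invocation above expands to a trivial show/store pair; e.g. QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT) becomes, roughly:

	static ssize_t queue_iostats_show(struct request_queue *q, char *page)
	{
		return sprintf(page, "%u\n",
			       !!(q->limits.features & BLK_FEAT_IO_STAT));
	}

	static ssize_t queue_iostats_store(struct request_queue *q,
			const char *page, size_t count)
	{
		return queue_feature_store(q, page, count, BLK_FEAT_IO_STAT);
	}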
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 64472134dd26..1a5e4b049ecd 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -206,8 +206,8 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
 	 */
 	if (wb_acct & WBT_DISCARD)
 		limit = rwb->wb_background;
-	else if (test_bit(QUEUE_FLAG_WC, &rwb->rqos.disk->queue->queue_flags) &&
-		 !wb_recent_wait(rwb))
+	else if (blk_queue_write_cache(rwb->rqos.disk->queue) &&
+		 !wb_recent_wait(rwb))
 		limit = 0;
 	else
 		limit = rwb->wb_normal;
diff --git a/block/blk.h b/block/blk.h
index 79e8d5d4fe0c..fa32f7fad5d7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -394,7 +394,7 @@ struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
 static inline bool blk_queue_may_bounce(struct request_queue *q)
 {
 	return IS_ENABLED(CONFIG_BOUNCE) &&
-		q->limits.bounce == BLK_BOUNCE_HIGH &&
+		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
 		max_low_pfn >= max_pfn;
 }
 
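
The queue_limits_start_update()/queue_limits_commit_update() pairing used throughout this merge is the general replacement for poking queue flags at runtime; as a stand-alone sketch (the choice of BLK_FEAT_ADD_RANDOM is arbitrary, and q is any live request queue):

	struct queue_limits lim;
	int err;

	lim = queue_limits_start_update(q);	/* locks out other updaters */
	lim.features &= ~BLK_FEAT_ADD_RANDOM;	/* stage the change locally */
	err = queue_limits_commit_update(q, &lim); /* validate and publish */
	if (err)
		return err;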