author     Linus Torvalds <torvalds@linux-foundation.org>   2023-05-26 15:04:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-05-26 15:04:54 -0700
commit     a92c9ab69f6696b26ef0c1ca3e8b922d1fc82e86 (patch)
tree       1d2ca1c854390c579610f51d2823a6ef52a9b87a
parent     6fae9129b1c70bd6b7677b808c03bc96e83460fc (diff)
parent     9491d01fbc550d123d72bf1cd7a0985508a9c469 (diff)
Merge tag 'block-6.4-2023-05-26' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:
"A few fixes for the storage side of things:
- Fix bio caching condition for passthrough IO (Anuj)
- end-of-device check fix for zero sized devices (Christoph)
- Update Paolo's email address
- NVMe pull request via Keith with a single quirk addition
- Fix regression in how wbt enablement is done (Yu)
- Fix race in active queue accounting (Tian)"
* tag 'block-6.4-2023-05-26' of git://git.kernel.dk/linux:
NVMe: Add MAXIO 1602 to bogus nid list.
block: make bio_check_eod work for zero sized devices
block: fix bio-cache for passthru IO
block, bfq: update Paolo's address in maintainer list
blk-mq: fix race condition in active queue accounting
blk-wbt: fix that wbt can't be disabled by default
-rw-r--r--  MAINTAINERS              |  2
-rw-r--r--  block/blk-core.c         |  2
-rw-r--r--  block/blk-map.c          |  2
-rw-r--r--  block/blk-mq-tag.c       | 12
-rw-r--r--  block/blk-wbt.c          | 12
-rw-r--r--  drivers/nvme/host/pci.c  |  2
6 files changed, 20 insertions, 12 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 207a65905f5e..f26270a41878 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3536,7 +3536,7 @@ F:	Documentation/filesystems/befs.rst
 F:	fs/befs/
 
 BFQ I/O SCHEDULER
-M:	Paolo Valente <paolo.valente@linaro.org>
+M:	Paolo Valente <paolo.valente@unimore.it>
 M:	Jens Axboe <axboe@kernel.dk>
 L:	linux-block@vger.kernel.org
 S:	Maintained
diff --git a/block/blk-core.c b/block/blk-core.c
index 00c74330fa92..1da77e7d6289 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -520,7 +520,7 @@ static inline int bio_check_eod(struct bio *bio)
 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 	unsigned int nr_sectors = bio_sectors(bio);
 
-	if (nr_sectors && maxsector &&
+	if (nr_sectors &&
 	    (nr_sectors > maxsector ||
 	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
 		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
diff --git a/block/blk-map.c b/block/blk-map.c
index 04c55f1c492e..46eed2e627c3 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -248,7 +248,7 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
 {
 	struct bio *bio;
 
-	if (rq->cmd_flags & REQ_ALLOC_CACHE) {
+	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
 		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
 					&fs_bio_set);
 		if (!bio)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d6af9d431dc6..dfd81cab5788 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -39,16 +39,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	unsigned int users;
 
+	/*
+	 * calling test_bit() prior to test_and_set_bit() is intentional,
+	 * it avoids dirtying the cacheline if the queue is already active.
+	 */
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
 
-		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return;
-		set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
 	} else {
-		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return;
-		set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
 	}
 
 	users = atomic_inc_return(&hctx->tags->active_queues);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index e49a48684532..9ec2a2f1eda3 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -730,14 +730,16 @@ void wbt_enable_default(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 	struct rq_qos *rqos;
-	bool disable_flag = q->elevator &&
-		    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
+	bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);
+
+	if (q->elevator &&
+	    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
+		enable = false;
 
 	/* Throttling already enabled? */
 	rqos = wbt_rq_qos(q);
 	if (rqos) {
-		if (!disable_flag &&
-		    RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+		if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
 			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
 		return;
 	}
@@ -746,7 +748,7 @@ void wbt_enable_default(struct gendisk *disk)
 	if (!blk_queue_registered(q))
 		return;
 
-	if (queue_is_mq(q) && !disable_flag)
+	if (queue_is_mq(q) && enable)
 		wbt_init(disk);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 32244582fdb0..492f319ebdf3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3424,6 +3424,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1e4B, 0x1602),   /* MAXIO MAP1602 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
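The blk-mq-tag.c change above fixes the accounting race by collapsing the old check-then-set sequence (test_bit() followed by a separate set_bit()) into a single atomic test_and_set_bit(), so only the caller that actually flips the flag goes on to increment active_queues; the plain test_bit() is kept in front, as the new comment notes, purely to avoid dirtying the cacheline when the queue is already active. Below is a minimal userspace sketch of the same pattern using C11 atomics rather than the kernel bitops; mark_queue_busy(), queue_active, and active_queues are hypothetical stand-ins for illustration, not kernel symbols.

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool queue_active;   /* stand-in for QUEUE_FLAG_HCTX_ACTIVE / BLK_MQ_S_TAG_ACTIVE */
static atomic_int  active_queues;  /* stand-in for hctx->tags->active_queues */

/*
 * Cheap read first (no cacheline dirtying if the flag is already set),
 * then an atomic swap so exactly one concurrent caller wins and does the
 * accounting.  The old test_bit()+set_bit() pair let two callers both see
 * the flag clear and both bump the counter.
 */
static void mark_queue_busy(void)
{
	if (atomic_load(&queue_active) ||
	    atomic_exchange(&queue_active, true))
		return;

	atomic_fetch_add(&active_queues, 1);
}

int main(void)
{
	mark_queue_busy();
	mark_queue_busy();  /* second call is a no-op */
	printf("active_queues = %d\n", atomic_load(&active_queues));  /* prints 1 */
	return 0;
}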