author      Christian Brauner <brauner@kernel.org>  2023-08-23 13:06:55 +0200
committer   Christian Brauner <brauner@kernel.org>  2023-08-23 13:06:55 +0200
commit      3fb5a6562adef115d8a8c3e19cc9d5fae32e93c8
tree        7a0cc61be2ba75aab26403f330da3140424b6b76 /block
parent      051178c366bbc1bf8b4aba5ca5519d7da453c95f
parent      59ba4fdd2d1f9dd7af98f5168c846150c9aec56d
Merge tag 'vfs-6.6-merge-2' of ssh://gitolite.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull filesystem freezing updates from Darrick Wong:
New code for 6.6:
* Allow the kernel to initiate a freeze of a filesystem. The kernel
and userspace can both hold a freeze on a filesystem at the same
time; the freeze is not lifted until /both/ holders lift it. This
will enable us to fix a longstanding bug in XFS online fsck.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Message-Id: <20230822182604.GB11286@frogsfrogsfrogs>
Signed-off-by: Christian Brauner <brauner@kernel.org>
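
The holder argument threaded through the bdev.c hunks below is what implements the "both holders" rule described above: freeze_super() and thaw_super() now take a flag identifying who owns the freeze. As a rough, hypothetical sketch of the intended kernel-side usage (the function below is invented for illustration; FREEZE_HOLDER_KERNEL is the kernel-side counterpart to the FREEZE_HOLDER_USERSPACE flag visible in the diff):

/*
 * Hypothetical sketch, not part of this diff: how a kernel user such as
 * XFS online fsck can freeze a filesystem without interfering with a
 * concurrent userspace FIFREEZE/FITHAW.
 */
static int scrub_under_kernel_freeze(struct super_block *sb)
{
	int error;

	/* Take a kernel-owned freeze reference. */
	error = freeze_super(sb, FREEZE_HOLDER_KERNEL);
	if (error)
		return error;

	/* ... inspect on-disk metadata while writes are quiesced ... */

	/*
	 * Drop only the kernel hold; if userspace also froze the
	 * filesystem, it stays frozen until FITHAW drops that hold too.
	 */
	return thaw_super(sb, FREEZE_HOLDER_KERNEL);
}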
Diffstat (limited to 'block')
-rw-r--r--  block/bdev.c                |  8
-rw-r--r--  block/blk-crypto-profile.c  | 12
-rw-r--r--  block/blk-flush.c           |  2
-rw-r--r--  block/blk-mq.c              | 47
-rw-r--r--  block/blk-zoned.c           | 86
-rw-r--r--  block/mq-deadline.c         |  2
-rw-r--r--  block/partitions/amiga.c    |  2
7 files changed, 97 insertions, 62 deletions
diff --git a/block/bdev.c b/block/bdev.c
index a20263fa27a4..f3b13aa1b7d4 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -231,9 +231,9 @@ int freeze_bdev(struct block_device *bdev)
 	if (!sb)
 		goto sync;
 	if (sb->s_op->freeze_super)
-		error = sb->s_op->freeze_super(sb);
+		error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
 	else
-		error = freeze_super(sb);
+		error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
 	deactivate_super(sb);
 
 	if (error) {
@@ -274,9 +274,9 @@ int thaw_bdev(struct block_device *bdev)
 		goto out;
 
 	if (sb->s_op->thaw_super)
-		error = sb->s_op->thaw_super(sb);
+		error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
 	else
-		error = thaw_super(sb);
+		error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
 	if (error)
 		bdev->bd_fsfreeze_count++;
 	else
diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 2a67d3fb63e5..7fabc883e39f 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -79,7 +79,14 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
 	unsigned int slot_hashtable_size;
 
 	memset(profile, 0, sizeof(*profile));
-	init_rwsem(&profile->lock);
+
+	/*
+	 * profile->lock of an underlying device can nest inside profile->lock
+	 * of a device-mapper device, so use a dynamic lock class to avoid
+	 * false-positive lockdep reports.
+	 */
+	lockdep_register_key(&profile->lockdep_key);
+	__init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);
 
 	if (num_slots == 0)
 		return 0;
@@ -89,7 +96,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
 	profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
 				  GFP_KERNEL);
 	if (!profile->slots)
-		return -ENOMEM;
+		goto err_destroy;
 
 	profile->num_slots = num_slots;
 
@@ -435,6 +442,7 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
 {
 	if (!profile)
 		return;
+	lockdep_unregister_key(&profile->lockdep_key);
 	kvfree(profile->slot_hashtable);
 	kvfree_sensitive(profile->slots,
 			 sizeof(profile->slots[0]) * profile->num_slots);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index dba392cf22be..8220517c2d67 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -189,7 +189,7 @@ static void blk_flush_complete_seq(struct request *rq,
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
 		spin_lock(&q->requeue_lock);
-		list_add_tail(&rq->queuelist, &q->flush_list);
+		list_add(&rq->queuelist, &q->requeue_list);
 		spin_unlock(&q->requeue_lock);
 		blk_mq_kick_requeue_list(q);
 		break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5504719b970d..d50b1d62a3d9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -328,8 +328,24 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+/* Set start and alloc time when the allocated request is actually used */
+static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
+{
+	if (blk_mq_need_time_stamp(rq))
+		rq->start_time_ns = ktime_get_ns();
+	else
+		rq->start_time_ns = 0;
+
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+	if (blk_queue_rq_alloc_time(rq->q))
+		rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
+	else
+		rq->alloc_time_ns = 0;
+#endif
+}
+
 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
-		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
+		struct blk_mq_tags *tags, unsigned int tag)
 {
 	struct blk_mq_ctx *ctx = data->ctx;
 	struct blk_mq_hw_ctx *hctx = data->hctx;
@@ -356,14 +372,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	}
 	rq->timeout = 0;
 
-	if (blk_mq_need_time_stamp(rq))
-		rq->start_time_ns = ktime_get_ns();
-	else
-		rq->start_time_ns = 0;
 	rq->part = NULL;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
-	rq->alloc_time_ns = alloc_time_ns;
-#endif
 	rq->io_start_time_ns = 0;
 	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
@@ -393,8 +402,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 }
 
 static inline struct request *
-__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
-		u64 alloc_time_ns)
+__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 {
 	unsigned int tag, tag_offset;
 	struct blk_mq_tags *tags;
@@ -413,7 +421,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 		tag = tag_offset + i;
 		prefetch(tags->static_rqs[tag]);
 		tag_mask &= ~(1UL << i);
-		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
+		rq = blk_mq_rq_ctx_init(data, tags, tag);
 		rq_list_add(data->cached_rq, rq);
 		nr++;
 	}
@@ -474,9 +482,11 @@ retry:
 	 * Try batched alloc if we want more than 1 tag.
 	 */
 	if (data->nr_tags > 1) {
-		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
-		if (rq)
+		rq = __blk_mq_alloc_requests_batch(data);
+		if (rq) {
+			blk_mq_rq_time_init(rq, alloc_time_ns);
 			return rq;
+		}
 		data->nr_tags = 1;
 	}
 
@@ -499,8 +509,9 @@ retry:
 		goto retry;
 	}
 
-	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
-				  alloc_time_ns);
+	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
+	blk_mq_rq_time_init(rq, alloc_time_ns);
+	return rq;
 }
 
 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
@@ -555,6 +566,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 			return NULL;
 
 		plug->cached_rq = rq_list_next(rq);
+		blk_mq_rq_time_init(rq, 0);
 	}
 
 	rq->cmd_flags = opf;
@@ -656,8 +668,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	tag = blk_mq_get_tag(&data);
 	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
-				alloc_time_ns);
+	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
+	blk_mq_rq_time_init(rq, alloc_time_ns);
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
@@ -2896,6 +2908,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	plug->cached_rq = rq_list_next(rq);
 	rq_qos_throttle(q, *bio);
 
+	blk_mq_rq_time_init(rq, 0);
 	rq->cmd_flags = (*bio)->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
 	return rq;
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 0f9f97cdddd9..619ee41a51cc 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -442,7 +442,6 @@ struct blk_revalidate_zone_args {
 	unsigned long	*conv_zones_bitmap;
 	unsigned long	*seq_zones_wlock;
 	unsigned int	nr_zones;
-	sector_t	zone_sectors;
 	sector_t	sector;
 };
 
@@ -456,38 +455,34 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 	struct gendisk *disk = args->disk;
 	struct request_queue *q = disk->queue;
 	sector_t capacity = get_capacity(disk);
+	sector_t zone_sectors = q->limits.chunk_sectors;
+
+	/* Check for bad zones and holes in the zone report */
+	if (zone->start != args->sector) {
+		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+			disk->disk_name, args->sector, zone->start);
+		return -ENODEV;
+	}
+
+	if (zone->start >= capacity || !zone->len) {
+		pr_warn("%s: Invalid zone start %llu, length %llu\n",
+			disk->disk_name, zone->start, zone->len);
+		return -ENODEV;
+	}
 
 	/*
 	 * All zones must have the same size, with the exception on an eventual
 	 * smaller last zone.
 	 */
-	if (zone->start == 0) {
-		if (zone->len == 0 || !is_power_of_2(zone->len)) {
-			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
-				disk->disk_name, zone->len);
-			return -ENODEV;
-		}
-
-		args->zone_sectors = zone->len;
-		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
-	} else if (zone->start + args->zone_sectors < capacity) {
-		if (zone->len != args->zone_sectors) {
+	if (zone->start + zone->len < capacity) {
+		if (zone->len != zone_sectors) {
 			pr_warn("%s: Invalid zoned device with non constant zone size\n",
 				disk->disk_name);
 			return -ENODEV;
 		}
-	} else {
-		if (zone->len > args->zone_sectors) {
-			pr_warn("%s: Invalid zoned device with larger last zone size\n",
-				disk->disk_name);
-			return -ENODEV;
-		}
-	}
-
-	/* Check for holes in the zone report */
-	if (zone->start != args->sector) {
-		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
-			disk->disk_name, args->sector, zone->start);
+	} else if (zone->len > zone_sectors) {
+		pr_warn("%s: Invalid zoned device with larger last zone size\n",
+			disk->disk_name);
 		return -ENODEV;
 	}
 
@@ -526,11 +521,13 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
- * Helper function for low-level device drivers to (re) allocate and initialize
- * a disk request queue zone bitmaps. This functions should normally be called
- * within the disk ->revalidate method for blk-mq based drivers. For BIO based
- * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
- * is correct.
+ * Helper function for low-level device drivers to check and (re) allocate and
+ * initialize a disk request queue zone bitmaps. This functions should normally
+ * be called within the disk ->revalidate method for blk-mq based drivers.
+ * Before calling this function, the device driver must already have set the
+ * device zone size (chunk_sector limit) and the max zone append limit.
+ * For BIO based drivers, this function cannot be used. BIO based device drivers
+ * only need to set disk->nr_zones so that the sysfs exposed value is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
@@ -539,9 +536,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 		void (*update_driver_data)(struct gendisk *disk))
 {
 	struct request_queue *q = disk->queue;
-	struct blk_revalidate_zone_args args = {
-		.disk = disk,
-	};
+	sector_t zone_sectors = q->limits.chunk_sectors;
+	sector_t capacity = get_capacity(disk);
+	struct blk_revalidate_zone_args args = { };
 	unsigned int noio_flag;
 	int ret;
 
@@ -550,13 +547,31 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	if (WARN_ON_ONCE(!queue_is_mq(q)))
 		return -EIO;
 
-	if (!get_capacity(disk))
-		return -EIO;
+	if (!capacity)
+		return -ENODEV;
+
+	/*
+	 * Checks that the device driver indicated a valid zone size and that
+	 * the max zone append limit is set.
+	 */
+	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
+		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
+			disk->disk_name, zone_sectors);
+		return -ENODEV;
+	}
+
+	if (!q->limits.max_zone_append_sectors) {
+		pr_warn("%s: Invalid 0 maximum zone append limit\n",
+			disk->disk_name);
+		return -ENODEV;
+	}
 
 	/*
 	 * Ensure that all memory allocations in this context are done as if
 	 * GFP_NOIO was specified.
 	 */
+	args.disk = disk;
+	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
 	noio_flag = memalloc_noio_save();
 	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
 				       blk_revalidate_zone_cb, &args);
@@ -570,7 +585,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	 * If zones where reported, make sure that the entire disk capacity
 	 * has been checked.
 	 */
-	if (ret > 0 && args.sector != get_capacity(disk)) {
+	if (ret > 0 && args.sector != capacity) {
 		pr_warn("%s: Missing zones from sector %llu\n",
 			disk->disk_name, args.sector);
 		ret = -ENODEV;
@@ -583,7 +598,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	 */
 	blk_mq_freeze_queue(q);
 	if (ret > 0) {
-		blk_queue_chunk_sectors(q, args.zone_sectors);
 		disk->nr_zones = args.nr_zones;
 		swap(disk->seq_zones_wlock, args.seq_zones_wlock);
 		swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 6aa5daf7ae32..02a916ba62ee 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -176,7 +176,7 @@ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
 	 * zoned writes, start searching from the start of a zone.
 	 */
 	if (blk_rq_is_seq_zoned_write(rq))
-		pos -= round_down(pos, rq->q->limits.chunk_sectors);
+		pos = round_down(pos, rq->q->limits.chunk_sectors);
 
 	while (node) {
 		rq = rb_entry_rq(node);
diff --git a/block/partitions/amiga.c b/block/partitions/amiga.c
index ed222b9c901b..506921095412 100644
--- a/block/partitions/amiga.c
+++ b/block/partitions/amiga.c
@@ -90,7 +90,7 @@ int amiga_partition(struct parsed_partitions *state)
 	}
 	blk = be32_to_cpu(rdb->rdb_PartitionList);
 	put_dev_sector(sect);
-	for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
+	for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) {
 		/* Read in terms partition table understands */
 		if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
 			pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
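
The blk-crypto-profile.c hunks above are also a reusable lockdep idiom: registering a per-object lock class key lets two instances of the same lock nest (a device-mapper device's profile->lock over an underlying device's profile->lock) without false-positive reports from lockdep's per-class nesting check. A minimal sketch of the pattern, with a hypothetical my_object type; only the lockdep_register_key(), __init_rwsem() and lockdep_unregister_key() calls come from the hunk:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct my_object {
	struct rw_semaphore lock;
	struct lock_class_key lockdep_key;	/* one lock class per instance */
};

static void my_object_init(struct my_object *obj)
{
	/* Dynamic key: this object's rwsem gets its own lockdep class. */
	lockdep_register_key(&obj->lockdep_key);
	__init_rwsem(&obj->lock, "&obj->lock", &obj->lockdep_key);
}

static void my_object_destroy(struct my_object *obj)
{
	/* The key must be unregistered before the object is freed. */
	lockdep_unregister_key(&obj->lockdep_key);
}

Note that blk_crypto_profile_init() accordingly switches its allocation-failure path from a bare return -ENOMEM to goto err_destroy, presumably so the freshly registered key is unregistered on the error path as well (the err_destroy label itself is outside the hunk shown).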