Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--  drivers/md/dm-table.c | 351
 1 file changed, 73 insertions(+), 278 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b2d5246cff21..92d18dcdb3e9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,6 +425,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.logical_block_size,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
+
+ /*
+ * Only stack the integrity profile if the target doesn't have native
+ * integrity support.
+ */
+ if (!dm_target_has_integrity(ti->type))
+ queue_limits_stack_integrity_bdev(limits, bdev);
return 0;
}
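
With integrity metadata now carried inside struct queue_limits, stacking it happens right where the per-device limits are gathered. A minimal sketch of the pattern adopted here, assuming the queue_limits_stack_integrity_bdev() helper this series introduces (it merges a component device's integrity profile into the stacked limits and returns false when the profiles cannot be combined):

    #include <linux/blkdev.h>
    #include <linux/blk-integrity.h>

    /* Sketch, not DM code: fold one component device's integrity
     * profile into the limits being assembled for the top device. */
    static void example_stack_device(struct queue_limits *lim,
                                     struct block_device *bdev)
    {
            if (!queue_limits_stack_integrity_bdev(lim, bdev))
                    pr_warn("incompatible integrity profile, integrity disabled\n");
    }

The dm_target_has_integrity() guard keeps DM from stacking profiles for targets such as dm-integrity that provide their own.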
@@ -572,6 +579,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return 0;
}
+static void dm_set_stacking_limits(struct queue_limits *limits)
+{
+ blk_set_stacking_limits(limits);
+ limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
+}
+
/*
* Impose necessary and sufficient conditions on a device's table such
* that any incoming bio which respects its logical_block_size can be
@@ -610,7 +623,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
for (i = 0; i < t->num_targets; i++) {
ti = dm_table_get_target(t, i);
- blk_set_stacking_limits(&ti_limits);
+ dm_set_stacking_limits(&ti_limits);
/* combine all target devices' limits */
if (ti->type->iterate_devices)
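
The helper exists because feature bits behave subtractively during stacking: blk_stack_limits() clears BLK_FEAT_NOWAIT and BLK_FEAT_POLL from the top-level limits whenever a component device lacks them, and a bit that was never seeded can never reappear, so every site that previously called blk_set_stacking_limits() directly must now seed the superset first (BLK_FEAT_IO_STAT is simply DM's default accounting policy rather than a stacked capability). A sketch of the semantics:

    struct queue_limits top, bottom;

    dm_set_stacking_limits(&top);   /* seed IO_STAT | NOWAIT | POLL */
    /* ... fill 'bottom' from one component device ... */
    blk_stack_limits(&top, &bottom, 0);
    /* top.features keeps NOWAIT/POLL only if 'bottom' had them too */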
@@ -702,9 +715,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->immutable_target_type = ti->type;
}
- if (dm_target_has_integrity(ti->type))
- t->integrity_added = 1;
-
ti->table = t;
ti->begin = start;
ti->len = len;
@@ -1014,14 +1024,13 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
-static bool dm_table_supports_poll(struct dm_table *t);
-
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned int per_io_data_size = 0, front_pad, io_front_pad;
unsigned int min_pool_size = 0, pool_size;
struct dm_md_mempools *pools;
+ unsigned int bioset_flags = 0;
if (unlikely(type == DM_TYPE_NONE)) {
DMERR("no table type is set, can't allocate mempools");
@@ -1038,6 +1047,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
goto init_bs;
}
+ if (md->queue->limits.features & BLK_FEAT_POLL)
+ bioset_flags |= BIOSET_PERCPU_CACHE;
+
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@@ -1050,8 +1062,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
io_front_pad = roundup(per_io_data_size,
__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
- if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
- dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
+ if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
goto out_free_pools;
if (t->integrity_supported &&
bioset_integrity_create(&pools->io_bs, pool_size))
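
The bioset flags are now derived from the already-stacked queue features instead of a separate table walk: if the final limits kept BLK_FEAT_POLL, the io bioset gets the per-CPU bio cache that polled I/O uses for fast bio recycling. A generic sketch of the idiom (example_init_io_bs is a hypothetical name):

    /* Choose bioset flags from the queue's stacked features. */
    static int example_init_io_bs(struct bio_set *bs, struct request_queue *q,
                                  unsigned int pool_size, unsigned int front_pad)
    {
            int flags = 0;

            if (q->limits.features & BLK_FEAT_POLL)
                    flags |= BIOSET_PERCPU_CACHE;   /* for REQ_POLLED bios */
            return bioset_init(bs, pool_size, front_pad, flags);
    }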
@@ -1119,99 +1130,6 @@ static int dm_table_build_index(struct dm_table *t)
return r;
}
-static bool integrity_profile_exists(struct gendisk *disk)
-{
- return !!blk_get_integrity(disk);
-}
-
-/*
- * Get a disk whose integrity profile reflects the table's profile.
- * Returns NULL if integrity support was inconsistent or unavailable.
- */
-static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
-{
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_dev_internal *dd = NULL;
- struct gendisk *prev_disk = NULL, *template_disk = NULL;
-
- for (unsigned int i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = dm_table_get_target(t, i);
-
- if (!dm_target_passes_integrity(ti->type))
- goto no_integrity;
- }
-
- list_for_each_entry(dd, devices, list) {
- template_disk = dd->dm_dev->bdev->bd_disk;
- if (!integrity_profile_exists(template_disk))
- goto no_integrity;
- else if (prev_disk &&
- blk_integrity_compare(prev_disk, template_disk) < 0)
- goto no_integrity;
- prev_disk = template_disk;
- }
-
- return template_disk;
-
-no_integrity:
- if (prev_disk)
- DMWARN("%s: integrity not set: %s and %s profile mismatch",
- dm_device_name(t->md),
- prev_disk->disk_name,
- template_disk->disk_name);
- return NULL;
-}
-
-/*
- * Register the mapped device for blk_integrity support if the
- * underlying devices have an integrity profile. But all devices may
- * not have matching profiles (checking all devices isn't reliable
- * during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integrity
- * profile). Consequently, stacked DM devices force a 2 stage integrity
- * profile validation: First pass during table load, final pass during
- * resume.
- */
-static int dm_table_register_integrity(struct dm_table *t)
-{
- struct mapped_device *md = t->md;
- struct gendisk *template_disk = NULL;
-
- /* If target handles integrity itself do not register it here. */
- if (t->integrity_added)
- return 0;
-
- template_disk = dm_table_get_integrity_disk(t);
- if (!template_disk)
- return 0;
-
- if (!integrity_profile_exists(dm_disk(md))) {
- t->integrity_supported = true;
- /*
- * Register integrity profile during table load; we can do
- * this because the final profile must match during resume.
- */
- blk_integrity_register(dm_disk(md),
- blk_get_integrity(template_disk));
- return 0;
- }
-
- /*
- * If DM device already has an initialized integrity
- * profile the new profile should not conflict.
- */
- if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
- DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
- dm_device_name(t->md),
- template_disk->disk_name);
- return 1;
- }
-
- /* Preserve existing integrity profile */
- t->integrity_supported = true;
- return 0;
-}
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_crypto_profile {
@@ -1423,12 +1341,6 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_register_integrity(t);
- if (r) {
- DMERR("could not register integrity profile.");
- return r;
- }
-
r = dm_table_construct_crypto_profile(t);
if (r) {
DMERR("could not construct crypto profile.");
@@ -1493,14 +1405,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
-static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
-}
-
/*
* type->iterate_devices() should be called when the sanity check needs to
* iterate and check all underlying data devices. iterate_devices() will
@@ -1548,19 +1452,6 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
-static bool dm_table_supports_poll(struct dm_table *t)
-{
- for (unsigned int i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = dm_table_get_target(t, i);
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
- return false;
- }
-
- return true;
-}
-
/*
* Check whether a table has no data devices attached using each
* target's iterate_devices method.
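
Removing device_not_poll_capable() and dm_table_supports_poll() is the payoff of seeding BLK_FEAT_POLL in dm_set_stacking_limits(): the iteration duplicated work that limits stacking now does as a side effect, since blk_stack_limits() drops the bit the moment one component device cannot poll. Once the table's limits are computed, a single feature test replaces the whole walk (sketch):

    /* Before: O(targets x devices) iteration at queue-setup time. */
    poll_ok = dm_table_supports_poll(t);

    /* After: one bit, computed while the limits were stacked. */
    poll_ok = !!(t->md->queue->limits.features & BLK_FEAT_POLL);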
@@ -1686,12 +1577,20 @@ int dm_calculate_queue_limits(struct dm_table *t,
unsigned int zone_sectors = 0;
bool zoned = false;
- blk_set_stacking_limits(limits);
+ dm_set_stacking_limits(limits);
+
+ t->integrity_supported = true;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_passes_integrity(ti->type))
+ t->integrity_supported = false;
+ }
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
- blk_set_stacking_limits(&ti_limits);
+ dm_set_stacking_limits(&ti_limits);
if (!ti->type->iterate_devices) {
/* Set I/O hints portion of queue limits */
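
Integrity eligibility is now decided up front, before any limits are combined: t->integrity_supported starts out true and is cleared if any target fails to pass integrity data through. Targets opt in with the existing DM_TARGET_PASSES_INTEGRITY feature flag; a simple pass-through target would declare it like this (sketch modeled on dm-linear):

    static struct target_type example_target = {
            .name     = "example-passthrough",
            .version  = {1, 0, 0},
            .features = DM_TARGET_PASSES_INTEGRITY, /* allow profile stacking */
            .module   = THIS_MODULE,
            /* .ctr, .dtr, .map, ... omitted */
    };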
@@ -1706,12 +1605,12 @@ int dm_calculate_queue_limits(struct dm_table *t,
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
- if (!zoned && ti_limits.zoned) {
+ if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
/*
* After stacking all limits, validate all devices
* in table support this zoned model and zone sectors.
*/
- zoned = ti_limits.zoned;
+ zoned = (ti_limits.features & BLK_FEAT_ZONED);
zone_sectors = ti_limits.chunk_sectors;
}
@@ -1738,6 +1637,18 @@ combine_limits:
dm_device_name(t->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
+
+ if (t->integrity_supported ||
+ dm_target_has_integrity(ti->type)) {
+ if (!queue_limits_stack_integrity(limits, &ti_limits)) {
+ DMWARN("%s: adding target device (start sect %llu len %llu) "
+ "disabled integrity support due to incompatibility",
+ dm_device_name(t->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+ t->integrity_supported = false;
+ }
+ }
}
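
Profile stacking then happens in the same loop that combines every other limit: a false return from queue_limits_stack_integrity() means this target's profile is incompatible with what has been stacked so far, and DM degrades gracefully with a warning instead of failing the load. Note what this replaces: there is no blk_integrity_register()/unregister() step anymore; whatever ends up in the limits is published when they are applied to the queue. For a generic stacking driver the shape is roughly as follows (a sketch using the generic limits-update API, with q and component_bdev assumed; not DM's exact call chain):

    struct queue_limits lim = queue_limits_start_update(q);
    int err;

    blk_set_stacking_limits(&lim);
    if (!queue_limits_stack_integrity_bdev(&lim, component_bdev))
            pr_warn("running without integrity support\n");
    err = queue_limits_commit_update(q, &lim);      /* publishes the profile */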
/*
@@ -1747,12 +1658,12 @@ combine_limits:
* zoned model on host-managed zoned block devices.
* BUT...
*/
- if (limits->zoned) {
+ if (limits->features & BLK_FEAT_ZONED) {
/*
* ...IF the above limits stacking determined a zoned model
* validate that all of the table's devices conform to it.
*/
- zoned = limits->zoned;
+ zoned = limits->features & BLK_FEAT_ZONED;
zone_sectors = limits->chunk_sectors;
}
if (validate_hardware_zoned(t, zoned, zone_sectors))
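
The zoned checks get the same mechanical conversion applied throughout this series: the queue_limits 'zoned' boolean becomes the BLK_FEAT_ZONED feature bit, with the zone size still carried in chunk_sectors. In short:

    bool zoned = limits->features & BLK_FEAT_ZONED;         /* was limits->zoned */
    unsigned int zone_sectors = zoned ? limits->chunk_sectors : 0;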
@@ -1762,63 +1673,15 @@ combine_limits:
}
/*
- * Verify that all devices have an integrity profile that matches the
- * DM device's registered integrity profile. If the profiles don't
- * match then unregister the DM device's integrity profile.
+ * Check if a target requires flush support even if none of the underlying
+ * devices need it (e.g. to persist target-specific metadata).
*/
-static void dm_table_verify_integrity(struct dm_table *t)
-{
- struct gendisk *template_disk = NULL;
-
- if (t->integrity_added)
- return;
-
- if (t->integrity_supported) {
- /*
- * Verify that the original integrity profile
- * matches all the devices in this table.
- */
- template_disk = dm_table_get_integrity_disk(t);
- if (template_disk &&
- blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
- return;
- }
-
- if (integrity_profile_exists(dm_disk(t->md))) {
- DMWARN("%s: unable to establish an integrity profile",
- dm_device_name(t->md));
- blk_integrity_unregister(dm_disk(t->md));
- }
-}
-
-static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static bool dm_table_supports_flush(struct dm_table *t)
{
- unsigned long flush = (unsigned long) data;
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return (q->queue_flags & flush);
-}
-
-static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
-{
- /*
- * Require at least one underlying device to support flushes.
- * t->devices includes internal dm devices such as mirror logs
- * so we need to use iterate_devices here, which targets
- * supporting flushes must provide.
- */
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
- if (!ti->num_flush_bios)
- continue;
-
- if (ti->flush_supported)
- return true;
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
+ if (ti->num_flush_bios && ti->flush_supported)
return true;
}
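
dm_table_supports_flush() loses its flag argument and its device iteration because the component devices' cache state now arrives through limits stacking; the only question DM must still answer itself is whether a target wants flushes for its own metadata even when no underlying device has a volatile cache. A target requests that in its constructor, e.g. (sketch using the existing dm_target fields):

    static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    {
            ti->num_flush_bios  = 1;    /* accept REQ_PREFLUSH bios */
            ti->flush_supported = true; /* flushes needed even on cacheless devices */
            return 0;
    }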
@@ -1839,20 +1702,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
return false;
}
-static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- return !bdev_nonrot(dev->bdev);
-}
-
-static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_add_random(q);
-}
-
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1877,12 +1726,6 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
-static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- return !bdev_nowait(dev->bdev);
-}
-
static bool dm_table_supports_nowait(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
@@ -1890,10 +1733,6 @@ static bool dm_table_supports_nowait(struct dm_table *t)
if (!dm_target_supports_nowait(ti->type))
return false;
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
- return false;
}
return true;
@@ -1950,29 +1789,25 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
-static int device_requires_stable_pages(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
-{
- return bdev_stable_writes(dev->bdev);
-}
-
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
- bool wc = false, fua = false;
int r;
- if (dm_table_supports_nowait(t))
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
+ if (!dm_table_supports_nowait(t))
+ limits->features &= ~BLK_FEAT_NOWAIT;
+
+ /*
+ * The current polling implementation does not support request-based
+ * stacking.
+ */
+ if (!__table_type_bio_based(t->type))
+ limits->features &= ~BLK_FEAT_POLL;
if (!dm_table_supports_discards(t)) {
limits->max_hw_discard_sectors = 0;
limits->discard_granularity = 0;
limits->discard_alignment = 0;
- limits->discard_misaligned = 0;
}
if (!dm_table_supports_write_zeroes(t))
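
dm_table_set_restrictions() now edits the limits subtractively rather than toggling queue flags: NOWAIT is withdrawn when a target type cannot honor it, POLL is withdrawn for request-based tables because polling is only wired up for bio-based stacking, and missing discard support just zeroes the relevant limits. Consumers are unaffected because the existing accessors read the same bits; roughly what bdev_nowait() amounts to after this series (sketch, assumed equivalence):

    static inline bool example_nowait(struct block_device *bdev)
    {
            return bdev_get_queue(bdev)->limits.features & BLK_FEAT_NOWAIT;
    }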
@@ -1981,58 +1816,22 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
- if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
- wc = true;
- if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
- fua = true;
- }
- blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_flush(t))
+ limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
if (dm_table_supports_dax(t, device_not_dax_capable)) {
- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ limits->features |= BLK_FEAT_DAX;
if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
} else
- blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+ limits->features &= ~BLK_FEAT_DAX;
if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
dax_write_cache(t->md->dax_dev, true);
- /* Ensure that all underlying devices are non-rotational. */
- if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
- dm_table_verify_integrity(t);
-
- /*
- * Some devices don't use blk_integrity but still want stable pages
- * because they do their own checksumming.
- * If any underlying device requires stable pages, a table must require
- * them as well. Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
- if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
-
- /*
- * Determine whether or not this queue's I/O timings contribute
- * to the entropy pool. Only request-based targets use this.
- * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
- * have it set.
- */
- if (blk_queue_add_random(q) &&
- dm_table_any_dev_attr(t, device_is_not_random, NULL))
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-
- /*
- * For a zoned target, setup the zones related queue attributes
- * and resources necessary for zone append emulation if necessary.
- */
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
+ /* For a zoned table, setup the zone related queue attributes. */
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (limits->features & BLK_FEAT_ZONED)) {
r = dm_set_zones_restrictions(t, q, limits);
if (r)
return r;
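
blk_queue_write_cache() and the DAX queue-flag toggles likewise collapse into feature-bit edits on the limits about to be applied; the old and new idioms correspond roughly one-to-one (sketch, assumed equivalence with the removed helper):

    /* before */
    blk_queue_write_cache(q, wc, fua);

    /* after: edit the limits before they are applied */
    if (wc)
            limits->features |= BLK_FEAT_WRITE_CACHE;
    if (fua)
            limits->features |= BLK_FEAT_FUA;

BLK_FEAT_DAX replaces QUEUE_FLAG_DAX the same way.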
@@ -2042,22 +1841,18 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (r)
return r;
- dm_update_crypto_profile(q, t);
-
/*
- * Check for request-based device is left to
- * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
- *
- * For bio-based device, only set QUEUE_FLAG_POLL when all
- * underlying devices supporting polling.
+ * Now that the limits are set, check the zones mapped by the table
+ * and setup the resources for zone append emulation if necessary.
*/
- if (__table_type_bio_based(t->type)) {
- if (dm_table_supports_poll(t))
- blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (limits->features & BLK_FEAT_ZONED)) {
+ r = dm_revalidate_zones(t, q);
+ if (r)
+ return r;
}
+ dm_update_crypto_profile(q, t);
return 0;
}
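
The reshuffled tail also pins down the ordering for zoned tables: dm_set_zones_restrictions() runs while the limits are still being assembled, while dm_revalidate_zones(), which may read the device and set up zone-append emulation, runs only once the limits, BLK_FEAT_ZONED included, are final. From the caller's side the function slots into the standard limits-update flow, along these lines (a sketch of the overall shape, not DM's exact call chain; the limits are ultimately applied via queue_limits_set() or queue_limits_commit_update()):

    struct queue_limits lim;
    int err;

    err = dm_calculate_queue_limits(t, &lim);       /* stack every target */
    if (!err)
            err = dm_table_set_restrictions(t, q, &lim);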