| field     | value                                            |                           |
|-----------|--------------------------------------------------|---------------------------|
| author    | Linus Torvalds <torvalds@linux-foundation.org>   | 2022-01-12 15:46:11 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>   | 2022-01-12 15:46:11 -0800 |
| commit    | 3acbdbf42e943d85174401357a6b6243479d4c76 (patch) |                           |
| tree      | 94d092eedc0e24f611a14a4fcceb9d3643b7ac25         | /drivers/md               |
| parent    | 8834147f9505661859ce44549bf601e2a06bba7c (diff)  |                           |
| parent    | 9e05e95ca8dae8de4a7a1645014e1bbd9c8a4dab (diff)  |                           |
Merge tag 'libnvdimm-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull dax and libnvdimm updates from Dan Williams:
"The bulk of this is a rework of the dax_operations API after
discovering the obstacles it posed to the work-in-progress DAX+reflink
support for XFS and other copy-on-write filesystem mechanics.
Primarily the need to plumb a block_device through the API to handle
partition offsets was a sticking point and Christoph untangled that
dependency in addition to other cleanups to make landing the
DAX+reflink support easier.
The DAX_PMEM_COMPAT option has been around for 4 years and not only
are distributions shipping userspace that understand the current
configuration API, but some are not even bothering to turn this option
on anymore, so it seems a good time to remove it per the deprecation
schedule. Recall that this was added after the device-dax subsystem
moved from /sys/class/dax to /sys/bus/dax for its sysfs organization.
All recent functionality depends on /sys/bus/dax.
Some other miscellaneous cleanups and reflink prep patches are
included as well.
Summary:
- Simplify the dax_operations API:
- Eliminate bdev_dax_pgoff() in favor of the filesystem
maintaining and applying a partition offset to all its DAX iomap
operations.
- Remove wrappers and device-mapper stacked callbacks for
->copy_from_iter() and ->copy_to_iter() in favor of moving
block_device relative offset responsibility to the
dax_direct_access() caller.
- Remove the need for an @bdev in filesystem-DAX infrastructure
- Remove unused uio helpers copy_from_iter_flushcache() and
copy_mc_to_iter() as only the non-check_copy_size() versions are
used for DAX.
- Prepare XFS for the pending (next merge window) DAX+reflink support
- Remove deprecated DEV_DAX_PMEM_COMPAT support
- Cleanup a straggling misuse of the GUID api"
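[Editor's note] To make the new calling convention concrete before the shortlog and diff, here is a minimal, hypothetical sketch of the pattern the summary describes and that the dm-linear/dm-stripe hunks below adopt: with bdev_dax_pgoff() gone, the dax_direct_access() caller folds the partition start of the underlying block device into the page offset itself. The `example_*` names and context structure are invented for illustration and are not kernel API; the dax_direct_access() signature shown is the 5.17-era one used in this diff.

```c
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/device-mapper.h>
#include <linux/pfn_t.h>

/* hypothetical per-target context; a real target keeps this in ti->private */
struct example_ctx {
	struct dm_dev *dev;	/* provides ->bdev and ->dax_dev */
};

static struct dax_device *example_dax_pgoff(struct example_ctx *ec, pgoff_t *pgoff)
{
	/* translate a partition-relative page offset into a whole-device one */
	*pgoff += get_start_sect(ec->dev->bdev) >> PAGE_SECTORS_SHIFT;
	return ec->dev->dax_dev;
}

static long example_dax_direct_access(struct example_ctx *ec, pgoff_t pgoff,
				      long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct dax_device *dax_dev = example_dax_pgoff(ec, &pgoff);

	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
```

The implicit assumption, here as in the real helpers, is that the partition start is page aligned, so the shift loses no sectors; the DAX core is expected to have verified that when the device was set up for DAX.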
* tag 'libnvdimm-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (38 commits)
iomap: Fix error handling in iomap_zero_iter()
ACPI: NFIT: Import GUID before use
dax: remove the copy_from_iter and copy_to_iter methods
dax: remove the DAXDEV_F_SYNC flag
dax: simplify dax_synchronous and set_dax_synchronous
uio: remove copy_from_iter_flushcache() and copy_mc_to_iter()
iomap: turn the byte variable in iomap_zero_iter into a ssize_t
memremap: remove support for external pgmap refcounts
fsdax: don't require CONFIG_BLOCK
iomap: build the block based code conditionally
dax: fix up some of the block device related ifdefs
fsdax: shift partition offset handling into the file systems
dax: return the partition offset from fs_dax_get_by_bdev
iomap: add a IOMAP_DAX flag
xfs: pass the mapping flags to xfs_bmbt_to_iomap
xfs: use xfs_direct_write_iomap_ops for DAX zeroing
xfs: move dax device handling into xfs_{alloc,free}_buftarg
ext4: cleanup the dax handling in ext4_fill_super
ext2: cleanup the dax handling in ext2_fill_super
fsdax: decouple zeroing from the iomap buffered I/O code
...
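[Editor's note] Two of the subjects above, "fsdax: shift partition offset handling into the file systems" and "dax: return the partition offset from fs_dax_get_by_bdev", describe the filesystem side of the same rework. A rough, hypothetical sketch of that convention follows; `example_*` names are invented, and the two-argument fs_dax_get_by_bdev() is the 5.17 form:

```c
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/iomap.h>

/* hypothetical per-superblock info; real filesystems keep this in their sb_info */
struct example_sb_info {
	struct dax_device *dax_dev;
	u64 dax_part_off;	/* partition start, in bytes */
};

static int example_setup_dax(struct example_sb_info *sbi, struct block_device *bdev)
{
	/* one lookup at mount time; the partition offset comes back with it */
	sbi->dax_dev = fs_dax_get_by_bdev(bdev, &sbi->dax_part_off);
	if (!sbi->dax_dev)
		return -EOPNOTSUPP;
	return 0;
}

/* when building an iomap for a DAX operation, apply the stored offset */
static void example_set_dax_iomap(struct iomap *iomap, struct example_sb_info *sbi,
				  u64 disk_addr, u64 length)
{
	iomap->type = IOMAP_MAPPED;
	iomap->dax_dev = sbi->dax_dev;
	iomap->addr = disk_addr + sbi->dax_part_off;	/* now device relative */
	iomap->length = length;
}
```

This is roughly what the ext2, ext4 and xfs changes in this tag do at mount time and in their iomap callbacks; the field names above are made up.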
Diffstat (limited to 'drivers/md')
| mode       | file                       | lines changed |
|------------|----------------------------|---------------|
| -rw-r--r-- | drivers/md/dm-linear.c     | 63            |
| -rw-r--r-- | drivers/md/dm-log-writes.c | 110           |
| -rw-r--r-- | drivers/md/dm-stripe.c     | 75            |
| -rw-r--r-- | drivers/md/dm-table.c      | 22            |
| -rw-r--r-- | drivers/md/dm-writecache.c | 2             |
| -rw-r--r-- | drivers/md/dm.c            | 89            |
| -rw-r--r-- | drivers/md/dm.h            | 4             |
7 files changed, 58 insertions, 307 deletions
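[Editor's note] Besides the pgoff change, the dm.c hunks below move device-mapper to the reworked DAX host registration: alloc_dax() no longer takes a host name or flags, set_dax_nocache()/set_dax_nomc() select the plain copy routines instead of the cache-flushing and machine-check-safe ones, and dax_add_host() binds the dax_device to the gendisk. A hypothetical driver-side sketch of that sequence (the `example_*` names are invented, calls are the 5.17-era DAX API):

```c
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/err.h>

static int example_add_dax_host(struct gendisk *disk, void *private,
				const struct dax_operations *ops,
				struct dax_device **out)
{
	struct dax_device *dax_dev;
	int rc;

	dax_dev = alloc_dax(private, ops);	/* 5.17 form: no host name, no flags */
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	set_dax_nocache(dax_dev);	/* use plain, non-flushcache copies */
	set_dax_nomc(dax_dev);		/* skip the machine-check-safe copy path */

	rc = dax_add_host(dax_dev, disk);
	if (rc) {
		kill_dax(dax_dev);
		put_dax(dax_dev);
		return rc;
	}
	*out = dax_dev;
	return 0;
}

static void example_remove_dax_host(struct gendisk *disk, struct dax_device *dax_dev)
{
	/* teardown order used by cleanup_mapped_device() in the hunk below */
	dax_remove_host(disk);
	kill_dax(dax_dev);
	put_dax(dax_dev);
}
```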
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 66ba16713f69..1b97a11d7151 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -162,71 +162,34 @@ static int linear_iterate_devices(struct dm_target *ti,
 	return fn(ti, lc->dev, lc->start, ti->len, data);
 }
 
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-		long nr_pages, void **kaddr, pfn_t *pfn)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 {
-	long ret;
 	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
-	if (ret)
-		return ret;
-	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
-}
+	sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT);
 
-static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
-		void *addr, size_t bytes, struct iov_iter *i)
-{
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
-	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+	*pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT;
+	return lc->dev->dax_dev;
 }
 
-static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
-		void *addr, size_t bytes, struct iov_iter *i)
+static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+		long nr_pages, void **kaddr, pfn_t *pfn)
 {
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
-	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
+	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
 static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
 		size_t nr_pages)
 {
-	int ret;
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
-	if (ret)
-		return ret;
+	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
 	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
 }
 
 #else
 #define linear_dax_direct_access NULL
-#define linear_dax_copy_from_iter NULL
-#define linear_dax_copy_to_iter NULL
 #define linear_dax_zero_page_range NULL
 #endif
 
@@ -244,8 +207,6 @@ static struct target_type linear_target = {
 	.prepare_ioctl = linear_prepare_ioctl,
 	.iterate_devices = linear_iterate_devices,
 	.direct_access = linear_dax_direct_access,
-	.dax_copy_from_iter = linear_dax_copy_from_iter,
-	.dax_copy_to_iter = linear_dax_copy_to_iter,
 	.dax_zero_page_range = linear_dax_zero_page_range,
 };
 
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 0b3ef977ceeb..139b09b06eda 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -901,120 +901,34 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
 	limits->io_min = limits->physical_block_size;
 }
 
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
-		   struct iov_iter *i)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
+		pgoff_t *pgoff)
 {
-	struct pending_block *block;
-
-	if (!bytes)
-		return 0;
-
-	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
-	if (!block) {
-		DMERR("Error allocating dax pending block");
-		return -ENOMEM;
-	}
-
-	block->data = kzalloc(bytes, GFP_KERNEL);
-	if (!block->data) {
-		DMERR("Error allocating dax data space");
-		kfree(block);
-		return -ENOMEM;
-	}
-
-	/* write data provided via the iterator */
-	if (!copy_from_iter(block->data, bytes, i)) {
-		DMERR("Error copying dax data");
-		kfree(block->data);
-		kfree(block);
-		return -EIO;
-	}
-
-	/* rewind the iterator so that the block driver can use it */
-	iov_iter_revert(i, bytes);
-
-	block->datalen = bytes;
-	block->sector = bio_to_dev_sectors(lc, sector);
-	block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
-
-	atomic_inc(&lc->pending_blocks);
-	spin_lock_irq(&lc->blocks_lock);
-	list_add_tail(&block->list, &lc->unflushed_blocks);
-	spin_unlock_irq(&lc->blocks_lock);
-	wake_up_process(lc->log_kthread);
+	struct log_writes_c *lc = ti->private;
 
-	return 0;
+	*pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT);
+	return lc->dev->dax_dev;
 }
 
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {
-	struct log_writes_c *lc = ti->private;
-	sector_t sector = pgoff * PAGE_SECTORS;
-	int ret;
-
-	ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
-	if (ret)
-		return ret;
-	return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
-}
-
-static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
-		pgoff_t pgoff, void *addr, size_t bytes,
-		struct iov_iter *i)
-{
-	struct log_writes_c *lc = ti->private;
-	sector_t sector = pgoff * PAGE_SECTORS;
-	int err;
-
-	if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
-
-	/* Don't bother doing anything if logging has been disabled */
-	if (!lc->logging_enabled)
-		goto dax_copy;
-
-	err = log_dax(lc, sector, bytes, i);
-	if (err) {
-		DMWARN("Error %d logging DAX write", err);
-		return 0;
-	}
-dax_copy:
-	return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
-		pgoff_t pgoff, void *addr, size_t bytes,
-		struct iov_iter *i)
-{
-	struct log_writes_c *lc = ti->private;
-	sector_t sector = pgoff * PAGE_SECTORS;
+	struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
 
-	if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
-	return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
 static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
 		size_t nr_pages)
 {
-	int ret;
-	struct log_writes_c *lc = ti->private;
-	sector_t sector = pgoff * PAGE_SECTORS;
-
-	ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
-			     &pgoff);
-	if (ret)
-		return ret;
-	return dax_zero_page_range(lc->dev->dax_dev, pgoff,
-				   nr_pages << PAGE_SHIFT);
+	struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
+
+	return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
 }
 
 #else
 #define log_writes_dax_direct_access NULL
-#define log_writes_dax_copy_from_iter NULL
-#define log_writes_dax_copy_to_iter NULL
 #define log_writes_dax_zero_page_range NULL
 #endif
 
@@ -1032,8 +946,6 @@ static struct target_type log_writes_target = {
 	.iterate_devices = log_writes_iterate_devices,
 	.io_hints = log_writes_io_hints,
 	.direct_access = log_writes_dax_direct_access,
-	.dax_copy_from_iter = log_writes_dax_copy_from_iter,
-	.dax_copy_to_iter = log_writes_dax_copy_to_iter,
 	.dax_zero_page_range = log_writes_dax_zero_page_range,
 };
 
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 6660b6b53d5b..e566115ec0bb 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -300,91 +300,40 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-		long nr_pages, void **kaddr, pfn_t *pfn)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 {
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
 	struct stripe_c *sc = ti->private;
-	struct dax_device *dax_dev;
 	struct block_device *bdev;
+	sector_t dev_sector;
 	uint32_t stripe;
-	long ret;
 
-	stripe_map_sector(sc, sector, &stripe, &dev_sector);
+	stripe_map_sector(sc, *pgoff * PAGE_SECTORS, &stripe, &dev_sector);
 	dev_sector += sc->stripe[stripe].physical_start;
-	dax_dev = sc->stripe[stripe].dev->dax_dev;
 	bdev = sc->stripe[stripe].dev->bdev;
 
-	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
-	if (ret)
-		return ret;
-	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+	*pgoff = (get_start_sect(bdev) + dev_sector) >> PAGE_SECTORS_SHIFT;
+	return sc->stripe[stripe].dev->dax_dev;
 }
 
-static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
-		void *addr, size_t bytes, struct iov_iter *i)
+static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+		long nr_pages, void **kaddr, pfn_t *pfn)
 {
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-	struct stripe_c *sc = ti->private;
-	struct dax_device *dax_dev;
-	struct block_device *bdev;
-	uint32_t stripe;
-
-	stripe_map_sector(sc, sector, &stripe, &dev_sector);
-	dev_sector += sc->stripe[stripe].physical_start;
-	dax_dev = sc->stripe[stripe].dev->dax_dev;
-	bdev = sc->stripe[stripe].dev->bdev;
+	struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
 
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
-	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
-		void *addr, size_t bytes, struct iov_iter *i)
-{
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-	struct stripe_c *sc = ti->private;
-	struct dax_device *dax_dev;
-	struct block_device *bdev;
-	uint32_t stripe;
-
-	stripe_map_sector(sc, sector, &stripe, &dev_sector);
-	dev_sector += sc->stripe[stripe].physical_start;
-	dax_dev = sc->stripe[stripe].dev->dax_dev;
-	bdev = sc->stripe[stripe].dev->bdev;
-
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
-		return 0;
-	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
 }
 
 static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
 		size_t nr_pages)
 {
-	int ret;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-	struct stripe_c *sc = ti->private;
-	struct dax_device *dax_dev;
-	struct block_device *bdev;
-	uint32_t stripe;
-
-	stripe_map_sector(sc, sector, &stripe, &dev_sector);
-	dev_sector += sc->stripe[stripe].physical_start;
-	dax_dev = sc->stripe[stripe].dev->dax_dev;
-	bdev = sc->stripe[stripe].dev->bdev;
+	struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
 
-	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
-	if (ret)
-		return ret;
 	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
 }
 
 #else
 #define stripe_dax_direct_access NULL
-#define stripe_dax_copy_from_iter NULL
-#define stripe_dax_copy_to_iter NULL
 #define stripe_dax_zero_page_range NULL
 #endif
 
@@ -521,8 +470,6 @@ static struct target_type stripe_target = {
 	.iterate_devices = stripe_iterate_devices,
 	.io_hints = stripe_io_hints,
 	.direct_access = stripe_dax_direct_access,
-	.dax_copy_from_iter = stripe_dax_copy_from_iter,
-	.dax_copy_to_iter = stripe_dax_copy_to_iter,
 	.dax_zero_page_range = stripe_dax_zero_page_range,
 };
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aa173f5bdc3d..e43096cfe9e2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -806,12 +806,14 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
 /* validate the dax capability of the target device span */
-int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
-	int blocksize = *(int *) data;
+	if (dev->dax_dev)
+		return false;
 
-	return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+	DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
+	return true;
 }
 
 /* Check devices support synchronous DAX */
@@ -821,8 +823,8 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de
 	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
 }
 
-bool dm_table_supports_dax(struct dm_table *t,
-			   iterate_devices_callout_fn iterate_fn, int *blocksize)
+static bool dm_table_supports_dax(struct dm_table *t,
+			iterate_devices_callout_fn iterate_fn)
 {
 	struct dm_target *ti;
 	unsigned i;
@@ -835,7 +837,7 @@ bool dm_table_supports_dax(struct dm_table *t,
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
+		    ti->type->iterate_devices(ti, iterate_fn, NULL))
 			return false;
 	}
 
@@ -862,7 +864,6 @@ static int dm_table_determine_type(struct dm_table *t)
 	struct dm_target *tgt;
 	struct list_head *devices = dm_table_get_devices(t);
 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
-	int page_size = PAGE_SIZE;
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
@@ -906,7 +907,7 @@ static int dm_table_determine_type(struct dm_table *t)
 verify_bio_based:
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
-		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
+		if (dm_table_supports_dax(t, device_not_dax_capable) ||
 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 			t->type = DM_TYPE_DAX_BIO_BASED;
 		}
@@ -1976,7 +1977,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			      struct queue_limits *limits)
 {
 	bool wc = false, fua = false;
-	int page_size = PAGE_SIZE;
 	int r;
 
 	/*
@@ -2010,9 +2010,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
-	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
+	if (dm_table_supports_dax(t, device_not_dax_capable)) {
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
-		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
+		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
 			set_dax_synchronous(t->md->dax_dev);
 	}
 	else
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4b8991cde223..4f31591d2d25 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -38,7 +38,7 @@
 #define BITMAP_GRANULARITY	PAGE_SIZE
 #endif
 
-#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
+#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_FS_DAX)
 #define DM_WRITECACHE_HAS_PMEM
 #endif
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 280918cdcabd..c0ae8087c602 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -637,7 +637,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
 			     struct mapped_device *md)
 {
 	struct block_device *bdev;
-
+	u64 part_off;
 	int r;
 
 	BUG_ON(td->dm_dev.bdev);
@@ -653,7 +653,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
 	}
 
 	td->dm_dev.bdev = bdev;
-	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
+	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
 	return 0;
 }
 
@@ -1027,74 +1027,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
-static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
-		int blocksize, sector_t start, sector_t len)
-{
-	struct mapped_device *md = dax_get_private(dax_dev);
-	struct dm_table *map;
-	bool ret = false;
-	int srcu_idx;
-
-	map = dm_get_live_table(md, &srcu_idx);
-	if (!map)
-		goto out;
-
-	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
-
-out:
-	dm_put_live_table(md, srcu_idx);
-
-	return ret;
-}
-
-static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-				    void *addr, size_t bytes, struct iov_iter *i)
-{
-	struct mapped_device *md = dax_get_private(dax_dev);
-	sector_t sector = pgoff * PAGE_SECTORS;
-	struct dm_target *ti;
-	long ret = 0;
-	int srcu_idx;
-
-	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
-	if (!ti)
-		goto out;
-	if (!ti->type->dax_copy_from_iter) {
-		ret = copy_from_iter(addr, bytes, i);
-		goto out;
-	}
-	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
- out:
-	dm_put_live_table(md, srcu_idx);
-
-	return ret;
-}
-
-static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-		void *addr, size_t bytes, struct iov_iter *i)
-{
-	struct mapped_device *md = dax_get_private(dax_dev);
-	sector_t sector = pgoff * PAGE_SECTORS;
-	struct dm_target *ti;
-	long ret = 0;
-	int srcu_idx;
-
-	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
-	if (!ti)
-		goto out;
-	if (!ti->type->dax_copy_to_iter) {
-		ret = copy_to_iter(addr, bytes, i);
-		goto out;
-	}
-	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
- out:
-	dm_put_live_table(md, srcu_idx);
-
-	return ret;
-}
-
 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 				  size_t nr_pages)
 {
@@ -1683,6 +1615,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	bioset_exit(&md->io_bs);
 
 	if (md->dax_dev) {
+		dax_remove_host(md->disk);
 		kill_dax(md->dax_dev);
 		put_dax(md->dax_dev);
 		md->dax_dev = NULL;
@@ -1784,10 +1717,15 @@ static struct mapped_device *alloc_dev(int minor)
 	md->disk->private_data = md;
 	sprintf(md->disk->disk_name, "dm-%d", minor);
 
-	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
-		md->dax_dev = alloc_dax(md, md->disk->disk_name,
-					&dm_dax_ops, 0);
-		if (IS_ERR(md->dax_dev))
+	if (IS_ENABLED(CONFIG_FS_DAX)) {
+		md->dax_dev = alloc_dax(md, &dm_dax_ops);
+		if (IS_ERR(md->dax_dev)) {
+			md->dax_dev = NULL;
+			goto bad;
+		}
+		set_dax_nocache(md->dax_dev);
+		set_dax_nomc(md->dax_dev);
+		if (dax_add_host(md->dax_dev, md->disk))
 			goto bad;
 	}
 
@@ -3041,9 +2979,6 @@ static const struct block_device_operations dm_rq_blk_dops = {
 
 static const struct dax_operations dm_dax_ops = {
 	.direct_access = dm_dax_direct_access,
-	.dax_supported = dm_dax_supported,
-	.copy_from_iter = dm_dax_copy_from_iter,
-	.copy_to_iter = dm_dax_copy_to_iter,
 	.zero_page_range = dm_dax_zero_page_range,
 };
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 742d9c80efe1..9013dc1a7b00 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,10 +73,6 @@ bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
-bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
-		int *blocksize);
-int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
-			   sector_t start, sector_t len, void *data);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);