Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig                      |  43
-rw-r--r--  drivers/md/Makefile                     |   2
-rw-r--r--  drivers/md/bitmap.c                     | 189
-rw-r--r--  drivers/md/bitmap.h                     |  10
-rw-r--r--  drivers/md/dm-cache-policy-mq.c         | 251
-rw-r--r--  drivers/md/dm-crypt.c                   |  13
-rw-r--r--  drivers/md/dm-delay.c                   |   2
-rw-r--r--  drivers/md/dm-io.c                      |  15
-rw-r--r--  drivers/md/dm-ioctl.c                   |  17
-rw-r--r--  drivers/md/dm-log-userspace-base.c      |  91
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c  |   5
-rw-r--r--  drivers/md/dm-log-writes.c              | 825
-rw-r--r--  drivers/md/dm-mpath.c                   |   6
-rw-r--r--  drivers/md/dm-snap.c                    | 120
-rw-r--r--  drivers/md/dm-sysfs.c                   |  43
-rw-r--r--  drivers/md/dm-table.c                   |  71
-rw-r--r--  drivers/md/dm-thin.c                    |  11
-rw-r--r--  drivers/md/dm-verity.c                  | 147
-rw-r--r--  drivers/md/dm.c                         | 618
-rw-r--r--  drivers/md/dm.h                         |  10
-rw-r--r--  drivers/md/md-cluster.c                 | 965
-rw-r--r--  drivers/md/md-cluster.h                 |  29
-rw-r--r--  drivers/md/md.c                         | 409
-rw-r--r--  drivers/md/md.h                         |  26
-rw-r--r--  drivers/md/raid0.c                      |  53
-rw-r--r--  drivers/md/raid1.c                      |  34
-rw-r--r--  drivers/md/raid10.c                     |   8
-rw-r--r--  drivers/md/raid5.c                      | 839
-rw-r--r--  drivers/md/raid5.h                      |  59
29 files changed, 4246 insertions(+), 665 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 63e05e32b462..edcf4ab66e00 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -175,6 +175,22 @@ config MD_FAULTY
If unsure, say N.
+
+config MD_CLUSTER
+ tristate "Cluster Support for MD (EXPERIMENTAL)"
+ depends on BLK_DEV_MD
+ depends on DLM
+ default n
+ ---help---
+ Clustering support for MD devices. This enables locking and
+ synchronization across multiple systems on the cluster, so all
+ nodes in the cluster can access the MD devices simultaneously.
+
+ This brings the redundancy (and uptime) of RAID levels across the
+ nodes of the cluster.
+
+ If unsure, say N.
+
source "drivers/md/bcache/Kconfig"
config BLK_DEV_DM_BUILTIN
@@ -196,6 +212,17 @@ config BLK_DEV_DM
If unsure, say N.
+config DM_MQ_DEFAULT
+ bool "request-based DM: use blk-mq I/O path by default"
+ depends on BLK_DEV_DM
+ ---help---
+ This option enables the blk-mq based I/O path for request-based
+ DM devices by default. With the option the dm_mod.use_blk_mq
+ module/boot option defaults to Y, without it to N, but it can
+ still be overridden either way.
+
+ If unsure, say N.
+
config DM_DEBUG
bool "Device mapper debugging support"
depends on BLK_DEV_DM
@@ -432,4 +459,20 @@ config DM_SWITCH
If unsure, say N.
+config DM_LOG_WRITES
+ tristate "Log writes target support"
+ depends on BLK_DEV_DM
+ ---help---
+ This device-mapper target takes two devices, one device to use
+ normally, one to log all write operations done to the first device.
+ This is for use by file system developers wishing to verify that
+ their fs is writing a consistent file system at all times by allowing
+ them to replay the log in a variety of ways and to check the
+ contents.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-log-writes.
+
+ If unsure, say N.
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index a2da532b1c2b..dba4db5985fb 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
+obj-$(CONFIG_MD_CLUSTER) += md-cluster.o
obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
+obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3a5767968ba0..2bc56e2a3526 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -205,6 +205,10 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+ int node_offset = 0;
+
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
@@ -433,6 +437,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* This might have been changed by a reshape */
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
+ sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
bitmap_info.space);
kunmap_atomic(sb);
@@ -544,6 +549,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
unsigned long long events;
+ int nodes = 0;
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
@@ -562,6 +568,22 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_page = sb_page;
+re_read:
+ /* If cluster_slot is set, the cluster is set up */
+ if (bitmap->cluster_slot >= 0) {
+ sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
+
+ sector_div(bm_blocks,
+ bitmap->mddev->bitmap_info.chunksize >> 9);
+ /* bits to bytes */
+ bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
+ /* to 4k blocks */
+ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
+ bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ }
+
if (bitmap->storage.file) {
loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
@@ -577,12 +599,15 @@ static int bitmap_read_sb(struct bitmap *bitmap)
if (err)
return err;
+ err = -EINVAL;
sb = kmap_atomic(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -619,7 +644,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
goto out;
}
events = le64_to_cpu(sb->events);
- if (events < bitmap->mddev->events) {
+ if (!nodes && (events < bitmap->mddev->events)) {
printk(KERN_INFO
"%s: bitmap file is out of date (%llu < %llu) "
"-- forcing full recovery\n",
@@ -634,20 +659,40 @@ static int bitmap_read_sb(struct bitmap *bitmap)
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
err = 0;
+
out:
kunmap_atomic(sb);
+ /* Assigning chunksize is required for "re_read" */
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ if (nodes && (bitmap->cluster_slot < 0)) {
+ err = md_setup_cluster(bitmap->mddev, nodes);
+ if (err) {
+ pr_err("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
+ goto out_no_sb;
+ }
+ bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
+ goto re_read;
+ }
+
+
out_no_sb:
if (test_bit(BITMAP_STALE, &bitmap->flags))
bitmap->events_cleared = bitmap->mddev->events;
bitmap->mddev->bitmap_info.chunksize = chunksize;
bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.nodes = nodes;
if (bitmap->mddev->bitmap_info.space == 0 ||
bitmap->mddev->bitmap_info.space > sectors_reserved)
bitmap->mddev->bitmap_info.space = sectors_reserved;
- if (err)
+ if (err) {
bitmap_print_sb(bitmap);
+ if (bitmap->cluster_slot < 0)
+ md_cluster_stop(bitmap->mddev);
+ }
return err;
}
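
The re_read path above positions each cluster node's private bitmap copy purely by size: number of chunks, one bit per chunk plus the superblock, rounded up to whole 4KiB blocks, times eight to convert to 512-byte sectors. A minimal userspace sketch of that arithmetic (not kernel code; the 256 stands in for sizeof(bitmap_super_t) as laid out in bitmap.h below):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t slot_offset_sectors(uint64_t resync_max_sectors,
                                        uint32_t chunksize_bytes, int slot)
    {
            uint64_t chunks = resync_max_sectors / (chunksize_bytes >> 9);
            /* one bit per chunk, plus the superblock, in bytes */
            uint64_t bytes = ((chunks + 7) >> 3) + 256;
            /* round up to whole 4KiB blocks */
            uint64_t blocks_4k = (bytes + 4095) / 4096;
            /* each 4KiB block covers 8 sectors of 512 bytes */
            return (uint64_t)slot * (blocks_4k << 3);
    }

    int main(void)
    {
            /* e.g. a 1TiB array with 64MiB bitmap chunks, slot 2 */
            printf("%llu\n", (unsigned long long)
                   slot_offset_sectors(1ULL << 31, 64u << 20, 2));
            return 0;
    }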
@@ -692,9 +737,10 @@ static inline struct page *filemap_get_page(struct bitmap_storage *store,
}
static int bitmap_storage_alloc(struct bitmap_storage *store,
- unsigned long chunks, int with_super)
+ unsigned long chunks, int with_super,
+ int slot_number)
{
- int pnum;
+ int pnum, offset = 0;
unsigned long num_pages;
unsigned long bytes;
@@ -703,6 +749,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
bytes += sizeof(bitmap_super_t);
num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+ offset = slot_number * (num_pages - 1);
store->filemap = kmalloc(sizeof(struct page *)
* num_pages, GFP_KERNEL);
@@ -713,20 +760,22 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (store->sb_page == NULL)
return -ENOMEM;
- store->sb_page->index = 0;
}
+
pnum = 0;
if (store->sb_page) {
store->filemap[0] = store->sb_page;
pnum = 1;
+ store->sb_page->index = offset;
}
+
for ( ; pnum < num_pages; pnum++) {
store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!store->filemap[pnum]) {
store->file_pages = pnum;
return -ENOMEM;
}
- store->filemap[pnum]->index = pnum;
+ store->filemap[pnum]->index = pnum + offset;
}
store->file_pages = pnum;
@@ -885,6 +934,28 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
}
}
+static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
+{
+ unsigned long bit;
+ struct page *page;
+ void *paddr;
+ unsigned long chunk = block >> bitmap->counts.chunkshift;
+ int set = 0;
+
+ page = filemap_get_page(&bitmap->storage, chunk);
+ if (!page)
+ return -EINVAL;
+ bit = file_page_offset(&bitmap->storage, chunk);
+ paddr = kmap_atomic(page);
+ if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
+ set = test_bit(bit, paddr);
+ else
+ set = test_bit_le(bit, paddr);
+ kunmap_atomic(paddr);
+ return set;
+}
+
+
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
@@ -935,7 +1006,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
*/
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
- unsigned long i, chunks, index, oldindex, bit;
+ unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
struct page *page = NULL;
unsigned long bit_cnt = 0;
struct file *file;
@@ -981,6 +1052,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (!bitmap->mddev->bitmap_info.external)
offset = sizeof(bitmap_super_t);
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
+
for (i = 0; i < chunks; i++) {
int b;
index = file_page_index(&bitmap->storage, i);
@@ -1001,7 +1075,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bitmap->mddev,
bitmap->mddev->bitmap_info.offset,
page,
- index, count);
+ index + node_offset, count);
if (ret)
goto err;
@@ -1207,7 +1281,6 @@ void bitmap_daemon_work(struct mddev *mddev)
j < bitmap->storage.file_pages
&& !test_bit(BITMAP_STALE, &bitmap->flags);
j++) {
-
if (test_page_attr(bitmap, j,
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
@@ -1530,11 +1603,13 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
return;
}
if (!*bmc) {
- *bmc = 2 | (needed ? NEEDED_MASK : 0);
+ *bmc = 2;
bitmap_count_page(&bitmap->counts, offset, 1);
bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
+ if (needed)
+ *bmc |= NEEDED_MASK;
spin_unlock_irq(&bitmap->counts.lock);
}
@@ -1591,6 +1666,10 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
+ if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
+ bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
+ md_cluster_stop(bitmap->mddev);
+
/* Shouldn't be needed - but just in case.... */
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes) == 0);
@@ -1636,7 +1715,7 @@ void bitmap_destroy(struct mddev *mddev)
* initialize the bitmap structure
* if this returns an error, bitmap_destroy must be called to do clean up
*/
-int bitmap_create(struct mddev *mddev)
+struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
@@ -1650,7 +1729,7 @@ int bitmap_create(struct mddev *mddev)
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&bitmap->counts.lock);
atomic_set(&bitmap->pending_writes, 0);
@@ -1659,6 +1738,7 @@ int bitmap_create(struct mddev *mddev)
init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
+ bitmap->cluster_slot = slot;
if (mddev->kobj.sd)
bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
@@ -1706,12 +1786,14 @@ int bitmap_create(struct mddev *mddev)
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
bitmap->counts.pages, bmname(bitmap));
- mddev->bitmap = bitmap;
- return test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
+ err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
+ if (err)
+ goto error;
+ return bitmap;
error:
bitmap_free(bitmap);
- return err;
+ return ERR_PTR(err);
}
int bitmap_load(struct mddev *mddev)
@@ -1765,6 +1847,60 @@ out:
}
EXPORT_SYMBOL_GPL(bitmap_load);
+/* Loads the bitmap associated with slot and copies the resync information
+ * to our bitmap
+ */
+int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *low, sector_t *high, bool clear_bits)
+{
+ int rv = 0, i, j;
+ sector_t block, lo = 0, hi = 0;
+ struct bitmap_counts *counts;
+ struct bitmap *bitmap = bitmap_create(mddev, slot);
+
+ if (IS_ERR(bitmap))
+ return PTR_ERR(bitmap);
+
+ rv = bitmap_read_sb(bitmap);
+ if (rv)
+ goto err;
+
+ rv = bitmap_init_from_disk(bitmap, 0);
+ if (rv)
+ goto err;
+
+ counts = &bitmap->counts;
+ for (j = 0; j < counts->chunks; j++) {
+ block = (sector_t)j << counts->chunkshift;
+ if (bitmap_file_test_bit(bitmap, block)) {
+ if (!lo)
+ lo = block;
+ hi = block;
+ bitmap_file_clear_bit(bitmap, block);
+ bitmap_set_memory_bits(mddev->bitmap, block, 1);
+ bitmap_file_set_bit(mddev->bitmap, block);
+ }
+ }
+
+ if (clear_bits) {
+ bitmap_update_sb(bitmap);
+ /* Setting this for the ev_page should be enough.
+ * And we do not require both write_all and PAGE_DIRTY either
+ */
+ for (i = 0; i < bitmap->storage.file_pages; i++)
+ set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
+ bitmap_write_all(bitmap);
+ bitmap_unplug(bitmap);
+ }
+ *low = lo;
+ *high = hi;
+err:
+ bitmap_free(bitmap);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
+
+
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
unsigned long chunk_kb;
@@ -1849,7 +1985,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memset(&store, 0, sizeof(store));
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
ret = bitmap_storage_alloc(&store, chunks,
- !bitmap->mddev->bitmap_info.external);
+ !bitmap->mddev->bitmap_info.external,
+ bitmap->cluster_slot);
if (ret)
goto err;
@@ -2021,13 +2158,18 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
return -EINVAL;
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
+ struct bitmap *bitmap;
mddev->pers->quiesce(mddev, 1);
- rv = bitmap_create(mddev);
- if (!rv)
+ bitmap = bitmap_create(mddev, -1);
+ if (IS_ERR(bitmap))
+ rv = PTR_ERR(bitmap);
+ else {
+ mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
- if (rv) {
- bitmap_destroy(mddev);
- mddev->bitmap_info.offset = 0;
+ if (rv) {
+ bitmap_destroy(mddev);
+ mddev->bitmap_info.offset = 0;
+ }
}
mddev->pers->quiesce(mddev, 0);
if (rv)
@@ -2186,6 +2328,8 @@ __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
+ if (mddev_is_clustered(mddev))
+ return sprintf(page, "clustered\n");
return sprintf(page, "%s\n", (mddev->bitmap_info.external
? "external" : "internal"));
}
@@ -2198,7 +2342,8 @@ static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
return -EBUSY;
if (strncmp(buf, "external", 8) == 0)
mddev->bitmap_info.external = 1;
- else if (strncmp(buf, "internal", 8) == 0)
+ else if ((strncmp(buf, "internal", 8) == 0) ||
+ (strncmp(buf, "clustered", 9) == 0))
mddev->bitmap_info.external = 0;
else
return -EINVAL;
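
For context, a hedged sketch of how the bitmap_copy_from_slot() helper added above is meant to be driven; the real caller lives in md-cluster.c (outside this section), and dead_slot/request_resync() here are placeholders:

    sector_t lo = 0, hi = 0;
    int err;

    /* merge the failed node's bitmap into our own ... */
    err = bitmap_copy_from_slot(mddev, dead_slot, &lo, &hi, true);
    if (err)
            pr_err("md-cluster: failed to copy bitmap from slot %d\n",
                   dead_slot);
    else if (hi > 0)
            request_resync(mddev, lo, hi);  /* ... and resync the dirty range */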
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 30210b9c4ef9..f1f4dd01090d 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -130,8 +130,9 @@ typedef struct bitmap_super_s {
__le32 write_behind; /* 60 number of outstanding write-behind writes */
__le32 sectors_reserved; /* 64 number of 512-byte sectors that are
* reserved for the bitmap. */
-
- __u8 pad[256 - 68]; /* set to zero */
+ __le32 nodes; /* 68 the maximum number of nodes in cluster. */
+ __u8 cluster_name[64]; /* 72 cluster name to which this md belongs */
+ __u8 pad[256 - 136]; /* set to zero */
} bitmap_super_t;
/* notes:
@@ -226,12 +227,13 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct kernfs_node *sysfs_can_clear;
+ int cluster_slot; /* Slot offset for clustered env */
};
/* the bitmap API */
/* these are used only by md/bitmap */
-int bitmap_create(struct mddev *mddev);
+struct bitmap *bitmap_create(struct mddev *mddev, int slot);
int bitmap_load(struct mddev *mddev);
void bitmap_flush(struct mddev *mddev);
void bitmap_destroy(struct mddev *mddev);
@@ -260,6 +262,8 @@ void bitmap_daemon_work(struct mddev *mddev);
int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, int init);
+int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *lo, sector_t *hi, bool clear_bits);
#endif
#endif
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 13f547a4eeb6..3ddd1162334d 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -8,6 +8,7 @@
#include "dm.h"
#include <linux/hash.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -124,32 +125,41 @@ static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
* sorted queue.
*/
#define NR_QUEUE_LEVELS 16u
+#define NR_SENTINELS (NR_QUEUE_LEVELS * 3)
+
+#define WRITEBACK_PERIOD HZ
struct queue {
+ unsigned nr_elts;
+ bool current_writeback_sentinels;
+ unsigned long next_writeback;
struct list_head qs[NR_QUEUE_LEVELS];
+ struct list_head sentinels[NR_SENTINELS];
};
static void queue_init(struct queue *q)
{
unsigned i;
- for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ q->nr_elts = 0;
+ q->current_writeback_sentinels = false;
+ q->next_writeback = 0;
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
INIT_LIST_HEAD(q->qs + i);
+ INIT_LIST_HEAD(q->sentinels + i);
+ INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
+ INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
+ }
}
-/*
- * Checks to see if the queue is empty.
- * FIXME: reduce cpu usage.
- */
-static bool queue_empty(struct queue *q)
+static unsigned queue_size(struct queue *q)
{
- unsigned i;
-
- for (i = 0; i < NR_QUEUE_LEVELS; i++)
- if (!list_empty(q->qs + i))
- return false;
+ return q->nr_elts;
+}
- return true;
+static bool queue_empty(struct queue *q)
+{
+ return q->nr_elts == 0;
}
/*
@@ -157,24 +167,19 @@ static bool queue_empty(struct queue *q)
*/
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
+ q->nr_elts++;
list_add_tail(elt, q->qs + level);
}
-static void queue_remove(struct list_head *elt)
+static void queue_remove(struct queue *q, struct list_head *elt)
{
+ q->nr_elts--;
list_del(elt);
}
-/*
- * Shifts all regions down one level. This has no effect on the order of
- * the queue.
- */
-static void queue_shift_down(struct queue *q)
+static bool is_sentinel(struct queue *q, struct list_head *h)
{
- unsigned level;
-
- for (level = 1; level < NR_QUEUE_LEVELS; level++)
- list_splice_init(q->qs + level, q->qs + level - 1);
+ return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}
/*
@@ -184,10 +189,12 @@ static void queue_shift_down(struct queue *q)
static struct list_head *queue_peek(struct queue *q)
{
unsigned level;
+ struct list_head *h;
for (level = 0; level < NR_QUEUE_LEVELS; level++)
- if (!list_empty(q->qs + level))
- return q->qs[level].next;
+ list_for_each(h, q->qs + level)
+ if (!is_sentinel(q, h))
+ return h;
return NULL;
}
@@ -197,16 +204,34 @@ static struct list_head *queue_pop(struct queue *q)
struct list_head *r = queue_peek(q);
if (r) {
+ q->nr_elts--;
list_del(r);
-
- /* have we just emptied the bottom level? */
- if (list_empty(q->qs))
- queue_shift_down(q);
}
return r;
}
+/*
+ * Pops an entry from a level that is not past a sentinel.
+ */
+static struct list_head *queue_pop_old(struct queue *q)
+{
+ unsigned level;
+ struct list_head *h;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each(h, q->qs + level) {
+ if (is_sentinel(q, h))
+ break;
+
+ q->nr_elts--;
+ list_del(h);
+ return h;
+ }
+
+ return NULL;
+}
+
static struct list_head *list_pop(struct list_head *lh)
{
struct list_head *r = lh->next;
@@ -217,6 +242,62 @@ static struct list_head *list_pop(struct list_head *lh)
return r;
}
+static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
+{
+ if (q->current_writeback_sentinels)
+ return q->sentinels + NR_QUEUE_LEVELS + level;
+ else
+ return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
+}
+
+static void queue_update_writeback_sentinels(struct queue *q)
+{
+ unsigned i;
+ struct list_head *h;
+
+ if (time_after(jiffies, q->next_writeback)) {
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+ h = writeback_sentinel(q, i);
+ list_del(h);
+ list_add_tail(h, q->qs + i);
+ }
+
+ q->next_writeback = jiffies + WRITEBACK_PERIOD;
+ q->current_writeback_sentinels = !q->current_writeback_sentinels;
+ }
+}
+
+/*
+ * Sometimes we want to iterate through entries that have been pushed since
+ * a certain event. We use sentinel entries on the queues to delimit these
+ * 'tick' events.
+ */
+static void queue_tick(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+ list_del(q->sentinels + i);
+ list_add_tail(q->sentinels + i, q->qs + i);
+ }
+}
+
+typedef void (*iter_fn)(struct list_head *, void *);
+static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
+{
+ unsigned i;
+ struct list_head *h;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+ list_for_each_prev(h, q->qs + i) {
+ if (is_sentinel(q, h))
+ break;
+
+ fn(h, context);
+ }
+ }
+}
+
/*----------------------------------------------------------------*/
/*
@@ -232,8 +313,6 @@ struct entry {
*/
bool dirty:1;
unsigned hit_count;
- unsigned generation;
- unsigned tick;
};
/*
@@ -481,7 +560,6 @@ static bool in_cache(struct mq_policy *mq, struct entry *e)
*/
static void push(struct mq_policy *mq, struct entry *e)
{
- e->tick = mq->tick;
hash_insert(mq, e);
if (in_cache(mq, e))
@@ -496,7 +574,11 @@ static void push(struct mq_policy *mq, struct entry *e)
*/
static void del(struct mq_policy *mq, struct entry *e)
{
- queue_remove(&e->list);
+ if (in_cache(mq, e))
+ queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
+ else
+ queue_remove(&mq->pre_cache, &e->list);
+
hash_remove(e);
}
@@ -518,18 +600,24 @@ static struct entry *pop(struct mq_policy *mq, struct queue *q)
return e;
}
-static struct entry *peek(struct queue *q)
+static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
{
- struct list_head *h = queue_peek(q);
- return h ? container_of(h, struct entry, list) : NULL;
+ struct entry *e;
+ struct list_head *h = queue_pop_old(q);
+
+ if (!h)
+ return NULL;
+
+ e = container_of(h, struct entry, list);
+ hash_remove(e);
+
+ return e;
}
-/*
- * Has this entry already been updated?
- */
-static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
+static struct entry *peek(struct queue *q)
{
- return mq->tick == e->tick;
+ struct list_head *h = queue_peek(q);
+ return h ? container_of(h, struct entry, list) : NULL;
}
/*
@@ -583,20 +671,9 @@ static void check_generation(struct mq_policy *mq)
* Whenever we use an entry we bump up it's hit counter, and push it to the
* back to it's current level.
*/
-static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
+static void requeue(struct mq_policy *mq, struct entry *e)
{
- if (updated_this_tick(mq, e))
- return;
-
- e->hit_count++;
- mq->hit_count++;
check_generation(mq);
-
- /* generation adjustment, to stop the counts increasing forever. */
- /* FIXME: divide? */
- /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
- e->generation = mq->generation;
-
del(mq, e);
push(mq, e);
}
@@ -703,7 +780,7 @@ static int cache_entry_found(struct mq_policy *mq,
struct entry *e,
struct policy_result *result)
{
- requeue_and_update_tick(mq, e);
+ requeue(mq, e);
if (in_cache(mq, e)) {
result->op = POLICY_HIT;
@@ -740,8 +817,6 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
new_e->oblock = e->oblock;
new_e->dirty = false;
new_e->hit_count = e->hit_count;
- new_e->generation = e->generation;
- new_e->tick = e->tick;
del(mq, e);
free_entry(&mq->pre_cache_pool, e);
@@ -757,18 +832,16 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
int data_dir, struct policy_result *result)
{
int r = 0;
- bool updated = updated_this_tick(mq, e);
- if ((!discarded_oblock && updated) ||
- !should_promote(mq, e, discarded_oblock, data_dir)) {
- requeue_and_update_tick(mq, e);
+ if (!should_promote(mq, e, discarded_oblock, data_dir)) {
+ requeue(mq, e);
result->op = POLICY_MISS;
} else if (!can_migrate)
r = -EWOULDBLOCK;
else {
- requeue_and_update_tick(mq, e);
+ requeue(mq, e);
r = pre_cache_to_cache(mq, e, result);
}
@@ -795,7 +868,6 @@ static void insert_in_pre_cache(struct mq_policy *mq,
e->dirty = false;
e->oblock = oblock;
e->hit_count = 1;
- e->generation = mq->generation;
push(mq, e);
}
@@ -828,7 +900,6 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
e->oblock = oblock;
e->dirty = false;
e->hit_count = 1;
- e->generation = mq->generation;
push(mq, e);
result->cblock = infer_cblock(&mq->cache_pool, e);
@@ -905,12 +976,37 @@ static void mq_destroy(struct dm_cache_policy *p)
kfree(mq);
}
+static void update_pre_cache_hits(struct list_head *h, void *context)
+{
+ struct entry *e = container_of(h, struct entry, list);
+ e->hit_count++;
+}
+
+static void update_cache_hits(struct list_head *h, void *context)
+{
+ struct mq_policy *mq = context;
+ struct entry *e = container_of(h, struct entry, list);
+ e->hit_count++;
+ mq->hit_count++;
+}
+
static void copy_tick(struct mq_policy *mq)
{
- unsigned long flags;
+ unsigned long flags, tick;
spin_lock_irqsave(&mq->tick_lock, flags);
- mq->tick = mq->tick_protected;
+ tick = mq->tick_protected;
+ if (tick != mq->tick) {
+ queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
+ queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
+ queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
+ mq->tick = tick;
+ }
+
+ queue_tick(&mq->pre_cache);
+ queue_tick(&mq->cache_dirty);
+ queue_tick(&mq->cache_clean);
+ queue_update_writeback_sentinels(&mq->cache_dirty);
spin_unlock_irqrestore(&mq->tick_lock, flags);
}
@@ -1001,7 +1097,6 @@ static int mq_load_mapping(struct dm_cache_policy *p,
e->oblock = oblock;
e->dirty = false; /* this gets corrected in a minute */
e->hit_count = hint_valid ? hint : 1;
- e->generation = mq->generation;
push(mq, e);
return 0;
@@ -1012,10 +1107,15 @@ static int mq_save_hints(struct mq_policy *mq, struct queue *q,
{
int r;
unsigned level;
+ struct list_head *h;
struct entry *e;
for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each_entry(e, q->qs + level, list) {
+ list_for_each(h, q->qs + level) {
+ if (is_sentinel(q, h))
+ continue;
+
+ e = container_of(h, struct entry, list);
r = fn(context, infer_cblock(&mq->cache_pool, e),
e->oblock, e->hit_count);
if (r)
@@ -1087,10 +1187,27 @@ static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
return r;
}
+#define CLEAN_TARGET_PERCENTAGE 25
+
+static bool clean_target_met(struct mq_policy *mq)
+{
+ /*
+ * Cache entries may not be populated. So we cannot rely on the
+ * size of the clean queue.
+ */
+ unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
+ unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;
+
+ return nr_clean >= target;
+}
+
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
dm_cblock_t *cblock)
{
- struct entry *e = pop(mq, &mq->cache_dirty);
+ struct entry *e = pop_old(mq, &mq->cache_dirty);
+
+ if (!e && !clean_target_met(mq))
+ e = pop(mq, &mq->cache_dirty);
if (!e)
return -ENODATA;
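
The sentinel mechanism above threads marker nodes through the same lists as real entries, so queue_pop_old() and the tick iteration can tell entries pushed before the last tick from newer ones. A compilable userspace toy of the idea (its own minimal list rather than <linux/list.h>, and a single sentinel per level rather than the kernel's address-range check over a sentinel array):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { struct node *prev, *next; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_del(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }

    static void list_add_tail(struct node *n, struct node *h)
    {
            n->prev = h->prev;
            n->next = h;
            h->prev->next = n;
            h->prev = n;
    }

    struct level { struct node head, sentinel; };

    static bool is_sentinel(struct level *l, struct node *n)
    {
            return n == &l->sentinel;
    }

    /* Pop the oldest entry, but never one pushed after the last tick. */
    static struct node *pop_old(struct level *l)
    {
            struct node *n = l->head.next;

            if (n == &l->head || is_sentinel(l, n))
                    return NULL;
            list_del(n);
            return n;
    }

    /* A tick moves the sentinel to the tail; everything before it is old. */
    static void tick(struct level *l)
    {
            list_del(&l->sentinel);
            list_add_tail(&l->sentinel, &l->head);
    }

    int main(void)
    {
            struct level l;
            struct node a, b;

            list_init(&l.head);
            list_add_tail(&l.sentinel, &l.head); /* empty level, sentinel at tail */
            list_add_tail(&a, &l.head);          /* pushed after the sentinel: new */
            printf("%p\n", (void *)pop_old(&l)); /* NULL: a is too recent */
            tick(&l);                            /* a now sits before the sentinel */
            list_add_tail(&b, &l.head);          /* b is new again */
            printf("%d\n", pop_old(&l) == &a);   /* 1: only a is old enough */
            return 0;
    }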
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 713a96237a80..5503e43e5f28 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -228,7 +228,7 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
*
* tcw: Compatible implementation of the block chaining mode used
* by the TrueCrypt device encryption system (prior to version 4.1).
- * For more info see: http://www.truecrypt.org
+ * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
* It operates on full 512 byte sectors and uses CBC
* with an IV derived from initial key and the sector number.
* In addition, whitening value is applied on every sector, whitening
@@ -1124,15 +1124,15 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
- struct bio *base_bio = io->base_bio;
struct bio *clone;
/*
- * The block layer might modify the bvec array, so always
- * copy the required bvecs because we need the original
- * one in order to decrypt the whole bio data *afterwards*.
+ * We need the original biovec array in order to decrypt
+ * the whole bio data *afterwards* -- thanks to immutable
+ * biovecs we don't need to worry about the block layer
+ * modifying the biovec array; so leverage bio_clone_fast().
*/
- clone = bio_clone_bioset(base_bio, gfp, cc->bs);
+ clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
if (!clone)
return 1;
@@ -1816,6 +1816,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
+ ret = -EINVAL;
while (opt_params--) {
opt_string = dm_shift_arg(&as);
if (!opt_string) {
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 42c3a27a14cc..57b6a1901c91 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -236,7 +236,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
delayed->context = dc;
- delayed->expires = expires = jiffies + (delay * HZ / 1000);
+ delayed->expires = expires = jiffies + msecs_to_jiffies(delay);
mutex_lock(&delayed_bios_lock);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 37de0173b6d2..74adcd2c967e 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
struct request_queue *q = bdev_get_queue(where->bdev);
unsigned short logical_block_size = queue_logical_block_size(q);
sector_t num_sectors;
+ unsigned int uninitialized_var(special_cmd_max_sectors);
- /* Reject unsupported discard requests */
- if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+ /*
+ * Reject unsupported discard and write same requests.
+ */
+ if (rw & REQ_DISCARD)
+ special_cmd_max_sectors = q->limits.max_discard_sectors;
+ else if (rw & REQ_WRITE_SAME)
+ special_cmd_max_sectors = q->limits.max_write_same_sectors;
+ if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
dec_count(io, region, -EOPNOTSUPP);
return;
}
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
- num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
} else if (rw & REQ_WRITE_SAME) {
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
*/
dp->get_page(dp, &page, &len, &offset);
bio_add_page(bio, page, logical_block_size, offset);
- num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+ num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
offset = 0;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4ee9dc..720ceeb7fa9b 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto err_unlock_md_type;
}
- if (dm_get_md_type(md) == DM_TYPE_NONE)
+ if (dm_get_md_type(md) == DM_TYPE_NONE) {
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
- else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md);
+ if (r) {
+ DMWARN("unable to set up device queue for new table.");
+ goto err_unlock_md_type;
+ }
+ } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
r = -EINVAL;
goto err_unlock_md_type;
}
- /* setup md->queue to reflect md's type (may block) */
- r = dm_setup_md_queue(md);
- if (r) {
- DMWARN("unable to set up device queue for new table.");
- goto err_unlock_md_type;
- }
dm_unlock_md_type(md);
/* stage inactive table */
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 03177ca0b009..058256d2eeea 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -17,7 +17,9 @@
#define DM_LOG_USERSPACE_VSN "1.3.0"
-struct flush_entry {
+#define FLUSH_ENTRY_POOL_SIZE 16
+
+struct dm_dirty_log_flush_entry {
int type;
region_t region;
struct list_head list;
@@ -34,22 +36,14 @@ struct flush_entry {
struct log_c {
struct dm_target *ti;
struct dm_dev *log_dev;
- uint32_t region_size;
- region_t region_count;
- uint64_t luid;
- char uuid[DM_UUID_LEN];
char *usr_argv_str;
uint32_t usr_argc;
- /*
- * in_sync_hint gets set when doing is_remote_recovering. It
- * represents the first region that needs recovery. IOW, the
- * first zero bit of sync_bits. This can be useful for to limit
- * traffic for calls like is_remote_recovering and get_resync_work,
- * but be take care in its use for anything else.
- */
- uint64_t in_sync_hint;
+ uint32_t region_size;
+ region_t region_count;
+ uint64_t luid;
+ char uuid[DM_UUID_LEN];
/*
* Mark and clear requests are held until a flush is issued
@@ -62,6 +56,15 @@ struct log_c {
struct list_head clear_list;
/*
+ * in_sync_hint gets set when doing is_remote_recovering. It
+ * represents the first region that needs recovery. IOW, the
+ * first zero bit of sync_bits. This can be useful to limit
+ * traffic for calls like is_remote_recovering and get_resync_work,
+ * but take care in its use for anything else.
+ */
+ uint64_t in_sync_hint;
+
+ /*
* Workqueue for flush of clear region requests.
*/
struct workqueue_struct *dmlog_wq;
@@ -72,19 +75,11 @@ struct log_c {
* Combine userspace flush and mark requests for efficiency.
*/
uint32_t integrated_flush;
-};
-
-static mempool_t *flush_entry_pool;
-static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
-{
- return kmalloc(sizeof(struct flush_entry), gfp_mask);
-}
+ mempool_t *flush_entry_pool;
+};
-static void flush_entry_free(void *element, void *pool_data)
-{
- kfree(element);
-}
+static struct kmem_cache *_flush_entry_cache;
static int userspace_do_request(struct log_c *lc, const char *uuid,
int request_type, char *data, size_t data_size,
@@ -254,6 +249,14 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
goto out;
}
+ lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
+ _flush_entry_cache);
+ if (!lc->flush_entry_pool) {
+ DMERR("Failed to create flush_entry_pool");
+ r = -ENOMEM;
+ goto out;
+ }
+
/*
* Send table string and get back any opened device.
*/
@@ -310,6 +313,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
out:
kfree(devices_rdata);
if (r) {
+ if (lc->flush_entry_pool)
+ mempool_destroy(lc->flush_entry_pool);
kfree(lc);
kfree(ctr_str);
} else {
@@ -338,6 +343,8 @@ static void userspace_dtr(struct dm_dirty_log *log)
if (lc->log_dev)
dm_put_device(lc->ti, lc->log_dev);
+ mempool_destroy(lc->flush_entry_pool);
+
kfree(lc->usr_argv_str);
kfree(lc);
@@ -461,7 +468,7 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
int r = 0;
- struct flush_entry *fe;
+ struct dm_dirty_log_flush_entry *fe;
list_for_each_entry(fe, flush_list, list) {
r = userspace_do_request(lc, lc->uuid, fe->type,
@@ -481,7 +488,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
int r = 0;
int count;
uint32_t type = 0;
- struct flush_entry *fe, *tmp_fe;
+ struct dm_dirty_log_flush_entry *fe, *tmp_fe;
LIST_HEAD(tmp_list);
uint64_t group[MAX_FLUSH_GROUP_COUNT];
@@ -563,7 +570,8 @@ static int userspace_flush(struct dm_dirty_log *log)
LIST_HEAD(clear_list);
int mark_list_is_empty;
int clear_list_is_empty;
- struct flush_entry *fe, *tmp_fe;
+ struct dm_dirty_log_flush_entry *fe, *tmp_fe;
+ mempool_t *flush_entry_pool = lc->flush_entry_pool;
spin_lock_irqsave(&lc->flush_lock, flags);
list_splice_init(&lc->mark_list, &mark_list);
@@ -643,10 +651,10 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
unsigned long flags;
struct log_c *lc = log->context;
- struct flush_entry *fe;
+ struct dm_dirty_log_flush_entry *fe;
/* Wait for an allocation, but _never_ fail */
- fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
+ fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
BUG_ON(!fe);
spin_lock_irqsave(&lc->flush_lock, flags);
@@ -672,7 +680,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
unsigned long flags;
struct log_c *lc = log->context;
- struct flush_entry *fe;
+ struct dm_dirty_log_flush_entry *fe;
/*
* If we fail to allocate, we skip the clearing of
@@ -680,7 +688,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
* to cause the region to be resync'ed when the
* device is activated next time.
*/
- fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
+ fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
if (!fe) {
DMERR("Failed to allocate memory to clear region.");
return;
@@ -733,7 +741,6 @@ static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
static void userspace_set_region_sync(struct dm_dirty_log *log,
region_t region, int in_sync)
{
- int r;
struct log_c *lc = log->context;
struct {
region_t r;
@@ -743,12 +750,12 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,
pkg.r = region;
pkg.i = (int64_t)in_sync;
- r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
- (char *)&pkg, sizeof(pkg), NULL, NULL);
+ (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
+ (char *)&pkg, sizeof(pkg), NULL, NULL);
/*
* It would be nice to be able to report failures.
- * However, it is easy emough to detect and resolve.
+ * However, it is easy enough to detect and resolve.
*/
return;
}
@@ -886,18 +893,16 @@ static int __init userspace_dirty_log_init(void)
{
int r = 0;
- flush_entry_pool = mempool_create(100, flush_entry_alloc,
- flush_entry_free, NULL);
-
- if (!flush_entry_pool) {
- DMWARN("Unable to create flush_entry_pool: No memory.");
+ _flush_entry_cache = KMEM_CACHE(dm_dirty_log_flush_entry, 0);
+ if (!_flush_entry_cache) {
+ DMWARN("Unable to create flush_entry_cache: No memory.");
return -ENOMEM;
}
r = dm_ulog_tfr_init();
if (r) {
DMWARN("Unable to initialize userspace log communications");
- mempool_destroy(flush_entry_pool);
+ kmem_cache_destroy(_flush_entry_cache);
return r;
}
@@ -905,7 +910,7 @@ static int __init userspace_dirty_log_init(void)
if (r) {
DMWARN("Couldn't register userspace dirty log type");
dm_ulog_tfr_exit();
- mempool_destroy(flush_entry_pool);
+ kmem_cache_destroy(_flush_entry_cache);
return r;
}
@@ -917,7 +922,7 @@ static void __exit userspace_dirty_log_exit(void)
{
dm_dirty_log_type_unregister(&_userspace_type);
dm_ulog_tfr_exit();
- mempool_destroy(flush_entry_pool);
+ kmem_cache_destroy(_flush_entry_cache);
DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
return;
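
The conversion above replaces a single global mempool with one shared slab cache plus a small per-instance reserve, so a burst of flush entries on one log cannot exhaust another log's pool. A hedged kernel-style sketch of the pattern (the example_* names are illustrative, not part of the patch):

    static struct kmem_cache *example_cache;    /* module scope, shared */

    struct example_ctx {
            mempool_t *pool;                    /* per-instance reserve */
    };

    static int example_ctx_init(struct example_ctx *ctx)
    {
            ctx->pool = mempool_create_slab_pool(16, example_cache);
            return ctx->pool ? 0 : -ENOMEM;
    }

    static void example_use(struct example_ctx *ctx)
    {
            void *fe;

            /* GFP_NOIO waits for a reserved object, so this cannot fail */
            fe = mempool_alloc(ctx->pool, GFP_NOIO);
            /* ... queue the entry for the flush worker ... */
            mempool_free(fe, ctx->pool);

            /* GFP_ATOMIC, by contrast, may return NULL; callers must
             * degrade gracefully, as userspace_clear_region() does above */
    }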
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 39ad9664d397..fdf8ec304f8d 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,6 +172,7 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
char *rdata, size_t *rdata_size)
{
int r = 0;
+ unsigned long tmo;
size_t dummy = 0;
int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
struct dm_ulog_request *tfr = prealloced_ulog_tfr;
@@ -236,11 +237,11 @@ resend:
goto out;
}
- r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
+ tmo = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
spin_lock(&receiving_list_lock);
list_del_init(&(pkg.list));
spin_unlock(&receiving_list_lock);
- if (!r) {
+ if (!tmo) {
DMWARN("[%s] Request timed out: [%u/%u] - retrying",
(strlen(uuid) > 8) ?
(uuid + (strlen(uuid) - 8)) : (uuid),
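
The hunk above exists because wait_for_completion_timeout() does not return an error code: it returns an unsigned long that is 0 on timeout and the remaining jiffies otherwise, so its result must not share the int variable used for negative errors. A minimal sketch of the corrected pattern:

    static int example_wait(struct completion *done)
    {
            unsigned long tmo;

            tmo = wait_for_completion_timeout(done, msecs_to_jiffies(5000));
            if (!tmo)
                    return -ETIMEDOUT;  /* timed out; retry or fail */
            return 0;                   /* completed with tmo jiffies left */
    }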
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
new file mode 100644
index 000000000000..93e08446a87d
--- /dev/null
+++ b/drivers/md/dm-log-writes.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright (C) 2014 Facebook. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/device-mapper.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#define DM_MSG_PREFIX "log-writes"
+
+/*
+ * This target will sequentially log all writes to the target device onto the
+ * log device. This is helpful for replaying writes to check for fs consistency
+ * at all times. This target provides a mechanism to mark specific events to
+ * check data at a later time. So for example you would:
+ *
+ * write data
+ * fsync
+ * dmsetup message /dev/whatever mark mymark
+ * unmount /mnt/test
+ *
+ * Then replay the log up to mymark and check the contents of the replay to
+ * verify it matches what was written.
+ *
+ * We log writes only after they have been flushed; this makes the log describe
+ * close to the order in which the data hits the actual disk, not its cache. So
+ * for example the following sequence (W means write, C means complete)
+ *
+ * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
+ *
+ * Would result in the log looking like this:
+ *
+ * c,a,flush,fuad,b,<other writes>,<next flush>
+ *
+ * This is meant to help expose problems where file systems do not properly wait
+ * on data being written before invoking a FLUSH. FUA bypasses cache so once it
+ * completes it is added to the log as it should be on disk.
+ *
+ * We treat DISCARDs as if they don't bypass cache so that they are logged in
+ * order of completion along with the normal writes. If we didn't do it this
+ * way we would process all the discards first and then write all the data, when
+ * in fact we want to do the data and the discard in the order that they
+ * completed.
+ */
+#define LOG_FLUSH_FLAG (1 << 0)
+#define LOG_FUA_FLAG (1 << 1)
+#define LOG_DISCARD_FLAG (1 << 2)
+#define LOG_MARK_FLAG (1 << 3)
+
+#define WRITE_LOG_VERSION 1
+#define WRITE_LOG_MAGIC 0x6a736677736872
+
+/*
+ * The disk format for this is braindead simple.
+ *
+ * At byte 0 we have our super, followed by the following sequence for
+ * nr_entries:
+ *
+ * [ 1 sector ][ entry->nr_sectors ]
+ * [log_write_entry][ data written ]
+ *
+ * The log_write_entry takes up a full sector so we can have arbitrary length
+ * marks and it leaves us room for extra content in the future.
+ */
+
+/*
+ * Basic info about the log for userspace.
+ */
+struct log_write_super {
+ __le64 magic;
+ __le64 version;
+ __le64 nr_entries;
+ __le32 sectorsize;
+};
+
+/*
+ * sector - the sector we wrote.
+ * nr_sectors - the number of sectors we wrote.
+ * flags - flags for this log entry.
+ * data_len - the size of the data in this log entry, this is for private log
+ * entry stuff, the MARK data provided by userspace for example.
+ */
+struct log_write_entry {
+ __le64 sector;
+ __le64 nr_sectors;
+ __le64 flags;
+ __le64 data_len;
+};
+
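
To make the layout concrete, here is a hedged userspace sketch of a reader for the format just described (it assumes a little-endian host and the default 512-byte sector size; a real replay tool would honor super.sectorsize and byte-swap the __le64 fields). Marks carry their data inline in the entry sector, and discard entries occupy only their entry sector; everything else is followed by nr_sectors of data:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct log_write_super {
            uint64_t magic, version, nr_entries;
            uint32_t sectorsize;
    };

    struct log_write_entry {
            uint64_t sector, nr_sectors, flags, data_len;
    };

    #define LOG_DISCARD_FLAG (1 << 2)

    static int dump_log(FILE *log)
    {
            uint8_t sec[512];
            struct log_write_super super;
            struct log_write_entry entry;
            uint64_t i;

            /* the super occupies sector 0, entries start at sector 1 */
            if (fread(sec, sizeof(sec), 1, log) != 1)
                    return -1;
            memcpy(&super, sec, sizeof(super));
            if (super.magic != 0x6a736677736872ULL)
                    return -1;

            for (i = 0; i < super.nr_entries; i++) {
                    if (fread(sec, sizeof(sec), 1, log) != 1)
                            return -1;
                    memcpy(&entry, sec, sizeof(entry));
                    printf("entry %llu: sector %llu, %llu sectors, flags 0x%llx\n",
                           (unsigned long long)i,
                           (unsigned long long)entry.sector,
                           (unsigned long long)entry.nr_sectors,
                           (unsigned long long)entry.flags);
                    /* data follows the entry sector, except for discards */
                    if (!(entry.flags & LOG_DISCARD_FLAG) &&
                        fseek(log, entry.nr_sectors * 512, SEEK_CUR))
                            return -1;
            }
            return 0;
    }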
+struct log_writes_c {
+ struct dm_dev *dev;
+ struct dm_dev *logdev;
+ u64 logged_entries;
+ u32 sectorsize;
+ atomic_t io_blocks;
+ atomic_t pending_blocks;
+ sector_t next_sector;
+ sector_t end_sector;
+ bool logging_enabled;
+ bool device_supports_discard;
+ spinlock_t blocks_lock;
+ struct list_head unflushed_blocks;
+ struct list_head logging_blocks;
+ wait_queue_head_t wait;
+ struct task_struct *log_kthread;
+};
+
+struct pending_block {
+ int vec_cnt;
+ u64 flags;
+ sector_t sector;
+ sector_t nr_sectors;
+ char *data;
+ u32 datalen;
+ struct list_head list;
+ struct bio_vec vecs[0];
+};
+
+struct per_bio_data {
+ struct pending_block *block;
+};
+
+static void put_pending_block(struct log_writes_c *lc)
+{
+ if (atomic_dec_and_test(&lc->pending_blocks)) {
+ smp_mb__after_atomic();
+ if (waitqueue_active(&lc->wait))
+ wake_up(&lc->wait);
+ }
+}
+
+static void put_io_block(struct log_writes_c *lc)
+{
+ if (atomic_dec_and_test(&lc->io_blocks)) {
+ smp_mb__after_atomic();
+ if (waitqueue_active(&lc->wait))
+ wake_up(&lc->wait);
+ }
+}
+
+static void log_end_io(struct bio *bio, int err)
+{
+ struct log_writes_c *lc = bio->bi_private;
+ struct bio_vec *bvec;
+ int i;
+
+ if (err) {
+ unsigned long flags;
+
+ DMERR("Error writing log block, error=%d", err);
+ spin_lock_irqsave(&lc->blocks_lock, flags);
+ lc->logging_enabled = false;
+ spin_unlock_irqrestore(&lc->blocks_lock, flags);
+ }
+
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ put_io_block(lc);
+ bio_put(bio);
+}
+
+/*
+ * Meant to be called if there is an error, it will free all the pages
+ * associated with the block.
+ */
+static void free_pending_block(struct log_writes_c *lc,
+ struct pending_block *block)
+{
+ int i;
+
+ for (i = 0; i < block->vec_cnt; i++) {
+ if (block->vecs[i].bv_page)
+ __free_page(block->vecs[i].bv_page);
+ }
+ kfree(block->data);
+ kfree(block);
+ put_pending_block(lc);
+}
+
+static int write_metadata(struct log_writes_c *lc, void *entry,
+ size_t entrylen, void *data, size_t datalen,
+ sector_t sector)
+{
+ struct bio *bio;
+ struct page *page;
+ void *ptr;
+ size_t ret;
+
+ bio = bio_alloc(GFP_KERNEL, 1);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ DMERR("Couldn't alloc log page");
+ bio_put(bio);
+ goto error;
+ }
+
+ ptr = kmap_atomic(page);
+ memcpy(ptr, entry, entrylen);
+ if (datalen)
+ memcpy(ptr + entrylen, data, datalen);
+ memset(ptr + entrylen + datalen, 0,
+ lc->sectorsize - entrylen - datalen);
+ kunmap_atomic(ptr);
+
+ ret = bio_add_page(bio, page, lc->sectorsize, 0);
+ if (ret != lc->sectorsize) {
+ DMERR("Couldn't add page to the log block");
+ goto error_bio;
+ }
+ submit_bio(WRITE, bio);
+ return 0;
+error_bio:
+ bio_put(bio);
+ __free_page(page);
+error:
+ put_io_block(lc);
+ return -1;
+}
+
+static int log_one_block(struct log_writes_c *lc,
+ struct pending_block *block, sector_t sector)
+{
+ struct bio *bio;
+ struct log_write_entry entry;
+ size_t ret;
+ int i;
+
+ entry.sector = cpu_to_le64(block->sector);
+ entry.nr_sectors = cpu_to_le64(block->nr_sectors);
+ entry.flags = cpu_to_le64(block->flags);
+ entry.data_len = cpu_to_le64(block->datalen);
+ if (write_metadata(lc, &entry, sizeof(entry), block->data,
+ block->datalen, sector)) {
+ free_pending_block(lc, block);
+ return -1;
+ }
+
+ if (!block->vec_cnt)
+ goto out;
+ sector++;
+
+ bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+ atomic_inc(&lc->io_blocks);
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ for (i = 0; i < block->vec_cnt; i++) {
+ /*
+ * The page offset is always 0 because we allocate a new page
+ * for every bvec in the original bio for simplicity's sake.
+ */
+ ret = bio_add_page(bio, block->vecs[i].bv_page,
+ block->vecs[i].bv_len, 0);
+ if (ret != block->vecs[i].bv_len) {
+ atomic_inc(&lc->io_blocks);
+ submit_bio(WRITE, bio);
+ bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ ret = bio_add_page(bio, block->vecs[i].bv_page,
+ block->vecs[i].bv_len, 0);
+ if (ret != block->vecs[i].bv_len) {
+ DMERR("Couldn't add page on new bio?");
+ bio_put(bio);
+ goto error;
+ }
+ }
+ sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
+ }
+ submit_bio(WRITE, bio);
+out:
+ kfree(block->data);
+ kfree(block);
+ put_pending_block(lc);
+ return 0;
+error:
+ free_pending_block(lc, block);
+ put_io_block(lc);
+ return -1;
+}
+
+static int log_super(struct log_writes_c *lc)
+{
+ struct log_write_super super;
+
+ super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
+ super.version = cpu_to_le64(WRITE_LOG_VERSION);
+ super.nr_entries = cpu_to_le64(lc->logged_entries);
+ super.sectorsize = cpu_to_le32(lc->sectorsize);
+
+ if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+ DMERR("Couldn't write super");
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline sector_t logdev_last_sector(struct log_writes_c *lc)
+{
+ return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static int log_writes_kthread(void *arg)
+{
+ struct log_writes_c *lc = (struct log_writes_c *)arg;
+ sector_t sector = 0;
+
+ while (!kthread_should_stop()) {
+ bool super = false;
+ bool logging_enabled;
+ struct pending_block *block = NULL;
+ int ret;
+
+ spin_lock_irq(&lc->blocks_lock);
+ if (!list_empty(&lc->logging_blocks)) {
+ block = list_first_entry(&lc->logging_blocks,
+ struct pending_block, list);
+ list_del_init(&block->list);
+ if (!lc->logging_enabled)
+ goto next;
+
+ sector = lc->next_sector;
+ if (block->flags & LOG_DISCARD_FLAG)
+ lc->next_sector++;
+ else
+ lc->next_sector += block->nr_sectors + 1;
+
+ /*
+ * Apparently the size of the device may not be known
+ * right away, so handle this properly.
+ */
+ if (!lc->end_sector)
+ lc->end_sector = logdev_last_sector(lc);
+ if (lc->end_sector &&
+ lc->next_sector >= lc->end_sector) {
+ DMERR("Ran out of space on the logdev");
+ lc->logging_enabled = false;
+ goto next;
+ }
+ lc->logged_entries++;
+ atomic_inc(&lc->io_blocks);
+
+ super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
+ if (super)
+ atomic_inc(&lc->io_blocks);
+ }
+next:
+ logging_enabled = lc->logging_enabled;
+ spin_unlock_irq(&lc->blocks_lock);
+ if (block) {
+ if (logging_enabled) {
+ ret = log_one_block(lc, block, sector);
+ if (!ret && super)
+ ret = log_super(lc);
+ if (ret) {
+ spin_lock_irq(&lc->blocks_lock);
+ lc->logging_enabled = false;
+ spin_unlock_irq(&lc->blocks_lock);
+ }
+ } else
+ free_pending_block(lc, block);
+ continue;
+ }
+
+ if (!try_to_freeze()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop() &&
+ !atomic_read(&lc->pending_blocks))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Construct a log-writes mapping:
+ * log-writes <dev_path> <log_dev_path>
+ */
+static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct log_writes_c *lc;
+ struct dm_arg_set as;
+ const char *devname, *logdevname;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ if (argc < 2) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
+ if (!lc) {
+ ti->error = "Cannot allocate context";
+ return -ENOMEM;
+ }
+ spin_lock_init(&lc->blocks_lock);
+ INIT_LIST_HEAD(&lc->unflushed_blocks);
+ INIT_LIST_HEAD(&lc->logging_blocks);
+ init_waitqueue_head(&lc->wait);
+ lc->sectorsize = 1 << SECTOR_SHIFT;
+ atomic_set(&lc->io_blocks, 0);
+ atomic_set(&lc->pending_blocks, 0);
+
+ devname = dm_shift_arg(&as);
+ if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) {
+ ti->error = "Device lookup failed";
+ goto bad;
+ }
+
+ logdevname = dm_shift_arg(&as);
+ if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) {
+ ti->error = "Log device lookup failed";
+ dm_put_device(ti, lc->dev);
+ goto bad;
+ }
+
+ lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
+ if (IS_ERR(lc->log_kthread)) {
+ ti->error = "Couldn't alloc kthread";
+ dm_put_device(ti, lc->dev);
+ dm_put_device(ti, lc->logdev);
+ goto bad;
+ }
+
+ /* We put the super at sector 0, start logging at sector 1 */
+ lc->next_sector = 1;
+ lc->logging_enabled = true;
+ lc->end_sector = logdev_last_sector(lc);
+ lc->device_supports_discard = true;
+
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->private = lc;
+ return 0;
+
+bad:
+ kfree(lc);
+ return -EINVAL;
+}
+
+static int log_mark(struct log_writes_c *lc, char *data)
+{
+ struct pending_block *block;
+ size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);
+
+ block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
+ if (!block) {
+ DMERR("Error allocating pending block");
+ return -ENOMEM;
+ }
+
+ block->data = kstrndup(data, maxsize, GFP_KERNEL);
+ if (!block->data) {
+ DMERR("Error copying mark data");
+ kfree(block);
+ return -ENOMEM;
+ }
+ atomic_inc(&lc->pending_blocks);
+ block->datalen = strlen(block->data);
+ block->flags |= LOG_MARK_FLAG;
+ spin_lock_irq(&lc->blocks_lock);
+ list_add_tail(&block->list, &lc->logging_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+ wake_up_process(lc->log_kthread);
+ return 0;
+}
+
+static void log_writes_dtr(struct dm_target *ti)
+{
+ struct log_writes_c *lc = ti->private;
+
+ spin_lock_irq(&lc->blocks_lock);
+ list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+
+ /*
+ * This is just nice to have since it'll update the super to include the
+ * unflushed blocks; if it fails we don't really care.
+ */
+ log_mark(lc, "dm-log-writes-end");
+ wake_up_process(lc->log_kthread);
+ wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
+ !atomic_read(&lc->pending_blocks));
+ kthread_stop(lc->log_kthread);
+
+ WARN_ON(!list_empty(&lc->logging_blocks));
+ WARN_ON(!list_empty(&lc->unflushed_blocks));
+ dm_put_device(ti, lc->dev);
+ dm_put_device(ti, lc->logdev);
+ kfree(lc);
+}
+
+static void normal_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct log_writes_c *lc = ti->private;
+
+ bio->bi_bdev = lc->dev->bdev;
+}
+
+static int log_writes_map(struct dm_target *ti, struct bio *bio)
+{
+ struct log_writes_c *lc = ti->private;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ struct pending_block *block;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ size_t alloc_size;
+ int i = 0;
+ bool flush_bio = (bio->bi_rw & REQ_FLUSH);
+ bool fua_bio = (bio->bi_rw & REQ_FUA);
+ bool discard_bio = (bio->bi_rw & REQ_DISCARD);
+
+ pb->block = NULL;
+
+ /* Don't bother doing anything if logging has been disabled */
+ if (!lc->logging_enabled)
+ goto map_bio;
+
+ /*
+ * Map reads as normal.
+ */
+ if (bio_data_dir(bio) == READ)
+ goto map_bio;
+
+ /* No sectors and not a flush? Don't care */
+ if (!bio_sectors(bio) && !flush_bio)
+ goto map_bio;
+
+ /*
+ * Discards will have bi_size set but there's no actual data, so just
+ * allocate the size of the pending block.
+ */
+ if (discard_bio)
+ alloc_size = sizeof(struct pending_block);
+ else
+ alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio);
+
+ block = kzalloc(alloc_size, GFP_NOIO);
+ if (!block) {
+ DMERR("Error allocating pending block");
+ spin_lock_irq(&lc->blocks_lock);
+ lc->logging_enabled = false;
+ spin_unlock_irq(&lc->blocks_lock);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&block->list);
+ pb->block = block;
+ atomic_inc(&lc->pending_blocks);
+
+ if (flush_bio)
+ block->flags |= LOG_FLUSH_FLAG;
+ if (fua_bio)
+ block->flags |= LOG_FUA_FLAG;
+ if (discard_bio)
+ block->flags |= LOG_DISCARD_FLAG;
+
+ block->sector = bio->bi_iter.bi_sector;
+ block->nr_sectors = bio_sectors(bio);
+
+ /* We don't need the data, just submit */
+ if (discard_bio) {
+ WARN_ON(flush_bio || fua_bio);
+ if (lc->device_supports_discard)
+ goto map_bio;
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /* Flush bio, splice the unflushed blocks onto this list and submit */
+ if (flush_bio && !bio_sectors(bio)) {
+ spin_lock_irq(&lc->blocks_lock);
+ list_splice_init(&lc->unflushed_blocks, &block->list);
+ spin_unlock_irq(&lc->blocks_lock);
+ goto map_bio;
+ }
+
+	/*
+	 * We will write this bio somewhere else much later, so copy the
+	 * actual contents into new pages now to be sure the data is still
+	 * there when we get to it.
+	 *
+	 * This matters because the bio may come from O_DIRECT, in which case
+	 * we cannot hold onto the pages until some later point; we have to
+	 * copy the contents manually.
+	 */
+ bio_for_each_segment(bv, bio, iter) {
+ struct page *page;
+ void *src, *dst;
+
+ page = alloc_page(GFP_NOIO);
+ if (!page) {
+ DMERR("Error allocing page");
+ free_pending_block(lc, block);
+ spin_lock_irq(&lc->blocks_lock);
+ lc->logging_enabled = false;
+ spin_unlock_irq(&lc->blocks_lock);
+ return -ENOMEM;
+ }
+
+ src = kmap_atomic(bv.bv_page);
+ dst = kmap_atomic(page);
+ memcpy(dst, src + bv.bv_offset, bv.bv_len);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+ block->vecs[i].bv_page = page;
+ block->vecs[i].bv_len = bv.bv_len;
+ block->vec_cnt++;
+ i++;
+ }
+
+ /* Had a flush with data in it, weird */
+ if (flush_bio) {
+ spin_lock_irq(&lc->blocks_lock);
+ list_splice_init(&lc->unflushed_blocks, &block->list);
+ spin_unlock_irq(&lc->blocks_lock);
+ }
+map_bio:
+ normal_map_bio(ti, bio);
+ return DM_MAPIO_REMAPPED;
+}
+
+static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
+{
+ struct log_writes_c *lc = ti->private;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
+ if (bio_data_dir(bio) == WRITE && pb->block) {
+ struct pending_block *block = pb->block;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lc->blocks_lock, flags);
+ if (block->flags & LOG_FLUSH_FLAG) {
+ list_splice_tail_init(&block->list, &lc->logging_blocks);
+ list_add_tail(&block->list, &lc->logging_blocks);
+ wake_up_process(lc->log_kthread);
+ } else if (block->flags & LOG_FUA_FLAG) {
+ list_add_tail(&block->list, &lc->logging_blocks);
+ wake_up_process(lc->log_kthread);
+ } else
+ list_add_tail(&block->list, &lc->unflushed_blocks);
+ spin_unlock_irqrestore(&lc->blocks_lock, flags);
+ }
+
+ return error;
+}
+
+/*
+ * INFO format: <logged entries> <highest allocated sector>
+ */
+static void log_writes_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result,
+ unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct log_writes_c *lc = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%llu %llu", lc->logged_entries,
+ (unsigned long long)lc->next_sector - 1);
+ if (!lc->logging_enabled)
+ DMEMIT(" logging_disabled");
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
+ break;
+ }
+}
+
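+/*
+ * e.g. a device with 12 logged entries, highest allocated log sector 4096,
+ * and logging still enabled reports (illustrative values):
+ *
+ *	12 4096
+ */
+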
+static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
+ unsigned long arg)
+{
+ struct log_writes_c *lc = ti->private;
+ struct dm_dev *dev = lc->dev;
+ int r = 0;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+}
+
+static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct log_writes_c *lc = ti->private;
+ struct request_queue *q = bdev_get_queue(lc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = lc->dev->bdev;
+ bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int log_writes_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct log_writes_c *lc = ti->private;
+
+ return fn(ti, lc->dev, 0, ti->len, data);
+}
+
+/*
+ * Messages supported:
+ * mark <mark data> - specify the marked data.
+ */
+static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r = -EINVAL;
+ struct log_writes_c *lc = ti->private;
+
+ if (argc != 2) {
+ DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc);
+ return r;
+ }
+
+ if (!strcasecmp(argv[0], "mark"))
+ r = log_mark(lc, argv[1]);
+ else
+ DMWARN("Unrecognised log writes target message received: %s", argv[0]);
+
+ return r;
+}
+
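+/*
+ * e.g. (device name illustrative) userspace can drop a marker into the log
+ * with:
+ *
+ *	dmsetup message <dm-device-name> 0 mark my-checkpoint
+ *
+ * which reaches log_mark() above and queues a LOG_MARK_FLAG block.
+ */
+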
+static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct log_writes_c *lc = ti->private;
+ struct request_queue *q = bdev_get_queue(lc->dev->bdev);
+
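+	/*
+	 * If the underlying device cannot discard, advertise sector-
+	 * granularity discards anyway and complete them in log_writes_map()
+	 * without passing them down, so discard extents still reach the log.
+	 */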
+ if (!q || !blk_queue_discard(q)) {
+ lc->device_supports_discard = false;
+ limits->discard_granularity = 1 << SECTOR_SHIFT;
+ limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
+ }
+}
+
+static struct target_type log_writes_target = {
+ .name = "log-writes",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = log_writes_ctr,
+ .dtr = log_writes_dtr,
+ .map = log_writes_map,
+ .end_io = normal_end_io,
+ .status = log_writes_status,
+ .ioctl = log_writes_ioctl,
+ .merge = log_writes_merge,
+ .message = log_writes_message,
+ .iterate_devices = log_writes_iterate_devices,
+ .io_hints = log_writes_io_hints,
+};
+
+static int __init dm_log_writes_init(void)
+{
+ int r = dm_register_target(&log_writes_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit dm_log_writes_exit(void)
+{
+ dm_unregister_target(&log_writes_target);
+}
+
+module_init(dm_log_writes_init);
+module_exit(dm_log_writes_exit);
+
+MODULE_DESCRIPTION(DM_NAME " log writes target");
+MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d376dc87716e..63953477a07c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -428,7 +428,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
} else {
/* blk-mq request-based interface */
*__clone = blk_get_request(bdev_get_queue(bdev),
- rq_data_dir(rq), GFP_KERNEL);
+ rq_data_dir(rq), GFP_ATOMIC);
if (IS_ERR(*__clone))
/* ENOMEM, requeue */
return r;
@@ -1627,7 +1627,7 @@ static int __pgpath_busy(struct pgpath *pgpath)
{
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
- return dm_underlying_device_busy(q);
+ return blk_lld_busy(q);
}
/*
@@ -1703,7 +1703,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 8b204ae216ab..f83a0f3fc365 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -20,6 +20,8 @@
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
+#include "dm.h"
+
#include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots"
@@ -291,12 +293,23 @@ struct origin {
};
/*
+ * This structure is allocated for each origin target
+ */
+struct dm_origin {
+ struct dm_dev *dev;
+ struct dm_target *ti;
+ unsigned split_boundary;
+ struct list_head hash_list;
+};
+
+/*
* Size of the hash table for origin volumes. If we make this
* the size of the minors list then it should be nearly perfect
*/
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK 0xFF
static struct list_head *_origins;
+static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;
static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
GFP_KERNEL);
if (!_origins) {
- DMERR("unable to allocate memory");
+ DMERR("unable to allocate memory for _origins");
return -ENOMEM;
}
-
for (i = 0; i < ORIGIN_HASH_SIZE; i++)
INIT_LIST_HEAD(_origins + i);
+
+ _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!_dm_origins) {
+ DMERR("unable to allocate memory for _dm_origins");
+ kfree(_origins);
+ return -ENOMEM;
+ }
+ for (i = 0; i < ORIGIN_HASH_SIZE; i++)
+ INIT_LIST_HEAD(_dm_origins + i);
+
init_rwsem(&_origins_lock);
return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
static void exit_origin_hash(void)
{
kfree(_origins);
+ kfree(_dm_origins);
}
static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
list_add_tail(&o->hash_list, sl);
}
+static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
+{
+ struct list_head *ol;
+ struct dm_origin *o;
+
+ ol = &_dm_origins[origin_hash(origin)];
+ list_for_each_entry (o, ol, hash_list)
+ if (bdev_equal(o->dev->bdev, origin))
+ return o;
+
+ return NULL;
+}
+
+static void __insert_dm_origin(struct dm_origin *o)
+{
+ struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
+ list_add_tail(&o->hash_list, sl);
+}
+
+static void __remove_dm_origin(struct dm_origin *o)
+{
+ list_del(&o->hash_list);
+}
+
/*
* _origins_lock must be held when calling this function.
* Returns number of snapshots registered using the supplied cow device, plus:
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = ti->private;
- struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
+ struct dm_origin *o;
+ struct mapped_device *origin_md = NULL;
+ bool must_restart_merging = false;
down_read(&_origins_lock);
+
+ o = __lookup_dm_origin(s->origin->bdev);
+ if (o)
+ origin_md = dm_table_get_md(o->ti->table);
+ if (!origin_md) {
+ (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
+ if (snap_merging)
+ origin_md = dm_table_get_md(snap_merging->ti->table);
+ }
+ if (origin_md == dm_table_get_md(ti->table))
+ origin_md = NULL;
+ if (origin_md) {
+ if (dm_hold(origin_md))
+ origin_md = NULL;
+ }
+
+ up_read(&_origins_lock);
+
+ if (origin_md) {
+ dm_internal_suspend_fast(origin_md);
+ if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
+ must_restart_merging = true;
+ stop_merge(snap_merging);
+ }
+ }
+
+ down_read(&_origins_lock);
+
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
down_write(&snap_src->lock);
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
up_write(&snap_dest->lock);
up_write(&snap_src->lock);
}
+
up_read(&_origins_lock);
+ if (origin_md) {
+ if (must_restart_merging)
+ start_merge(snap_merging);
+ dm_internal_resume_fast(origin_md);
+ dm_put(origin_md);
+ }
+
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
* Origin: maps a linear range of a device, with hooks for snapshotting.
*/
-struct dm_origin {
- struct dm_dev *dev;
- unsigned split_boundary;
-};
-
/*
* Construct an origin mapping: <dev_path>
* The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_open;
}
+ o->ti = ti;
ti->private = o;
ti->num_flush_bios = 1;
@@ -2180,6 +2263,7 @@ bad_alloc:
static void origin_dtr(struct dm_target *ti)
{
struct dm_origin *o = ti->private;
+
dm_put_device(ti, o->dev);
kfree(o);
}
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
struct dm_origin *o = ti->private;
o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
+
+ down_write(&_origins_lock);
+ __insert_dm_origin(o);
+ up_write(&_origins_lock);
+}
+
+static void origin_postsuspend(struct dm_target *ti)
+{
+ struct dm_origin *o = ti->private;
+
+ down_write(&_origins_lock);
+ __remove_dm_origin(o);
+ up_write(&_origins_lock);
}
static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 8, 1},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
.map = origin_map,
.resume = origin_resume,
+ .postsuspend = origin_postsuspend,
.status = origin_status,
.merge = origin_merge,
.iterate_devices = origin_iterate_devices,
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 12, 0},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index c62c5ab6aed5..7e818f5f1dc4 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -11,7 +11,7 @@
struct dm_sysfs_attr {
struct attribute attr;
ssize_t (*show)(struct mapped_device *, char *);
- ssize_t (*store)(struct mapped_device *, char *);
+ ssize_t (*store)(struct mapped_device *, const char *, size_t count);
};
#define DM_ATTR_RO(_name) \
@@ -39,6 +39,31 @@ static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
+#define DM_ATTR_RW(_name) \
+struct dm_sysfs_attr dm_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, dm_attr_##_name##_show, dm_attr_##_name##_store)
+
+static ssize_t dm_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t count)
+{
+ struct dm_sysfs_attr *dm_attr;
+ struct mapped_device *md;
+ ssize_t ret;
+
+ dm_attr = container_of(attr, struct dm_sysfs_attr, attr);
+ if (!dm_attr->store)
+ return -EIO;
+
+ md = dm_get_from_kobject(kobj);
+ if (!md)
+ return -EINVAL;
+
+ ret = dm_attr->store(md, page, count);
+ dm_put(md);
+
+ return ret;
+}
+
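+/*
+ * A writable attribute needs a matching show/store pair; e.g. a hypothetical
+ * attribute "foo" would be wired up as:
+ *
+ *	static ssize_t dm_attr_foo_show(struct mapped_device *md, char *buf);
+ *	static ssize_t dm_attr_foo_store(struct mapped_device *md,
+ *					 const char *buf, size_t count);
+ *	static DM_ATTR_RW(foo);
+ *
+ * plus an &dm_attr_foo.attr entry in dm_attrs[] below.
+ */
+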
static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf)
{
if (dm_copy_name_and_uuid(md, buf, NULL))
@@ -64,25 +89,33 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
return strlen(buf);
}
+static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
+{
+ sprintf(buf, "%d\n", dm_use_blk_mq(md));
+
+ return strlen(buf);
+}
+
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
static DM_ATTR_RO(suspended);
+static DM_ATTR_RO(use_blk_mq);
+static DM_ATTR_RW(rq_based_seq_io_merge_deadline);
static struct attribute *dm_attrs[] = {
&dm_attr_name.attr,
&dm_attr_uuid.attr,
&dm_attr_suspended.attr,
+ &dm_attr_use_blk_mq.attr,
+ &dm_attr_rq_based_seq_io_merge_deadline.attr,
NULL,
};
static const struct sysfs_ops dm_sysfs_ops = {
.show = dm_attr_show,
+ .store = dm_attr_store,
};
-/*
- * dm kobject is embedded in mapped_device structure
- * no need to define release function here
- */
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6554d9148927..d9b00b8565c6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,6 +18,8 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/mount.h>
#define DM_MSG_PREFIX "table"
@@ -372,23 +374,18 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
int r;
dev_t uninitialized_var(dev);
struct dm_dev_internal *dd;
- unsigned int major, minor;
struct dm_table *t = ti->table;
- char dummy;
+ struct block_device *bdev;
BUG_ON(!t);
- if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
- /* Extract the major/minor numbers */
- dev = MKDEV(major, minor);
- if (MAJOR(dev) != major || MINOR(dev) != minor)
- return -EOVERFLOW;
+ /* convert the path to a device */
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev)) {
+ dev = name_to_dev_t(path);
+ if (!dev)
+ return -ENODEV;
} else {
- /* convert the path to a device */
- struct block_device *bdev = lookup_bdev(path);
-
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
dev = bdev->bd_dev;
bdput(bdev);
}
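+	/*
+	 * Both lookup forms keep working: device paths ("/dev/sdb2") resolve
+	 * via lookup_bdev(), while names with no node in the caller's
+	 * namespace (e.g. "8:18" or "PARTUUID=...") fall back to
+	 * name_to_dev_t().
+	 */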
@@ -939,7 +936,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
}
-static int dm_table_alloc_md_mempools(struct dm_table *t)
+static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
unsigned type = dm_table_get_type(t);
unsigned per_bio_data_size = 0;
@@ -957,7 +954,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t)
per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
}
- t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
if (!t->mempools)
return -ENOMEM;
@@ -1127,7 +1124,7 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_alloc_md_mempools(t);
+ r = dm_table_alloc_md_mempools(t, t->md);
if (r)
DMERR("unable to allocate mempools");
@@ -1339,14 +1336,14 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
continue;
if (ti->flush_supported)
- return 1;
+ return true;
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_flush_capable, &flush))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static bool dm_table_discard_zeroes_data(struct dm_table *t)
@@ -1359,10 +1356,10 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t)
ti = dm_table_get_target(t, i++);
if (ti->discard_zeroes_data_unsupported)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
@@ -1408,10 +1405,10 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
if (!ti->type->iterate_devices ||
!ti->type->iterate_devices(ti, func, NULL))
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1468,14 +1465,14 @@ static bool dm_table_supports_discards(struct dm_table *t)
continue;
if (ti->discards_supported)
- return 1;
+ return true;
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_discard_capable, NULL))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1677,20 +1674,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
return r;
}
-int dm_table_any_busy_target(struct dm_table *t)
-{
- unsigned i;
- struct dm_target *ti;
-
- for (i = 0; i < t->num_targets; i++) {
- ti = t->targets + i;
- if (ti->type->busy && ti->type->busy(ti))
- return 1;
- }
-
- return 0;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
@@ -1709,9 +1692,13 @@ void dm_table_run_md_queue_async(struct dm_table *t)
md = dm_table_get_md(t);
queue = dm_get_md_queue(md);
if (queue) {
- spin_lock_irqsave(queue->queue_lock, flags);
- blk_run_queue_async(queue);
- spin_unlock_irqrestore(queue->queue_lock, flags);
+ if (queue->mq_ops)
+ blk_mq_run_hw_queues(queue, true);
+ else {
+ spin_lock_irqsave(queue->queue_lock, flags);
+ blk_run_queue_async(queue);
+ spin_unlock_irqrestore(queue->queue_lock, flags);
+ }
}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 654773cb1eee..921aafd12aee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
case -ENODATA:
- if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
- /*
- * This block isn't provisioned, and we have no way
- * of doing so.
- */
- handle_unserviceable_bio(tc->pool, bio);
- cell_defer_no_holder(tc, virt_cell);
- return DM_MAPIO_SUBMITTED;
- }
- /* fall through */
-
case -EWOULDBLOCK:
thin_defer_cell(tc, virt_cell);
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 7a7bab8947ae..66616db33e6f 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -18,20 +18,39 @@
#include <linux/module.h>
#include <linux/device-mapper.h>
+#include <linux/reboot.h>
#include <crypto/hash.h>
#define DM_MSG_PREFIX "verity"
+#define DM_VERITY_ENV_LENGTH 42
+#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
+
#define DM_VERITY_IO_VEC_INLINE 16
#define DM_VERITY_MEMPOOL_SIZE 4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_LEVELS 63
+#define DM_VERITY_MAX_CORRUPTED_ERRS 100
+
+#define DM_VERITY_OPT_LOGGING "ignore_corruption"
+#define DM_VERITY_OPT_RESTART "restart_on_corruption"
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+enum verity_mode {
+ DM_VERITY_MODE_EIO,
+ DM_VERITY_MODE_LOGGING,
+ DM_VERITY_MODE_RESTART
+};
+
+enum verity_block_type {
+ DM_VERITY_BLOCK_TYPE_DATA,
+ DM_VERITY_BLOCK_TYPE_METADATA
+};
+
struct dm_verity {
struct dm_dev *data_dev;
struct dm_dev *hash_dev;
@@ -54,6 +73,8 @@ struct dm_verity {
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned shash_descsize;/* the size of temporary space for crypto */
int hash_failed; /* set to 1 if hash of any block failed */
+ enum verity_mode mode; /* mode for handling verification errors */
+ unsigned corrupted_errs;/* Number of errors for corrupted blocks */
mempool_t *vec_mempool; /* mempool of bio vector */
@@ -175,6 +196,57 @@ static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
}
/*
+ * Handle verification errors.
+ */
+static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+ unsigned long long block)
+{
+ char verity_env[DM_VERITY_ENV_LENGTH];
+ char *envp[] = { verity_env, NULL };
+ const char *type_str = "";
+ struct mapped_device *md = dm_table_get_md(v->ti->table);
+
+ /* Corruption should be visible in device status in all modes */
+ v->hash_failed = 1;
+
+ if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
+ goto out;
+
+ v->corrupted_errs++;
+
+ switch (type) {
+ case DM_VERITY_BLOCK_TYPE_DATA:
+ type_str = "data";
+ break;
+ case DM_VERITY_BLOCK_TYPE_METADATA:
+ type_str = "metadata";
+ break;
+ default:
+ BUG();
+ }
+
+ DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
+ block);
+
+ if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+ DMERR("%s: reached maximum errors", v->data_dev->name);
+
+ snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
+ DM_VERITY_ENV_VAR_NAME, type, block);
+
+ kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
+
+out:
+ if (v->mode == DM_VERITY_MODE_LOGGING)
+ return 0;
+
+ if (v->mode == DM_VERITY_MODE_RESTART)
+ kernel_restart("dm-verity device corrupted");
+
+ return 1;
+}
+
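+/*
+ * e.g. (block number illustrative) corruption in data block 12345 yields the
+ * uevent environment string:
+ *
+ *	DM_VERITY_ERR_BLOCK_NR=0,12345
+ *
+ * where 0 is DM_VERITY_BLOCK_TYPE_DATA from the enum above.
+ */
+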
+/*
* Verify hash of a metadata block pertaining to the specified data block
* ("block" argument) at a specified level ("level" argument).
*
@@ -251,11 +323,11 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block,
goto release_ret_r;
}
if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
- DMERR_LIMIT("metadata block %llu is corrupted",
- (unsigned long long)hash_block);
- v->hash_failed = 1;
- r = -EIO;
- goto release_ret_r;
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block)) {
+ r = -EIO;
+ goto release_ret_r;
+ }
} else
aux->hash_verified = 1;
}
@@ -367,10 +439,9 @@ test_block_hash:
return r;
}
if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
- DMERR_LIMIT("data block %llu is corrupted",
- (unsigned long long)(io->block + b));
- v->hash_failed = 1;
- return -EIO;
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+ io->block + b))
+ return -EIO;
}
}
@@ -546,6 +617,19 @@ static void verity_status(struct dm_target *ti, status_type_t type,
else
for (x = 0; x < v->salt_size; x++)
DMEMIT("%02x", v->salt[x]);
+ if (v->mode != DM_VERITY_MODE_EIO) {
+ DMEMIT(" 1 ");
+ switch (v->mode) {
+ case DM_VERITY_MODE_LOGGING:
+ DMEMIT(DM_VERITY_OPT_LOGGING);
+ break;
+ case DM_VERITY_MODE_RESTART:
+ DMEMIT(DM_VERITY_OPT_RESTART);
+ break;
+ default:
+ BUG();
+ }
+ }
break;
}
}
@@ -647,13 +731,19 @@ static void verity_dtr(struct dm_target *ti)
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
struct dm_verity *v;
- unsigned num;
+ struct dm_arg_set as;
+ const char *opt_string;
+ unsigned int num, opt_params;
unsigned long long num_ll;
int r;
int i;
sector_t hash_position;
char dummy;
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of feature args"},
+ };
+
v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
if (!v) {
ti->error = "Cannot allocate verity structure";
@@ -668,8 +758,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- if (argc != 10) {
- ti->error = "Invalid argument count: exactly 10 arguments required";
+ if (argc < 10) {
+ ti->error = "Not enough arguments";
r = -EINVAL;
goto bad;
}
@@ -790,6 +880,39 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
+ argv += 10;
+ argc -= 10;
+
+ /* Optional parameters */
+ if (argc) {
+ as.argc = argc;
+ as.argv = argv;
+
+ r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (r)
+ goto bad;
+
+ while (opt_params) {
+ opt_params--;
+ opt_string = dm_shift_arg(&as);
+ if (!opt_string) {
+ ti->error = "Not enough feature arguments";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (!strcasecmp(opt_string, DM_VERITY_OPT_LOGGING))
+ v->mode = DM_VERITY_MODE_LOGGING;
+ else if (!strcasecmp(opt_string, DM_VERITY_OPT_RESTART))
+ v->mode = DM_VERITY_MODE_RESTART;
+ else {
+ ti->error = "Invalid feature arguments";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+ }
+
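+	/*
+	 * Example table (devices and digest illustrative), assuming the ten
+	 * standard verity args followed by the new optional feature args:
+	 *
+	 *	0 819200 verity 1 /dev/sda1 /dev/sda2 4096 4096 102400 1
+	 *		sha256 <root_digest> <salt> 1 ignore_corruption
+	 */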
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 73f28802dc7a..a930b72314ac 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,9 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/elevator.h> /* for rq_end_sector() */
+#include <linux/blk-mq.h>
#include <trace/events/block.h>
@@ -216,8 +219,29 @@ struct mapped_device {
struct kthread_worker kworker;
struct task_struct *kworker_task;
+
+ /* for request-based merge heuristic in dm_request_fn() */
+ unsigned seq_rq_merge_deadline_usecs;
+ int last_rq_rw;
+ sector_t last_rq_pos;
+ ktime_t last_rq_start_time;
+
+ /* for blk-mq request-based DM support */
+ struct blk_mq_tag_set tag_set;
+ bool use_blk_mq;
};
+#ifdef CONFIG_DM_MQ_DEFAULT
+static bool use_blk_mq = true;
+#else
+static bool use_blk_mq = false;
+#endif
+
+bool dm_use_blk_mq(struct mapped_device *md)
+{
+ return md->use_blk_mq;
+}
+
/*
* For mempools pre-allocation at the table loading time.
*/
@@ -250,35 +274,35 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
*/
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+static unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned ios = ACCESS_ONCE(*reserved_ios);
- unsigned modified_ios = 0;
+ unsigned param = ACCESS_ONCE(*module_param);
+ unsigned modified_param = 0;
- if (!ios)
- modified_ios = def;
- else if (ios > max)
- modified_ios = max;
+ if (!param)
+ modified_param = def;
+ else if (param > max)
+ modified_param = max;
- if (modified_ios) {
- (void)cmpxchg(reserved_ios, ios, modified_ios);
- ios = modified_ios;
+ if (modified_param) {
+ (void)cmpxchg(module_param, param, modified_param);
+ param = modified_param;
}
- return ios;
+ return param;
}
unsigned dm_get_reserved_bio_based_ios(void)
{
- return __dm_get_reserved_ios(&reserved_bio_based_ios,
+ return __dm_get_module_param(&reserved_bio_based_ios,
RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
unsigned dm_get_reserved_rq_based_ios(void)
{
- return __dm_get_reserved_ios(&reserved_rq_based_ios,
+ return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
@@ -433,7 +457,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
dm_get(md);
atomic_inc(&md->open_count);
-
out:
spin_unlock(&_minor_lock);
@@ -442,16 +465,20 @@ out:
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
- struct mapped_device *md = disk->private_data;
+ struct mapped_device *md;
spin_lock(&_minor_lock);
+ md = disk->private_data;
+ if (WARN_ON(!md))
+ goto out;
+
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
-
+out:
spin_unlock(&_minor_lock);
}
@@ -1014,6 +1041,11 @@ static void end_clone_bio(struct bio *clone, int error)
blk_update_request(tio->orig, 0, nr_bytes);
}
+static struct dm_rq_target_io *tio_from_request(struct request *rq)
+{
+ return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+}
+
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
@@ -1021,10 +1053,13 @@ static void end_clone_bio(struct bio *clone, int error)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ int nr_requests_pending;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
+ nr_requests_pending = md_in_flight(md);
+ if (!nr_requests_pending)
wake_up(&md->wait);
/*
@@ -1033,8 +1068,13 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue)
- blk_run_queue_async(md->queue);
+ if (run_queue) {
+ if (md->queue->mq_ops)
+ blk_mq_run_hw_queues(md->queue, true);
+ else if (!nr_requests_pending ||
+ (nr_requests_pending >= md->queue->nr_congestion_on))
+ blk_run_queue_async(md->queue);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1042,16 +1082,29 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md);
}
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
{
struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+
+ WARN_ON_ONCE(must_be_mapped && !clone->q);
blk_rq_unprep_clone(clone);
- if (clone->q && clone->q->mq_ops)
+
+ if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+ /* stacked on blk-mq queue(s) */
tio->ti->type->release_clone_rq(clone);
- else
- free_clone_request(tio->md, clone);
- free_rq_tio(tio);
+ else if (!md->queue->mq_ops)
+ /* request_fn queue stacked on request_fn queue(s) */
+ free_clone_request(md, clone);
+ /*
+ * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+ * no need to call free_clone_request() because we leverage blk-mq by
+ * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+ */
+
+ if (!md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
@@ -1079,38 +1132,54 @@ static void dm_end_request(struct request *clone, int error)
rq->sense_len = clone->sense_len;
}
- free_rq_clone(clone);
- blk_end_request_all(rq, error);
+ free_rq_clone(clone, true);
+ if (!rq->q->mq_ops)
+ blk_end_request_all(rq, error);
+ else
+ blk_mq_end_request(rq, error);
rq_completed(md, rw, true);
}
static void dm_unprep_request(struct request *rq)
{
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
- rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
+ if (!rq->q->mq_ops) {
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+ }
if (clone)
- free_rq_clone(clone);
+ free_rq_clone(clone, false);
}
/*
* Requeue the original request of a clone.
*/
-static void dm_requeue_unmapped_original_request(struct mapped_device *md,
- struct request *rq)
+static void old_requeue_request(struct request *rq)
{
- int rw = rq_data_dir(rq);
struct request_queue *q = rq->q;
unsigned long flags;
- dm_unprep_request(rq);
-
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+ struct request *rq)
+{
+ int rw = rq_data_dir(rq);
+
+ dm_unprep_request(rq);
+
+ if (!rq->q->mq_ops)
+ old_requeue_request(rq);
+ else {
+ blk_mq_requeue_request(rq);
+ blk_mq_kick_requeue_list(rq->q);
+ }
rq_completed(md, rw, false);
}
@@ -1122,35 +1191,44 @@ static void dm_requeue_unmapped_request(struct request *clone)
dm_requeue_unmapped_original_request(tio->md, tio->orig);
}
-static void __stop_queue(struct request_queue *q)
-{
- blk_stop_queue(q);
-}
-
-static void stop_queue(struct request_queue *q)
+static void old_stop_queue(struct request_queue *q)
{
unsigned long flags;
+ if (blk_queue_stopped(q))
+ return;
+
spin_lock_irqsave(q->queue_lock, flags);
- __stop_queue(q);
+ blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void __start_queue(struct request_queue *q)
+static void stop_queue(struct request_queue *q)
{
- if (blk_queue_stopped(q))
- blk_start_queue(q);
+ if (!q->mq_ops)
+ old_stop_queue(q);
+ else
+ blk_mq_stop_hw_queues(q);
}
-static void start_queue(struct request_queue *q)
+static void old_start_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __start_queue(q);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void start_queue(struct request_queue *q)
+{
+ if (!q->mq_ops)
+ old_start_queue(q);
+ else
+ blk_mq_start_stopped_hw_queues(q, true);
+}
+
static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
@@ -1189,13 +1267,20 @@ static void dm_done(struct request *clone, int error, bool mapped)
static void dm_softirq_done(struct request *rq)
{
bool mapped = true;
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
+ int rw;
if (!clone) {
- blk_end_request_all(rq, tio->error);
- rq_completed(tio->md, rq_data_dir(rq), false);
- free_rq_tio(tio);
+ rw = rq_data_dir(rq);
+ if (!rq->q->mq_ops) {
+ blk_end_request_all(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ free_rq_tio(tio);
+ } else {
+ blk_mq_end_request(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ }
return;
}
@@ -1211,7 +1296,7 @@ static void dm_softirq_done(struct request *rq)
*/
static void dm_complete_request(struct request *rq, int error)
{
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
blk_complete_request(rq);
@@ -1230,7 +1315,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
}
/*
- * Called with the clone's queue lock held
+ * Called with the clone's queue lock held (for non-blk-mq)
*/
static void end_clone_request(struct request *clone, int error)
{
@@ -1690,7 +1775,7 @@ out:
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static void _dm_request(struct request_queue *q, struct bio *bio)
+static void dm_make_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -1722,16 +1807,6 @@ int dm_request_based(struct mapped_device *md)
return blk_queue_stackable(md->queue);
}
-static void dm_request(struct request_queue *q, struct bio *bio)
-{
- struct mapped_device *md = q->queuedata;
-
- if (dm_request_based(md))
- blk_queue_bio(q, bio);
- else
- _dm_request(q, bio);
-}
-
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
int r;
@@ -1784,15 +1859,25 @@ static int setup_clone(struct request *clone, struct request *rq,
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
- struct request *clone = alloc_clone_request(md, gfp_mask);
+ /*
+ * Do not allocate a clone if tio->clone was already set
+ * (see: dm_mq_queue_rq).
+ */
+ bool alloc_clone = !tio->clone;
+ struct request *clone;
- if (!clone)
- return NULL;
+ if (alloc_clone) {
+ clone = alloc_clone_request(md, gfp_mask);
+ if (!clone)
+ return NULL;
+ } else
+ clone = tio->clone;
blk_rq_init(NULL, clone);
if (setup_clone(clone, rq, tio, gfp_mask)) {
/* -ENOMEM */
- free_clone_request(md, clone);
+ if (alloc_clone)
+ free_clone_request(md, clone);
return NULL;
}
@@ -1801,6 +1886,19 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
static void map_tio_request(struct kthread_work *work);
+static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
+ struct mapped_device *md)
+{
+ tio->md = md;
+ tio->ti = NULL;
+ tio->clone = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+ if (md->kworker_task)
+ init_kthread_work(&tio->work, map_tio_request);
+}
+
static struct dm_rq_target_io *prep_tio(struct request *rq,
struct mapped_device *md, gfp_t gfp_mask)
{
@@ -1812,13 +1910,7 @@ static struct dm_rq_target_io *prep_tio(struct request *rq,
if (!tio)
return NULL;
- tio->md = md;
- tio->ti = NULL;
- tio->clone = NULL;
- tio->orig = rq;
- tio->error = 0;
- memset(&tio->info, 0, sizeof(tio->info));
- init_kthread_work(&tio->work, map_tio_request);
+ init_tio(tio, rq, md);
table = dm_get_live_table(md, &srcu_idx);
if (!dm_table_mq_request_based(table)) {
@@ -1862,11 +1954,11 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
* DM_MAPIO_REQUEUE : the original request needs to be requeued
* < 0 : the request was completed due to failure
*/
-static int map_request(struct dm_target *ti, struct request *rq,
+static int map_request(struct dm_rq_target_io *tio, struct request *rq,
struct mapped_device *md)
{
int r;
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_target *ti = tio->ti;
struct request *clone = NULL;
if (tio->clone) {
@@ -1881,7 +1973,7 @@ static int map_request(struct dm_target *ti, struct request *rq,
}
if (IS_ERR(clone))
return DM_MAPIO_REQUEUE;
- if (setup_clone(clone, rq, tio, GFP_KERNEL)) {
+ if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
@@ -1922,15 +2014,24 @@ static void map_tio_request(struct kthread_work *work)
struct request *rq = tio->orig;
struct mapped_device *md = tio->md;
- if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE)
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
dm_requeue_unmapped_original_request(md, rq);
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
- blk_start_request(orig);
+ if (!orig->q->mq_ops)
+ blk_start_request(orig);
+ else
+ blk_mq_start_request(orig);
atomic_inc(&md->pending[rq_data_dir(orig)]);
+ if (md->seq_rq_merge_deadline_usecs) {
+ md->last_rq_pos = rq_end_sector(orig);
+ md->last_rq_rw = rq_data_dir(orig);
+ md->last_rq_start_time = ktime_get();
+ }
+
/*
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count by device opener,
@@ -1941,6 +2042,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ unsigned deadline;
+
+ if (!dm_request_based(md) || md->use_blk_mq)
+ return count;
+
+ if (kstrtouint(buf, 10, &deadline))
+ return -EINVAL;
+
+ if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+ deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+ md->seq_rq_merge_deadline_usecs = deadline;
+
+ return count;
+}
+
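+/*
+ * e.g. (path illustrative) enabling the heuristic with a 500us window:
+ *
+ *	echo 500 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
+ *
+ * Writes above 100000 are clamped to MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 0
+ * (the default) disables the heuristic entirely.
+ */
+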
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+ ktime_t kt_deadline;
+
+ if (!md->seq_rq_merge_deadline_usecs)
+ return false;
+
+ kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+ kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+ return !ktime_after(ktime_get(), kt_deadline);
+}
+
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
@@ -1964,7 +2104,7 @@ static void dm_request_fn(struct request_queue *q)
while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
- goto delay_and_out;
+ goto out;
/* always use block 0 to find the target for flushes for now */
pos = 0;
@@ -1983,12 +2123,17 @@ static void dm_request_fn(struct request_queue *q)
continue;
}
+ if (dm_request_peeked_before_merge_deadline(md) &&
+ md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
+ goto delay_and_out;
+
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
dm_start_request(md, rq);
- tio = rq->special;
+ tio = tio_from_request(rq);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
@@ -1998,33 +2143,11 @@ static void dm_request_fn(struct request_queue *q)
goto out;
delay_and_out:
- blk_delay_queue(q, HZ / 10);
+ blk_delay_queue(q, HZ / 100);
out:
dm_put_live_table(md, srcu_idx);
}
-int dm_underlying_device_busy(struct request_queue *q)
-{
- return blk_lld_busy(q);
-}
-EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
-
-static int dm_lld_busy(struct request_queue *q)
-{
- int r;
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table_fast(md);
-
- if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
- r = 1;
- else
- r = dm_table_any_busy_target(map);
-
- dm_put_live_table_fast(md);
-
- return r;
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -2107,7 +2230,7 @@ static void dm_init_md_queue(struct mapped_device *md)
{
/*
* Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device has not been decided yet.
+ * devices. The type of this dm device may not have been decided yet.
* The type is decided at the first table loading time.
* To prevent problematic device stacking, clear the queue flag
* for request stacking support until then.
@@ -2115,13 +2238,21 @@ static void dm_init_md_queue(struct mapped_device *md)
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+}
+static void dm_init_old_md_queue(struct mapped_device *md)
+{
+ md->use_blk_mq = false;
+ dm_init_md_queue(md);
+
+ /*
+ * Initialize aspects of queue that aren't relevant for blk-mq
+ */
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
- blk_queue_make_request(md->queue, dm_request);
+
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
- blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}
/*
@@ -2153,6 +2284,7 @@ static struct mapped_device *alloc_dev(int minor)
if (r < 0)
goto bad_io_barrier;
+ md->use_blk_mq = use_blk_mq;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
@@ -2241,7 +2373,6 @@ static void free_dev(struct mapped_device *md)
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
- bdput(md->bdev);
destroy_workqueue(md->wq);
if (md->kworker_task)
@@ -2252,19 +2383,24 @@ static void free_dev(struct mapped_device *md)
mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
- blk_integrity_unregister(md->disk);
- del_gendisk(md->disk);
+
cleanup_srcu_struct(&md->io_barrier);
free_table_devices(&md->table_devices);
- free_minor(minor);
+ dm_stats_cleanup(&md->stats);
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
-
+ if (blk_get_integrity(md->disk))
+ blk_integrity_unregister(md->disk);
+ del_gendisk(md->disk);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
- dm_stats_cleanup(&md->stats);
+ if (md->use_blk_mq)
+ blk_mq_free_tag_set(&md->tag_set);
+ bdput(md->bdev);
+ free_minor(minor);
+
module_put(THIS_MODULE);
kfree(md);
}
@@ -2273,7 +2409,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && md->bs) {
+ if (md->bs) {
/* The md already has necessary mempools. */
if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
/*
@@ -2305,7 +2441,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
p->bs = NULL;
out:
- /* mempool bind completed, now no need any mempools in the table */
+ /* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
}
@@ -2352,7 +2488,7 @@ int dm_queue_merge_is_compulsory(struct request_queue *q)
if (!q->merge_bvec_fn)
return 0;
- if (q->make_request_fn == dm_request) {
+ if (q->make_request_fn == dm_make_request) {
dev_md = q->queuedata;
if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
return 0;
@@ -2421,7 +2557,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t) && !blk_queue_stopped(q))
+ if (dm_table_request_based(t))
stop_queue(q);
__bind_mempools(md, t);
@@ -2503,14 +2639,6 @@ unsigned dm_get_md_type(struct mapped_device *md)
return md->type;
}
-static bool dm_md_type_request_based(struct mapped_device *md)
-{
- unsigned table_type = dm_get_md_type(md);
-
- return (table_type == DM_TYPE_REQUEST_BASED ||
- table_type == DM_TYPE_MQ_REQUEST_BASED);
-}
-
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
return md->immutable_target_type;
@@ -2527,6 +2655,14 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+static void init_rq_based_worker_thread(struct mapped_device *md)
+{
+ /* Initialize the request-based DM worker thread */
+ init_kthread_worker(&md->kworker);
+ md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+ "kdmwork-%s", dm_device_name(md));
+}
+
/*
* Fully initialize a request-based queue (->elevator, ->request_fn, etc).
*/
@@ -2534,28 +2670,158 @@ static int dm_init_request_based_queue(struct mapped_device *md)
{
struct request_queue *q = NULL;
- if (md->queue->elevator)
- return 1;
-
/* Fully initialize the queue */
q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
if (!q)
- return 0;
+ return -EINVAL;
+
+ /* disable dm_request_fn's merge heuristic by default */
+ md->seq_rq_merge_deadline_usecs = 0;
md->queue = q;
- dm_init_md_queue(md);
+ dm_init_old_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
- blk_queue_lld_busy(md->queue, dm_lld_busy);
- /* Also initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
+ init_rq_based_worker_thread(md);
elv_register_queue(md->queue);
- return 1;
+ return 0;
+}
+
+static int dm_mq_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct mapped_device *md = data;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+ /*
+ * Must initialize md member of tio, otherwise it won't
+ * be available in dm_mq_queue_rq.
+ */
+ tio->md = md;
+
+ return 0;
+}
+
+static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+ struct mapped_device *md = tio->md;
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ struct dm_target *ti;
+ sector_t pos;
+
+ /* always use block 0 to find the target for flushes for now */
+ pos = 0;
+ if (!(rq->cmd_flags & REQ_FLUSH))
+ pos = blk_rq_pos(rq);
+
+ ti = dm_table_find_target(map, pos);
+ if (!dm_target_is_valid(ti)) {
+ dm_put_live_table(md, srcu_idx);
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ /*
+ * Must perform setup, that rq_completed() requires,
+ * before returning BLK_MQ_RQ_QUEUE_ERROR
+ */
+ dm_start_request(md, rq);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+ dm_put_live_table(md, srcu_idx);
+
+ if (ti->type->busy && ti->type->busy(ti))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ dm_start_request(md, rq);
+
+ /* Init tio using md established in .init_request */
+ init_tio(tio, rq, md);
+
+ /*
+ * Establish tio->ti before queuing work (map_tio_request)
+ * or making direct call to map_request().
+ */
+ tio->ti = ti;
+
+ /* Clone the request if underlying devices aren't blk-mq */
+ if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
+ /* clone request is allocated at the end of the pdu */
+ tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
+ if (!clone_rq(rq, md, tio, GFP_ATOMIC))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ queue_kthread_work(&md->kworker, &tio->work);
+ } else {
+ /* Direct call is fine since .queue_rq allows allocations */
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+ dm_requeue_unmapped_original_request(md, rq);
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_ops dm_mq_ops = {
+ .queue_rq = dm_mq_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .complete = dm_softirq_done,
+ .init_request = dm_mq_init_request,
+};
+
+static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
+{
+ unsigned md_type = dm_get_md_type(md);
+ struct request_queue *q;
+ int err;
+
+ memset(&md->tag_set, 0, sizeof(md->tag_set));
+ md->tag_set.ops = &dm_mq_ops;
+ md->tag_set.queue_depth = BLKDEV_MAX_RQ;
+ md->tag_set.numa_node = NUMA_NO_NODE;
+ md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ md->tag_set.nr_hw_queues = 1;
+ if (md_type == DM_TYPE_REQUEST_BASED) {
+ /* make the memory for non-blk-mq clone part of the pdu */
+ md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
+ } else
+ md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
+ md->tag_set.driver_data = md;
+
+ err = blk_mq_alloc_tag_set(&md->tag_set);
+ if (err)
+ return err;
+
+ q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_tag_set;
+ }
+ md->queue = q;
+ dm_init_md_queue(md);
+
+ /* backfill 'mq' sysfs registration normally done in blk_register_queue */
+ blk_mq_register_disk(md->disk);
+
+ if (md_type == DM_TYPE_REQUEST_BASED)
+ init_rq_based_worker_thread(md);
+
+ return 0;
+
+out_tag_set:
+ blk_mq_free_tag_set(&md->tag_set);
+ return err;
+}
+
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+ if (type == DM_TYPE_BIO_BASED)
+ return type;
+
+ return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}
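+
+/*
+ * i.e. bio-based tables pass through unchanged, while both request-based
+ * types are normalized to DM_TYPE_MQ_REQUEST_BASED when use_blk_mq is set
+ * and to DM_TYPE_REQUEST_BASED when it is not.
+ */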
/*
@@ -2563,9 +2829,29 @@ static int dm_init_request_based_queue(struct mapped_device *md)
*/
int dm_setup_md_queue(struct mapped_device *md)
{
- if (dm_md_type_request_based(md) && !dm_init_request_based_queue(md)) {
- DMWARN("Cannot initialize queue for request-based mapped device");
- return -EINVAL;
+ int r;
+ unsigned md_type = filter_md_type(dm_get_md_type(md), md);
+
+ switch (md_type) {
+ case DM_TYPE_REQUEST_BASED:
+ r = dm_init_request_based_queue(md);
+ if (r) {
+ DMWARN("Cannot initialize queue for request-based mapped device");
+ return r;
+ }
+ break;
+ case DM_TYPE_MQ_REQUEST_BASED:
+ r = dm_init_request_based_blk_mq_queue(md);
+ if (r) {
+ DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
+ return r;
+ }
+ break;
+ case DM_TYPE_BIO_BASED:
+ dm_init_old_md_queue(md);
+ blk_queue_make_request(md->queue, dm_make_request);
+ blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+ break;
}
return 0;
@@ -2616,6 +2902,19 @@ void dm_get(struct mapped_device *md)
BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
+int dm_hold(struct mapped_device *md)
+{
+ spin_lock(&_minor_lock);
+ if (test_bit(DMF_FREEING, &md->flags)) {
+ spin_unlock(&_minor_lock);
+ return -EBUSY;
+ }
+ dm_get(md);
+ spin_unlock(&_minor_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_hold);
+
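+/*
+ * Sketch of the intended calling pattern (cf. snapshot_resume() in
+ * dm-snap.c); dm_hold() returns -EBUSY if the device is already being
+ * freed, in which case the caller must not touch it:
+ *
+ *	if (dm_hold(origin_md))
+ *		origin_md = NULL;
+ *	...
+ *	if (origin_md) {
+ *		dm_internal_suspend_fast(origin_md);
+ *		...
+ *		dm_internal_resume_fast(origin_md);
+ *		dm_put(origin_md);
+ *	}
+ */
+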
const char *dm_device_name(struct mapped_device *md)
{
return md->name;
@@ -2629,19 +2928,26 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
might_sleep();
- spin_lock(&_minor_lock);
map = dm_get_live_table(md, &srcu_idx);
+
+ spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- if (dm_request_based(md))
+ if (dm_request_based(md) && md->kworker_task)
flush_kthread_worker(&md->kworker);
+ /*
+ * Take suspend_lock so that presuspend and postsuspend methods
+ * do not race with internal suspend.
+ */
+ mutex_lock(&md->suspend_lock);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
+ mutex_unlock(&md->suspend_lock);
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx);
@@ -2883,7 +3189,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
*/
if (dm_request_based(md)) {
stop_queue(md->queue);
- flush_kthread_worker(&md->kworker);
+ if (md->kworker_task)
+ flush_kthread_worker(&md->kworker);
}
flush_workqueue(md->wq);
@@ -3115,6 +3422,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
flush_workqueue(md->wq);
dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
+EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
void dm_internal_resume_fast(struct mapped_device *md)
{
@@ -3126,6 +3434,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
done:
mutex_unlock(&md->suspend_lock);
}
+EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*-----------------------------------------------------------------
* Event notification.
@@ -3179,6 +3488,7 @@ struct gendisk *dm_disk(struct mapped_device *md)
{
return md->disk;
}
+EXPORT_SYMBOL_GPL(dm_disk);
struct kobject *dm_kobject(struct mapped_device *md)
{
@@ -3226,16 +3536,19 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+ unsigned integrity, unsigned per_bio_data_size)
{
struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
- struct kmem_cache *cachep;
+ struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0;
unsigned int front_pad;
if (!pools)
return NULL;
+ type = filter_md_type(type, md);
+
switch (type) {
case DM_TYPE_BIO_BASED:
cachep = _io_cache;
@@ -3243,13 +3556,13 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
break;
case DM_TYPE_REQUEST_BASED:
+ cachep = _rq_tio_cache;
pool_size = dm_get_reserved_rq_based_ios();
pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
if (!pools->rq_pool)
goto out;
/* fall through to setup remaining rq-based pools */
case DM_TYPE_MQ_REQUEST_BASED:
- cachep = _rq_tio_cache;
if (!pool_size)
pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
@@ -3257,12 +3570,14 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
WARN_ON(per_bio_data_size != 0);
break;
default:
- goto out;
+ BUG();
}
- pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
- if (!pools->io_pool)
- goto out;
+ if (cachep) {
+ pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
+ if (!pools->io_pool)
+ goto out;
+ }
pools->bs = bioset_create_nobvec(pool_size, front_pad);
if (!pools->bs)
@@ -3319,6 +3634,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 59f53e79db82..6123c2bf9150 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -70,7 +70,6 @@ void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-int dm_table_any_busy_target(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
@@ -212,6 +211,8 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
+bool dm_use_blk_mq(struct mapped_device *md);
+
int dm_io_init(void);
void dm_io_exit(void);
@@ -221,7 +222,8 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+ unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
@@ -235,4 +237,8 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
return !maxlen || strlen(result) + 1 >= maxlen;
}
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count);
+
#endif
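
For orientation, a hedged caller-side sketch of the new dm_alloc_md_mempools() signature (the real call site is in dm-table.c, outside this section; example_alloc_pools() is hypothetical):

static int example_alloc_pools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *pools;

	/* Hypothetical sketch: the allocator now consults the mapped_device,
	 * so filter_md_type() can promote DM_TYPE_REQUEST_BASED to
	 * DM_TYPE_MQ_REQUEST_BASED for blk-mq devices. */
	pools = dm_alloc_md_mempools(md, dm_table_get_type(t),
				     0 /* integrity */, 0 /* per_bio_data_size */);
	if (!pools)
		return -ENOMEM;
	/* ... attach pools to the table ... */
	dm_free_md_mempools(pools);	/* illustrative cleanup only */
	return 0;
}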
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
new file mode 100644
index 000000000000..fcfc4b9b2672
--- /dev/null
+++ b/drivers/md/md-cluster.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (C) 2015, SUSE
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/dlm.h>
+#include <linux/sched.h>
+#include <linux/raid/md_p.h>
+#include "md.h"
+#include "bitmap.h"
+#include "md-cluster.h"
+
+#define LVB_SIZE 64
+#define NEW_DEV_TIMEOUT 5000
+
+struct dlm_lock_resource {
+ dlm_lockspace_t *ls;
+ struct dlm_lksb lksb;
+ char *name; /* lock name. */
+ uint32_t flags; /* flags to pass to dlm_lock() */
+ struct completion completion; /* completion for synchronized locking */
+ void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
+ struct mddev *mddev; /* pointing back to mddev. */
+};
+
+struct suspend_info {
+ int slot;
+ sector_t lo;
+ sector_t hi;
+ struct list_head list;
+};
+
+struct resync_info {
+ __le64 lo;
+ __le64 hi;
+};
+
+/* md_cluster_info flags */
+#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+
+
+struct md_cluster_info {
+ /* dlm lock space and resources for clustered raid. */
+ dlm_lockspace_t *lockspace;
+ int slot_number;
+ struct completion completion;
+ struct dlm_lock_resource *sb_lock;
+ struct mutex sb_mutex;
+ struct dlm_lock_resource *bitmap_lockres;
+ struct list_head suspend_list;
+ spinlock_t suspend_lock;
+ struct md_thread *recovery_thread;
+ unsigned long recovery_map;
+ /* communication lock resources */
+ struct dlm_lock_resource *ack_lockres;
+ struct dlm_lock_resource *message_lockres;
+ struct dlm_lock_resource *token_lockres;
+ struct dlm_lock_resource *no_new_dev_lockres;
+ struct md_thread *recv_thread;
+ struct completion newdisk_completion;
+ unsigned long state;
+};
+
+enum msg_type {
+ METADATA_UPDATED = 0,
+ RESYNCING,
+ NEWDISK,
+ REMOVE,
+ RE_ADD,
+};
+
+struct cluster_msg {
+ int type;
+ int slot;
+ /* TODO: Unionize this for smaller footprint */
+ sector_t low;
+ sector_t high;
+ char uuid[16];
+ int raid_slot;
+};
+
+static void sync_ast(void *arg)
+{
+ struct dlm_lock_resource *res;
+
+ res = (struct dlm_lock_resource *) arg;
+ complete(&res->completion);
+}
+
+static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
+{
+ int ret = 0;
+
+ init_completion(&res->completion);
+ ret = dlm_lock(res->ls, mode, &res->lksb,
+ res->flags, res->name, strlen(res->name),
+ 0, sync_ast, res, res->bast);
+ if (ret)
+ return ret;
+ wait_for_completion(&res->completion);
+ return res->lksb.sb_status;
+}
+
+static int dlm_unlock_sync(struct dlm_lock_resource *res)
+{
+ return dlm_lock_sync(res, DLM_LOCK_NL);
+}
+
+static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
+ char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
+{
+ struct dlm_lock_resource *res = NULL;
+ int ret, namelen;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
+ if (!res)
+ return NULL;
+ res->ls = cinfo->lockspace;
+ res->mddev = mddev;
+ namelen = strlen(name);
+ res->name = kzalloc(namelen + 1, GFP_KERNEL);
+ if (!res->name) {
+ pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
+ goto out_err;
+ }
+ strlcpy(res->name, name, namelen + 1);
+ if (with_lvb) {
+ res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
+ if (!res->lksb.sb_lvbptr) {
+ pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name);
+ goto out_err;
+ }
+ res->flags = DLM_LKF_VALBLK;
+ }
+
+ if (bastfn)
+ res->bast = bastfn;
+
+ res->flags |= DLM_LKF_EXPEDITE;
+
+ ret = dlm_lock_sync(res, DLM_LOCK_NL);
+ if (ret) {
+ pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name);
+ goto out_err;
+ }
+ res->flags &= ~DLM_LKF_EXPEDITE;
+ res->flags |= DLM_LKF_CONVERT;
+
+ return res;
+out_err:
+ kfree(res->lksb.sb_lvbptr);
+ kfree(res->name);
+ kfree(res);
+ return NULL;
+}
+
+static void lockres_free(struct dlm_lock_resource *res)
+{
+ if (!res)
+ return;
+
+ init_completion(&res->completion);
+ dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ wait_for_completion(&res->completion);
+
+ kfree(res->name);
+ kfree(res->lksb.sb_lvbptr);
+ kfree(res);
+}
+
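For orientation, a minimal usage sketch of the helpers above (illustrative only, not part of this patch; example_probe_bitmap() is hypothetical):

static int example_probe_bitmap(struct mddev *mddev, int slot)
{
	struct dlm_lock_resource *res;
	char name[64];
	int ret;

	snprintf(name, sizeof(name), "bitmap%04d", slot);
	res = lockres_init(mddev, name, NULL, 1);	/* 1 => allocate an LVB */
	if (!res)
		return -ENOMEM;
	ret = dlm_lock_sync(res, DLM_LOCK_PW);	/* blocks until granted */
	if (!ret)
		dlm_unlock_sync(res);		/* down-convert back to NL */
	lockres_free(res);			/* drops the lock entirely */
	return ret;
}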
+static char *pretty_uuid(char *dest, char *src)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (i == 4 || i == 6 || i == 8 || i == 10)
+ len += sprintf(dest + len, "-");
+ len += sprintf(dest + len, "%02x", (__u8)src[i]);
+ }
+ return dest;
+}
+
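As a worked example of the formatting above (illustrative bytes): a raw input of 00 01 02 ... 0f renders as "00010203-0405-0607-0809-0a0b0c0d0e0f", i.e. an 8-4-4-4-12 hex grouping, with dashes inserted before bytes 4, 6, 8 and 10.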
+static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
+ sector_t lo, sector_t hi)
+{
+ struct resync_info *ri;
+
+ ri = (struct resync_info *)lockres->lksb.sb_lvbptr;
+ ri->lo = cpu_to_le64(lo);
+ ri->hi = cpu_to_le64(hi);
+}
+
+static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
+{
+ struct resync_info ri;
+ struct suspend_info *s = NULL;
+ sector_t hi = 0;
+
+ dlm_lock_sync(lockres, DLM_LOCK_CR);
+ memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
+ hi = le64_to_cpu(ri.hi);
+ if (hi > 0) {
+ s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
+ if (!s)
+ goto out;
+ s->hi = hi;
+ s->lo = le64_to_cpu(ri.lo);
+ }
+ dlm_unlock_sync(lockres);
+out:
+ return s;
+}
+
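A note on the lock value block shared by the two helpers above: the first 16 bytes of the 64-byte LVB (LVB_SIZE) carry struct resync_info { __le64 lo; __le64 hi; }. A node publishes its active resync window by writing lo/hi and re-acquiring the lock in PW mode (see resync_info_update() below), and peers decode the window under a CR lock as in read_resync_info().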
+static void recover_bitmaps(struct md_thread *thread)
+{
+ struct mddev *mddev = thread->mddev;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct dlm_lock_resource *bm_lockres;
+ char str[64];
+ int slot, ret;
+ struct suspend_info *s, *tmp;
+ sector_t lo, hi;
+
+ while (cinfo->recovery_map) {
+ slot = fls64((u64)cinfo->recovery_map) - 1;
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
+ snprintf(str, 64, "bitmap%04d", slot);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres) {
+ pr_err("md-cluster: Cannot initialize bitmaps\n");
+ goto clear_bit;
+ }
+
+ ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (ret) {
+ pr_err("md-cluster: Could not DLM lock %s: %d\n",
+ str, ret);
+ goto clear_bit;
+ }
+ ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
+ if (ret) {
+ pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
+ goto dlm_unlock;
+ }
+ if (hi > 0) {
+ /* TODO: Wait for current resync to finish */
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ if (lo < mddev->recovery_cp)
+ mddev->recovery_cp = lo;
+ md_check_recovery(mddev);
+ }
+dlm_unlock:
+ dlm_unlock_sync(bm_lockres);
+clear_bit:
+ lockres_free(bm_lockres);
+ clear_bit(slot, &cinfo->recovery_map);
+ }
+}
+
+static void recover_prep(void *arg)
+{
+}
+
+static void recover_slot(void *arg, struct dlm_slot *slot)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
+ mddev->bitmap_info.cluster_name,
+ slot->nodeid, slot->slot,
+ cinfo->slot_number);
+ set_bit(slot->slot - 1, &cinfo->recovery_map);
+ if (!cinfo->recovery_thread) {
+ cinfo->recovery_thread = md_register_thread(recover_bitmaps,
+ mddev, "recover");
+ if (!cinfo->recovery_thread) {
+ pr_warn("md-cluster: Could not create recovery thread\n");
+ return;
+ }
+ }
+ md_wakeup_thread(cinfo->recovery_thread);
+}
+
+static void recover_done(void *arg, struct dlm_slot *slots,
+ int num_slots, int our_slot,
+ uint32_t generation)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ cinfo->slot_number = our_slot;
+ complete(&cinfo->completion);
+}
+
+static const struct dlm_lockspace_ops md_ls_ops = {
+ .recover_prep = recover_prep,
+ .recover_slot = recover_slot,
+ .recover_done = recover_done,
+};
+
+/*
+ * The BAST function for the ack lock resource
+ * This function wakes up the receive thread in
+ * order to receive and process the message.
+ */
+static void ack_bast(void *arg, int mode)
+{
+ struct dlm_lock_resource *res = (struct dlm_lock_resource *)arg;
+ struct md_cluster_info *cinfo = res->mddev->cluster_info;
+
+ if (mode == DLM_LOCK_EX)
+ md_wakeup_thread(cinfo->recv_thread);
+}
+
+static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+{
+ struct suspend_info *s, *tmp;
+
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ pr_info("%s:%d Deleting suspend_info: %d\n",
+ __func__, __LINE__, slot);
+ list_del(&s->list);
+ kfree(s);
+ break;
+ }
+}
+
+static void remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+{
+ spin_lock_irq(&cinfo->suspend_lock);
+ __remove_suspend_info(cinfo, slot);
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
+
+static void process_suspend_info(struct md_cluster_info *cinfo,
+ int slot, sector_t lo, sector_t hi)
+{
+ struct suspend_info *s;
+
+ if (!hi) {
+ remove_suspend_info(cinfo, slot);
+ return;
+ }
+ s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
+ if (!s)
+ return;
+ s->slot = slot;
+ s->lo = lo;
+ s->hi = hi;
+ spin_lock_irq(&cinfo->suspend_lock);
+ /* Remove existing entry (if any) before adding */
+ __remove_suspend_info(cinfo, slot);
+ list_add(&s->list, &cinfo->suspend_list);
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
+static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
+{
+ char disk_uuid[64];
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ char event_name[] = "EVENT=ADD_DEVICE";
+ char raid_slot[16];
+ char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
+ int len;
+
+ len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
+ pretty_uuid(disk_uuid + len, cmsg->uuid);
+ snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
+ pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
+ init_completion(&cinfo->newdisk_completion);
+ set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+ kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
+ wait_for_completion_timeout(&cinfo->newdisk_completion,
+ NEW_DEV_TIMEOUT);
+ clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+}
+
+
+static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ md_reload_sb(mddev);
+ dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+}
+
+static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+
+ if (rdev)
+ md_kick_rdev_from_array(rdev);
+ else
+ pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", __func__, __LINE__, msg->raid_slot);
+}
+
+static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ clear_bit(Faulty, &rdev->flags);
+ else
+ pr_warn("%s: %d Could not find disk(%d) which is faulty", __func__, __LINE__, msg->raid_slot);
+}
+
+static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
+{
+ switch (msg->type) {
+ case METADATA_UPDATED:
+ pr_info("%s: %d Received message: METADATA_UPDATE from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_metadata_update(mddev, msg);
+ break;
+ case RESYNCING:
+ pr_info("%s: %d Received message: RESYNCING from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_suspend_info(mddev->cluster_info, msg->slot,
+ msg->low, msg->high);
+ break;
+ case NEWDISK:
+ pr_info("%s: %d Received message: NEWDISK from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_add_new_disk(mddev, msg);
+ break;
+ case REMOVE:
+ pr_info("%s: %d Received REMOVE from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_remove_disk(mddev, msg);
+ break;
+ case RE_ADD:
+ pr_info("%s: %d Received RE_ADD from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_readd_disk(mddev, msg);
+ break;
+ default:
+ pr_warn("%s:%d Received unknown message from %d\n",
+ __func__, __LINE__, msg->slot);
+ }
+}
+
+/*
+ * thread for receiving message
+ */
+static void recv_daemon(struct md_thread *thread)
+{
+ struct md_cluster_info *cinfo = thread->mddev->cluster_info;
+ struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
+ struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
+ struct cluster_msg msg;
+
+ /*get CR on Message*/
+ if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
+ pr_err("md/raid1:failed to get CR on MESSAGE\n");
+ return;
+ }
+
+ /* read lvb and wake up thread to process this message_lockres */
+ memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
+ process_recvd_msg(thread->mddev, &msg);
+
+ /*release CR on ack_lockres*/
+ dlm_unlock_sync(ack_lockres);
+ /*up-convert to EX on message_lockres*/
+ dlm_lock_sync(message_lockres, DLM_LOCK_EX);
+ /*get CR on ack_lockres again*/
+ dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ /*release CR on message_lockres*/
+ dlm_unlock_sync(message_lockres);
+}
+
+/* lock_comm()
+ * Takes the lock on the TOKEN lock resource so no other
+ * node can communicate while the operation is underway.
+ */
+static int lock_comm(struct md_cluster_info *cinfo)
+{
+ int error;
+
+ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+ if (error)
+ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+ __func__, __LINE__, error);
+ return error;
+}
+
+static void unlock_comm(struct md_cluster_info *cinfo)
+{
+ dlm_unlock_sync(cinfo->token_lockres);
+}
+
+/* __sendmsg()
+ * This function performs the actual sending of the message. This function is
+ * usually called after performing the encompassing operation
+ * The function:
+ * 1. Grabs the message lockresource in EX mode
+ * 2. Copies the message to the message LVB
+ * 3. Downconverts message lockresource to CR
+ * 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
+ * and the other nodes read the message. The thread will wait here until all other
+ * nodes have released ack lock resource.
+ * 5. Downconvert ack lockresource to CR
+ */
+static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
+{
+ int error;
+ int slot = cinfo->slot_number - 1;
+
+ cmsg->slot = cpu_to_le32(slot);
+ /*get EX on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
+ goto failed_message;
+ }
+
+ memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
+ sizeof(struct cluster_msg));
+ /*down-convert EX to CR on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CR);
+ if (error) {
+ pr_err("md-cluster: failed to convert EX to CR on MESSAGE(%d)\n",
+ error);
+ goto failed_message;
+ }
+
+ /*up-convert CR to EX on Ack*/
+ error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
+ error);
+ goto failed_ack;
+ }
+
+ /*down-convert EX to CR on Ack*/
+ error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
+ if (error) {
+ pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
+ error);
+ goto failed_ack;
+ }
+
+failed_ack:
+ dlm_unlock_sync(cinfo->message_lockres);
+failed_message:
+ return error;
+}
+
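Putting __sendmsg() and recv_daemon() together, one message round-trip looks roughly like this (a sketch derived from the comments above; S = sender, R = each receiver):

/*
 *   S: MESSAGE NL->EX, copy message into LVB, EX->CR
 *   S: ACK CR->EX; the conversion fires ack_bast() on every R and
 *      blocks until all Rs have dropped their CR on ACK
 *   R: MESSAGE ->CR, read LVB, process_recvd_msg()
 *   R: drop ACK, re-request CR on ACK, drop CR on MESSAGE
 *   S: ACK EX granted, then down-converted back to CR
 */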
+static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
+{
+ int ret;
+
+ lock_comm(cinfo);
+ ret = __sendmsg(cinfo, cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
+static int gather_all_resync_info(struct mddev *mddev, int total_slots)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int i, ret = 0;
+ struct dlm_lock_resource *bm_lockres;
+ struct suspend_info *s;
+ char str[64];
+
+
+ for (i = 0; i < total_slots; i++) {
+ memset(str, '\0', 64);
+ snprintf(str, 64, "bitmap%04d", i);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres)
+ return -ENOMEM;
+ if (i == (cinfo->slot_number - 1)) {
+ lockres_free(bm_lockres);
+ continue;
+ }
+
+ bm_lockres->flags |= DLM_LKF_NOQUEUE;
+ ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (ret == -EAGAIN) {
+ memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
+ s = read_resync_info(mddev, bm_lockres);
+ if (s) {
+ pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
+ __func__, __LINE__,
+ (unsigned long long) s->lo,
+ (unsigned long long) s->hi, i);
+ spin_lock_irq(&cinfo->suspend_lock);
+ s->slot = i;
+ list_add(&s->list, &cinfo->suspend_list);
+ spin_unlock_irq(&cinfo->suspend_lock);
+ }
+ ret = 0;
+ lockres_free(bm_lockres);
+ continue;
+ }
+ if (ret)
+ goto out;
+ /* TODO: Read the disk bitmap sb and check if it needs recovery */
+ dlm_unlock_sync(bm_lockres);
+ lockres_free(bm_lockres);
+ }
+out:
+ return ret;
+}
+
+static int join(struct mddev *mddev, int nodes)
+{
+ struct md_cluster_info *cinfo;
+ int ret, ops_rv;
+ char str[64];
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
+ if (!cinfo) {
+ module_put(THIS_MODULE);
+ return -ENOMEM;
+ }
+
+ init_completion(&cinfo->completion);
+
+ mutex_init(&cinfo->sb_mutex);
+ mddev->cluster_info = cinfo;
+
+ memset(str, 0, 64);
+ pretty_uuid(str, mddev->uuid);
+ ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
+ DLM_LSFL_FS, LVB_SIZE,
+ &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
+ if (ret)
+ goto err;
+ wait_for_completion(&cinfo->completion);
+ if (nodes < cinfo->slot_number) {
+ pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).",
+ cinfo->slot_number, nodes);
+ ret = -ERANGE;
+ goto err;
+ }
+ cinfo->sb_lock = lockres_init(mddev, "cmd-super",
+ NULL, 0);
+ if (!cinfo->sb_lock) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ /* Initiate the communication resources */
+ ret = -ENOMEM;
+ cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
+ if (!cinfo->recv_thread) {
+ pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
+ goto err;
+ }
+ cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
+ if (!cinfo->message_lockres)
+ goto err;
+ cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
+ if (!cinfo->token_lockres)
+ goto err;
+ cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
+ if (!cinfo->ack_lockres)
+ goto err;
+ cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
+ if (!cinfo->no_new_dev_lockres)
+ goto err;
+
+ /* get sync CR lock on ACK. */
+ ops_rv = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
+ if (ops_rv)
+ pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
+ ops_rv);
+ /* get sync CR lock on no-new-dev. */
+ ops_rv = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+ if (ops_rv)
+ pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ops_rv);
+
+
+ pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
+ snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
+ cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!cinfo->bitmap_lockres)
+ goto err;
+ if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
+ pr_err("Failed to get bitmap lock\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&cinfo->suspend_list);
+ spin_lock_init(&cinfo->suspend_lock);
+
+ ret = gather_all_resync_info(mddev, nodes);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ lockres_free(cinfo->message_lockres);
+ lockres_free(cinfo->token_lockres);
+ lockres_free(cinfo->ack_lockres);
+ lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->bitmap_lockres);
+ lockres_free(cinfo->sb_lock);
+ if (cinfo->lockspace)
+ dlm_release_lockspace(cinfo->lockspace, 2);
+ mddev->cluster_info = NULL;
+ kfree(cinfo);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static int leave(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ if (!cinfo)
+ return 0;
+ md_unregister_thread(&cinfo->recovery_thread);
+ md_unregister_thread(&cinfo->recv_thread);
+ lockres_free(cinfo->message_lockres);
+ lockres_free(cinfo->token_lockres);
+ lockres_free(cinfo->ack_lockres);
+ lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->sb_lock);
+ lockres_free(cinfo->bitmap_lockres);
+ dlm_release_lockspace(cinfo->lockspace, 2);
+ return 0;
+}
+
+/* slot_number(): Returns the MD slot number to use
+ * DLM numbers slots starting from 1, whereas cluster-md
+ * wants them to start from zero, so we subtract one.
+ */
+static int slot_number(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return cinfo->slot_number - 1;
+}
+
+static void resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi);
+ /* Re-acquire the lock to refresh LVB */
+ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
+}
+
+static int metadata_update_start(struct mddev *mddev)
+{
+ return lock_comm(mddev->cluster_info);
+}
+
+static int metadata_update_finish(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int ret;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(METADATA_UPDATED);
+ ret = __sendmsg(cinfo, &cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
+static int metadata_update_cancel(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return dlm_unlock_sync(cinfo->token_lockres);
+}
+
+static int resync_send(struct mddev *mddev, enum msg_type type,
+ sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int slot = cinfo->slot_number - 1;
+
+ pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__,
+ (unsigned long long)lo,
+ (unsigned long long)hi);
+ resync_info_update(mddev, lo, hi);
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(type);
+ cmsg.slot = cpu_to_le32(slot);
+ cmsg.low = cpu_to_le64(lo);
+ cmsg.high = cpu_to_le64(hi);
+ return sendmsg(cinfo, &cmsg);
+}
+
+static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ return resync_send(mddev, RESYNCING, lo, hi);
+}
+
+static void resync_finish(struct mddev *mddev)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ resync_send(mddev, RESYNCING, 0, 0);
+}
+
+static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
+ struct suspend_info *s;
+
+ spin_lock_irq(&cinfo->suspend_lock);
+ if (list_empty(&cinfo->suspend_list))
+ goto out;
+ list_for_each_entry(s, &cinfo->suspend_list, list)
+ if (hi > s->lo && lo < s->hi) {
+ ret = 1;
+ break;
+ }
+out:
+ spin_unlock_irq(&cinfo->suspend_lock);
+ return ret;
+}
+
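For example, with a peer's suspend window of [lo=1000, hi=2000), a local request for [1500, 1600) satisfies (hi > s->lo && lo < s->hi) and returns 1, while a request for [2000, 2100) does not overlap and returns 0.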
+static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int ret = 0;
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ char *uuid = sb->device_uuid;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(NEWDISK);
+ memcpy(cmsg.uuid, uuid, 16);
+ cmsg.raid_slot = rdev->desc_nr;
+ lock_comm(cinfo);
+ ret = __sendmsg(cinfo, &cmsg);
+ if (ret)
+ return ret;
+ cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
+ ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
+ cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
+ /* Some node does not "see" the device */
+ if (ret == -EAGAIN)
+ ret = -ENOENT;
+ else
+ dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+ return ret;
+}
+
+static int add_new_disk_finish(struct mddev *mddev)
+{
+ struct cluster_msg cmsg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret;
+ /* Write sb and inform others */
+ md_update_sb(mddev, 1);
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(METADATA_UPDATED);
+ ret = __sendmsg(cinfo, &cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
+static int new_disk_ack(struct mddev *mddev, bool ack)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
+ pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
+ return -EINVAL;
+ }
+
+ if (ack)
+ dlm_unlock_sync(cinfo->no_new_dev_lockres);
+ complete(&cinfo->newdisk_completion);
+ return 0;
+}
+
+static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct cluster_msg cmsg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(REMOVE);
+ cmsg.raid_slot = rdev->desc_nr;
+ return __sendmsg(cinfo, &cmsg);
+}
+
+static int gather_bitmaps(struct md_rdev *rdev)
+{
+ int sn, err;
+ sector_t lo, hi;
+ struct cluster_msg cmsg;
+ struct mddev *mddev = rdev->mddev;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(RE_ADD);
+ cmsg.raid_slot = rdev->desc_nr;
+ err = sendmsg(cinfo, &cmsg);
+ if (err)
+ goto out;
+
+ for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
+ if (sn == (cinfo->slot_number - 1))
+ continue;
+ err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
+ if (err) {
+ pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
+ goto out;
+ }
+ if ((hi > 0) && (lo < mddev->recovery_cp))
+ mddev->recovery_cp = lo;
+ }
+out:
+ return err;
+}
+
+static struct md_cluster_operations cluster_ops = {
+ .join = join,
+ .leave = leave,
+ .slot_number = slot_number,
+ .resync_info_update = resync_info_update,
+ .resync_start = resync_start,
+ .resync_finish = resync_finish,
+ .metadata_update_start = metadata_update_start,
+ .metadata_update_finish = metadata_update_finish,
+ .metadata_update_cancel = metadata_update_cancel,
+ .area_resyncing = area_resyncing,
+ .add_new_disk_start = add_new_disk_start,
+ .add_new_disk_finish = add_new_disk_finish,
+ .new_disk_ack = new_disk_ack,
+ .remove_disk = remove_disk,
+ .gather_bitmaps = gather_bitmaps,
+};
+
+static int __init cluster_init(void)
+{
+ pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
+ pr_info("Registering Cluster MD functions\n");
+ register_md_cluster_operations(&cluster_ops, THIS_MODULE);
+ return 0;
+}
+
+static void cluster_exit(void)
+{
+ unregister_md_cluster_operations();
+}
+
+module_init(cluster_init);
+module_exit(cluster_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Clustering support for MD");
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
new file mode 100644
index 000000000000..6817ee00e053
--- /dev/null
+++ b/drivers/md/md-cluster.h
@@ -0,0 +1,29 @@
+
+
+#ifndef _MD_CLUSTER_H
+#define _MD_CLUSTER_H
+
+#include "md.h"
+
+struct mddev;
+struct md_rdev;
+
+struct md_cluster_operations {
+ int (*join)(struct mddev *mddev, int nodes);
+ int (*leave)(struct mddev *mddev);
+ int (*slot_number)(struct mddev *mddev);
+ void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi);
+ void (*resync_finish)(struct mddev *mddev);
+ int (*metadata_update_start)(struct mddev *mddev);
+ int (*metadata_update_finish)(struct mddev *mddev);
+ int (*metadata_update_cancel)(struct mddev *mddev);
+ int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*add_new_disk_finish)(struct mddev *mddev);
+ int (*new_disk_ack)(struct mddev *mddev, bool ack);
+ int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*gather_bitmaps)(struct md_rdev *rdev);
+};
+
+#endif /* _MD_CLUSTER_H */
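
For orientation, a hedged sketch of how md core code calls through this ops table once registered (example_start_resync() is hypothetical; the real dispatch sites are in md.c below):

static int example_start_resync(struct mddev *mddev, sector_t lo, sector_t hi)
{
	int err;

	err = md_cluster_ops->resync_start(mddev, lo, hi); /* announce window */
	if (err)
		return err;
	/* ... perform the resync ... */
	md_cluster_ops->resync_finish(mddev);	/* clear the window */
	return 0;
}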
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c8d2bac4e28b..593a02476c78 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
+#include "md-cluster.h"
#ifndef MODULE
static void autostart_arrays(int part);
@@ -66,6 +67,11 @@ static void autostart_arrays(int part);
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
+struct md_cluster_operations *md_cluster_ops;
+EXPORT_SYMBOL(md_cluster_ops);
+struct module *md_cluster_mod;
+EXPORT_SYMBOL(md_cluster_mod);
+
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
@@ -249,6 +255,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
unsigned int sectors;
+ int cpu;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
@@ -284,7 +291,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
@@ -636,7 +646,7 @@ void mddev_unlock(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_unlock);
-static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
+struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
@@ -646,6 +656,7 @@ static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
@@ -2043,11 +2054,11 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
int choice = 0;
if (mddev->pers)
choice = mddev->raid_disks;
- while (find_rdev_nr_rcu(mddev, choice))
+ while (md_find_rdev_nr_rcu(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
- if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
+ if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
rcu_read_unlock();
return -EBUSY;
}
@@ -2162,11 +2173,12 @@ static void export_rdev(struct md_rdev *rdev)
kobject_put(&rdev->kobj);
}
-static void kick_rdev_from_array(struct md_rdev *rdev)
+void md_kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
+EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
static void export_array(struct mddev *mddev)
{
@@ -2175,7 +2187,7 @@ static void export_array(struct mddev *mddev)
while (!list_empty(&mddev->disks)) {
rdev = list_first_entry(&mddev->disks, struct md_rdev,
same_set);
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
}
mddev->raid_disks = 0;
mddev->major_version = 0;
@@ -2204,7 +2216,7 @@ static void sync_sbs(struct mddev *mddev, int nospares)
}
}
-static void md_update_sb(struct mddev *mddev, int force_change)
+void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
@@ -2365,6 +2377,37 @@ repeat:
wake_up(&rdev->blocked_wait);
}
}
+EXPORT_SYMBOL(md_update_sb);
+
+static int add_bound_rdev(struct md_rdev *rdev)
+{
+ struct mddev *mddev = rdev->mddev;
+ int err = 0;
+
+ if (!mddev->pers->hot_remove_disk) {
+ /* If there is hot_add_disk but no hot_remove_disk,
+ * then newly added disks are for geometry changes only
+ * and should be activated immediately.
+ */
+ super_types[mddev->major_version].
+ validate_super(mddev, rdev);
+ err = mddev->pers->hot_add_disk(mddev, rdev);
+ if (err) {
+ unbind_rdev_from_array(rdev);
+ export_rdev(rdev);
+ return err;
+ }
+ }
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
+
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (mddev->degraded)
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_new_event(mddev);
+ md_wakeup_thread(mddev->thread);
+ return 0;
+}
/* words written to sysfs files may, or may not, be \n terminated.
* We want to accept with case. For this we use cmd_match.
@@ -2467,10 +2510,16 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
- kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->remove_disk(mddev, rdev);
+ md_kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
@@ -2549,13 +2598,28 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(Replacement, &rdev->flags);
err = 0;
}
+ } else if (cmd_match(buf, "re-add")) {
+ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
+ /* clear_bit is performed _after_ all the devices
+ * have their local Faulty bit cleared. If any writes
+ * happen in the meantime in the local node, they
+ * will land in the local bitmap, which will be synced
+ * by this node eventually
+ */
+ if (!mddev_is_clustered(rdev->mddev) ||
+ (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+ clear_bit(Faulty, &rdev->flags);
+ err = add_bound_rdev(rdev);
+ }
+ } else
+ err = -EBUSY;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
-__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
@@ -3123,7 +3187,7 @@ static void analyze_sbs(struct mddev *mddev)
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev,b));
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
@@ -3138,18 +3202,27 @@ static void analyze_sbs(struct mddev *mddev)
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
continue;
}
- if (rdev != freshest)
+ if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
continue;
}
+ /* No device should have a Candidate flag
+ * when reading devices
+ */
+ if (test_bit(Candidate, &rdev->flags)) {
+ pr_info("md: kicking Cluster Candidate %s from array!\n",
+ bdevname(rdev->bdev, b));
+ md_kick_rdev_from_array(rdev);
+ continue;
+ }
+ }
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
@@ -3638,7 +3711,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
-__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
+__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
+ resync_start_show, resync_start_store);
/*
* The array state can be:
@@ -3851,7 +3925,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
-__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
+__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4003,8 +4077,12 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
if (mddev->pers) {
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
@@ -4101,7 +4179,7 @@ out_unlock:
}
static struct md_sysfs_entry md_metadata =
-__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4267,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry md_scan_mode =
-__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4413,8 @@ sync_completed_show(struct mddev *mddev, char *page)
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
-static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
+static struct md_sysfs_entry md_sync_completed =
+ __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
@@ -4348,7 +4427,6 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
int err;
- int chunk;
if (kstrtoull(buf, 10, &min))
return -EINVAL;
@@ -4362,16 +4440,8 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
- /* Must be a multiple of chunk_size */
- chunk = mddev->chunk_sectors;
- if (chunk) {
- sector_t temp = min;
-
- err = -EINVAL;
- if (sector_div(temp, chunk))
- goto out_unlock;
- }
- mddev->resync_min = min;
+ /* Round down to multiple of 4K for safety */
+ mddev->resync_min = round_down(min, 8);
err = 0;
out_unlock:
@@ -4748,12 +4818,12 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
+ if (mddev->queue)
+ blk_cleanup_queue(mddev->queue);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
- if (mddev->queue)
- blk_cleanup_queue(mddev->queue);
kfree(mddev);
}
@@ -5071,14 +5141,21 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = bitmap_create(mddev);
- if (err)
+ struct bitmap *bitmap;
+
+ bitmap = bitmap_create(mddev, -1);
+ if (IS_ERR(bitmap)) {
+ err = PTR_ERR(bitmap);
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
+ } else
+ mddev->bitmap = bitmap;
+
}
if (err) {
mddev_detach(mddev);
- pers->free(mddev, mddev->private);
+ if (mddev->private)
+ pers->free(mddev, mddev->private);
module_put(pers->owner);
bitmap_destroy(mddev);
return err;
@@ -5225,6 +5302,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
@@ -5243,6 +5322,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
@@ -5629,6 +5710,8 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
+ if (mddev_is_clustered(mddev))
+ info.state |= (1<<MD_SB_CLUSTERED);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
@@ -5684,7 +5767,7 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
return -EFAULT;
rcu_read_lock();
- rdev = find_rdev_nr_rcu(mddev, info.number);
+ rdev = md_find_rdev_nr_rcu(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
@@ -5717,6 +5800,13 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
+ if (mddev_is_clustered(mddev) &&
+ !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
+ pr_err("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
@@ -5803,31 +5893,38 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
clear_bit(WriteMostly, &rdev->flags);
+ /*
+ * check whether the device shows up in other nodes
+ */
+ if (mddev_is_clustered(mddev)) {
+ if (info->state & (1 << MD_DISK_CANDIDATE)) {
+ /* Through --cluster-confirm */
+ set_bit(Candidate, &rdev->flags);
+ err = md_cluster_ops->new_disk_ack(mddev, true);
+ if (err) {
+ export_rdev(rdev);
+ return err;
+ }
+ } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
+ /* --add initiated by this node */
+ err = md_cluster_ops->add_new_disk_start(mddev, rdev);
+ if (err) {
+ md_cluster_ops->add_new_disk_finish(mddev);
+ export_rdev(rdev);
+ return err;
+ }
+ }
+ }
+
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
- if (!err && !mddev->pers->hot_remove_disk) {
- /* If there is hot_add_disk but no hot_remove_disk
- * then added disks for geometry changes,
- * and should be added immediately.
- */
- super_types[mddev->major_version].
- validate_super(mddev, rdev);
- err = mddev->pers->hot_add_disk(mddev, rdev);
- if (err)
- unbind_rdev_from_array(rdev);
- }
if (err)
export_rdev(rdev);
else
- sysfs_notify_dirent_safe(rdev->sysfs_state);
-
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- if (mddev->degraded)
- set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (!err)
- md_new_event(mddev);
- md_wakeup_thread(mddev->thread);
+ err = add_bound_rdev(rdev);
+ if (mddev_is_clustered(mddev) &&
+ (info->state & (1 << MD_DISK_CLUSTER_ADD)))
+ md_cluster_ops->add_new_disk_finish(mddev);
return err;
}
@@ -5888,18 +5985,29 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
if (!rdev)
return -ENXIO;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
+
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
if (rdev->raid_disk >= 0)
goto busy;
- kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->remove_disk(mddev, rdev);
+
+ md_kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+
return 0;
busy:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
@@ -5949,12 +6057,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
err = -EINVAL;
goto abort_export;
}
+
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
- goto abort_export;
+ goto abort_clustered;
/*
* The rest should better be atomic, we can have disk failures
@@ -5965,6 +6076,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -5974,6 +6087,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_new_event(mddev);
return 0;
+abort_clustered:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
abort_export:
export_rdev(rdev);
return err;
@@ -6031,9 +6147,14 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
- err = bitmap_create(mddev);
- if (!err)
+ struct bitmap *bitmap;
+
+ bitmap = bitmap_create(mddev, -1);
+ if (!IS_ERR(bitmap)) {
+ mddev->bitmap = bitmap;
err = bitmap_load(mddev);
+ } else
+ err = PTR_ERR(bitmap);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
@@ -6286,6 +6407,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
return rv;
}
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
@@ -6293,33 +6416,49 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
- if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
- return -EINVAL;
- if (mddev->recovery || mddev->sync_thread)
- return -EBUSY;
+ if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
+ rv = -EINVAL;
+ goto err;
+ }
+ if (mddev->recovery || mddev->sync_thread) {
+ rv = -EBUSY;
+ goto err;
+ }
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
+ struct bitmap *bitmap;
/* add the bitmap */
- if (mddev->bitmap)
- return -EEXIST;
- if (mddev->bitmap_info.default_offset == 0)
- return -EINVAL;
+ if (mddev->bitmap) {
+ rv = -EEXIST;
+ goto err;
+ }
+ if (mddev->bitmap_info.default_offset == 0) {
+ rv = -EINVAL;
+ goto err;
+ }
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
- rv = bitmap_create(mddev);
- if (!rv)
+ bitmap = bitmap_create(mddev, -1);
+ if (!IS_ERR(bitmap)) {
+ mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
+ } else
+ rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
- if (!mddev->bitmap)
- return -ENOENT;
- if (mddev->bitmap->storage.file)
- return -EINVAL;
+ if (!mddev->bitmap) {
+ rv = -ENOENT;
+ goto err;
+ }
+ if (mddev->bitmap->storage.file) {
+ rv = -EINVAL;
+ goto err;
+ }
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
@@ -6327,6 +6466,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
}
}
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+ return rv;
+err:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
return rv;
}
@@ -6386,6 +6531,7 @@ static inline bool md_ioctl_valid(unsigned int cmd)
case SET_DISK_FAULTY:
case STOP_ARRAY:
case STOP_ARRAY_RO:
+ case CLUSTERED_DISK_NACK:
return true;
default:
return false;
@@ -6658,6 +6804,13 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto unlock;
}
+ case CLUSTERED_DISK_NACK:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->new_disk_ack(mddev, false);
+ else
+ err = -EINVAL;
+ goto unlock;
+
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto unlock;
@@ -7231,6 +7384,55 @@ int unregister_md_personality(struct md_personality *p)
}
EXPORT_SYMBOL(unregister_md_personality);
+int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
+{
+ if (md_cluster_ops != NULL)
+ return -EALREADY;
+ spin_lock(&pers_lock);
+ md_cluster_ops = ops;
+ md_cluster_mod = module;
+ spin_unlock(&pers_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_md_cluster_operations);
+
+int unregister_md_cluster_operations(void)
+{
+ spin_lock(&pers_lock);
+ md_cluster_ops = NULL;
+ spin_unlock(&pers_lock);
+ return 0;
+}
+EXPORT_SYMBOL(unregister_md_cluster_operations);
+
+int md_setup_cluster(struct mddev *mddev, int nodes)
+{
+ int err;
+
+ err = request_module("md-cluster");
+ if (err) {
+ pr_err("md-cluster module not found.\n");
+ return err;
+ }
+
+ spin_lock(&pers_lock);
+ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ spin_unlock(&pers_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&pers_lock);
+
+ return md_cluster_ops->join(mddev, nodes);
+}
+
+void md_cluster_stop(struct mddev *mddev)
+{
+ if (!md_cluster_ops)
+ return;
+ md_cluster_ops->leave(mddev);
+ module_put(md_cluster_mod);
+}
+
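A hypothetical call-site sketch for the pair above (the real caller is in bitmap.c, outside this hunk; example_join_cluster() is illustrative):

static int example_join_cluster(struct mddev *mddev)
{
	int err;

	err = md_setup_cluster(mddev, mddev->bitmap_info.nodes);
	if (err)
		return err;	/* module missing or join failed */
	pr_info("md: joined cluster as slot %d\n",
		md_cluster_ops->slot_number(mddev));
	return 0;
}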
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev *rdev;
@@ -7368,7 +7570,11 @@ int md_allow_write(struct mddev *mddev)
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock(&mddev->lock);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock(&mddev->lock);
@@ -7569,6 +7775,9 @@ void md_do_sync(struct md_thread *thread)
md_new_event(mddev);
update_time = jiffies;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_start(mddev, j, max_sectors);
+
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7611,8 +7820,7 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
- sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < speed_min(mddev));
+ sectors = mddev->pers->sync_request(mddev, j, &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
@@ -7629,6 +7837,8 @@ void md_do_sync(struct md_thread *thread)
j += sectors;
if (j > 2)
mddev->curr_resync = j;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_info_update(mddev, j, max_sectors);
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
@@ -7670,11 +7880,18 @@ void md_do_sync(struct md_thread *thread)
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
- if ((currspeed > speed_max(mddev)) ||
- !is_mddev_idle(mddev, 0)) {
+ if (currspeed > speed_max(mddev)) {
msleep(500);
goto repeat;
}
+ if (!is_mddev_idle(mddev, 0)) {
+ /*
+ * Give other IO more of a chance.
+ * The faster the devices, the less we wait.
+ */
+ wait_event(mddev->recovery_wait,
+ !atomic_read(&mddev->recovery_active));
+ }
}
}
printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
@@ -7687,7 +7904,10 @@ void md_do_sync(struct md_thread *thread)
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
- mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
+ mddev->pers->sync_request(mddev, max_sectors, &skipped);
+
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_finish(mddev);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
@@ -7918,8 +8138,13 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS) {
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+ }
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -8017,6 +8242,8 @@ void md_reap_sync_thread(struct mddev *mddev)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
@@ -8029,6 +8256,8 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8649,6 +8878,28 @@ err_wq:
return ret;
}
+void md_reload_sb(struct mddev *mddev)
+{
+ struct md_rdev *rdev, *tmp;
+
+ rdev_for_each_safe(rdev, tmp, mddev) {
+ rdev->sb_loaded = 0;
+ ClearPageUptodate(rdev->sb_page);
+ }
+ mddev->raid_disks = 0;
+ analyze_sbs(mddev);
+ rdev_for_each_safe(rdev, tmp, mddev) {
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ /* since we don't write to faulty devices, we figure out if the
+ * disk is faulty by comparing events
+ */
+ if (mddev->events > sb->events)
+ set_bit(Faulty, &rdev->flags);
+ }
+
+}
+EXPORT_SYMBOL(md_reload_sb);
+
#ifndef MODULE
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 318ca8fd430f..4046a6c6f223 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -23,6 +23,7 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
@@ -170,6 +171,10 @@ enum flag_bits {
* a want_replacement device with same
* raid_disk number.
*/
+ Candidate, /* For clustered environments only:
+ * This device is seen locally but not
+ * by the whole cluster
+ */
};
#define BB_LEN_MASK (0x00000000000001FFULL)
@@ -202,6 +207,8 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
+struct md_cluster_info;
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -430,6 +437,8 @@ struct mddev {
unsigned long daemon_sleep; /* how many jiffies between updates? */
unsigned long max_write_behind; /* write-behind mode */
int external;
+ int nodes; /* Maximum number of nodes in the cluster */
+ char cluster_name[64]; /* Name of the cluster */
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
@@ -448,6 +457,7 @@ struct mddev {
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
+ struct md_cluster_info *cluster_info;
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -496,7 +506,7 @@ struct md_personality
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
- sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
+ sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
int (*resize) (struct mddev *mddev, sector_t sectors);
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (struct mddev *mddev);
@@ -608,6 +618,11 @@ static inline void safe_put_page(struct page *p)
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
+extern int register_md_cluster_operations(struct md_cluster_operations *ops,
+ struct module *module);
+extern int unregister_md_cluster_operations(void);
+extern int md_setup_cluster(struct mddev *mddev, int nodes);
+extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
@@ -654,6 +669,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
+extern void md_reload_sb(struct mddev *mddev);
+extern void md_update_sb(struct mddev *mddev, int force);
+extern void md_kick_rdev_from_array(struct md_rdev * rdev);
+struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
static inline int mddev_check_plugged(struct mddev *mddev)
{
return !!blk_check_plugged(md_unplug, mddev,
@@ -669,4 +688,9 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}
+extern struct md_cluster_operations *md_cluster_ops;
+static inline int mddev_is_clustered(struct mddev *mddev)
+{
+ return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
+}
#endif /* _MD_MD_H */
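
The clustered metadata-update bracketing added throughout md.c can be summarized in one compact pattern (taken verbatim from the hunks above):

if (mddev_is_clustered(mddev))
	md_cluster_ops->metadata_update_start(mddev);	/* take the token lock */
md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
	md_cluster_ops->metadata_update_finish(mddev);	/* broadcast METADATA_UPDATED, release token */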
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a13f738a7b39..2cb59a641cd2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -271,14 +271,16 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
goto abort;
}
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
- if (!discard_supported)
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ if (mddev->queue) {
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ }
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -313,7 +315,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
/*
* remaps the bio to the target device. we separate two flows.
- * power 2 flow and a general flow for the sake of perfromance
+ * power 2 flow and a general flow for the sake of performance
*/
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
sector_t sector, sector_t *sector_offset)
@@ -429,9 +431,12 @@ static int raid0_run(struct mddev *mddev)
}
if (md_check_no_bitmap(mddev))
return -EINVAL;
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+ if (mddev->queue) {
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+ }
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -448,16 +453,17 @@ static int raid0_run(struct mddev *mddev)
printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
mdname(mddev),
(unsigned long long)mddev->array_sectors);
- /* calculate the max read-ahead size.
- * For read-ahead of large files to be effective, we need to
- * readahead at least twice a whole stripe. i.e. number of devices
- * multiplied by chunk size times 2.
- * If an individual device has an ra_pages greater than the
- * chunk size, then we will not drive that device as hard as it
- * wants. We consider this a configuration error: a larger
- * chunksize should be used in that case.
- */
- {
+
+ if (mddev->queue) {
+ /* calculate the max read-ahead size.
+ * For read-ahead of large files to be effective, we need to
+ * readahead at least twice a whole stripe. i.e. number of devices
+ * multiplied by chunk size times 2.
+ * If an individual device has an ra_pages greater than the
+ * chunk size, then we will not drive that device as hard as it
+ * wants. We consider this a configuration error: a larger
+ * chunksize should be used in that case.
+ */
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
@@ -467,8 +473,6 @@ static int raid0_run(struct mddev *mddev)
dump_zones(mddev);
ret = md_integrity_register(mddev);
- if (ret)
- raid0_free(mddev, conf);
return ret;
}
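
To make the read-ahead comment above concrete: with 4 member disks, 512 KiB chunks and 4 KiB pages, one stripe is 4 * (512 KiB / 4 KiB) = 512 pages, so ra_pages is raised to at least 2 * 512 = 1024 pages (4 MiB) unless the backing device already asks for more.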
@@ -526,6 +530,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
+ sector = bio->bi_iter.bi_sector;
zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector);
split->bi_bdev = tmp_dev->bdev;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4153da5d4011..9157a29c8dbf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -539,7 +539,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
has_nonrot_disk = 0;
choose_next_idle = 0;
- choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
+ if ((conf->mddev->recovery_cp < this_sector + sectors) ||
+ (mddev_is_clustered(conf->mddev) &&
+ md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ this_sector + sectors)))
+ choose_first = 1;
+ else
+ choose_first = 0;
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
sector_t dist;
@@ -560,7 +566,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (test_bit(WriteMostly, &rdev->flags)) {
/* Don't balance among write-mostly, just
* use the first as a last resort */
- if (best_disk < 0) {
+ if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
if (first_bad < this_sector)
@@ -569,7 +575,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = first_bad - this_sector;
} else
best_good_sectors = sectors;
- best_disk = disk;
+ best_dist_disk = disk;
+ best_pending_disk = disk;
}
continue;
}
@@ -1101,8 +1108,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
if (bio_data_dir(bio) == WRITE &&
- bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_iter.bi_sector < mddev->suspend_hi) {
+ ((bio_end_sector(bio) > mddev->suspend_lo &&
+ bio->bi_iter.bi_sector < mddev->suspend_hi) ||
+ (mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1113,7 +1122,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_iter.bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi ||
+ (mddev_is_clustered(mddev) &&
+ !md_cluster_ops->area_resyncing(mddev,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
}
@@ -1560,6 +1572,7 @@ static int raid1_spare_active(struct mddev *mddev)
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
if (repl
+ && !test_bit(Candidate, &repl->flags)
&& repl->recovery_offset == MaxSector
&& !test_bit(Faulty, &repl->flags)
&& !test_and_set_bit(In_sync, &repl->flags)) {
@@ -2467,7 +2480,7 @@ static int init_resync(struct r1conf *conf)
* that can be installed to exclude normal IO requests.
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
@@ -2520,13 +2533,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*skipped = 1;
return sync_blocks;
}
- /*
- * If there is non-resync activity waiting for a turn,
- * and resync is going fast enough,
- * then let it though before starting on this new sync request.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a7196c49d15d..e793ab6b3570 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2889,7 +2889,7 @@ static int init_resync(struct r10conf *conf)
*/
static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped, int go_faster)
+ int *skipped)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
@@ -2994,12 +2994,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (conf->geo.near_copies < conf->geo.raid_disks &&
max_sector > (sector_nr | chunk_mask))
max_sector = (sector_nr | chunk_mask) + 1;
- /*
- * If there is non-resync activity waiting for us then
- * put in a delay to throttle resync.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e75d48c0421a..77dfd720aaa0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -54,6 +54,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
+#include <linux/flex_array.h>
#include <trace/events/block.h>
#include "md.h"
@@ -496,7 +497,7 @@ static void shrink_buffers(struct stripe_head *sh)
}
}
-static int grow_buffers(struct stripe_head *sh)
+static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
{
int i;
int num = sh->raid_conf->pool_size;
@@ -504,7 +505,7 @@ static int grow_buffers(struct stripe_head *sh)
for (i = 0; i < num; i++) {
struct page *page;
- if (!(page = alloc_page(GFP_KERNEL))) {
+ if (!(page = alloc_page(gfp))) {
return 1;
}
sh->dev[i].page = page;
@@ -525,6 +526,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
BUG_ON(stripe_operations_active(sh));
+ BUG_ON(sh->batch_head);
pr_debug("init_stripe called, stripe %llu\n",
(unsigned long long)sector);
@@ -552,8 +554,10 @@ retry:
}
if (read_seqcount_retry(&conf->gen_lock, seq))
goto retry;
+ sh->overwrite_disks = 0;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
+ set_bit(STRIPE_BATCH_READY, &sh->state);
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -668,20 +672,28 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
*(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
- if (!conf->inactive_blocked)
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
sh = get_free_stripe(conf, hash);
+ if (!sh && llist_empty(&conf->released_stripes) &&
+ !test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE,
+ &conf->cache_state);
+ }
if (noblock && sh == NULL)
break;
if (!sh) {
- conf->inactive_blocked = 1;
+ set_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
- || !conf->inactive_blocked),
+ || !test_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state)),
*(conf->hash_locks + hash));
- conf->inactive_blocked = 0;
+ clear_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state);
} else {
init_stripe(sh, sector, previous);
atomic_inc(&sh->count);
@@ -708,6 +720,130 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
return sh;
}
+static bool is_full_stripe_write(struct stripe_head *sh)
+{
+ BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
+ return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
+}
+
+static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+{
+ local_irq_disable();
+ if (sh1 > sh2) {
+ spin_lock(&sh2->stripe_lock);
+ spin_lock_nested(&sh1->stripe_lock, 1);
+ } else {
+ spin_lock(&sh1->stripe_lock);
+ spin_lock_nested(&sh2->stripe_lock, 1);
+ }
+}
+
+static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+{
+ spin_unlock(&sh1->stripe_lock);
+ spin_unlock(&sh2->stripe_lock);
+ local_irq_enable();
+}
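+
+The lock_two_stripes() helper above avoids an ABBA deadlock by always taking the two stripe_lock spinlocks in a fixed (address) order, and uses spin_lock_nested() so lockdep accepts the second same-class acquisition. The pattern in isolation:
+
+	/* sketch: address-ordered double lock, as in lock_two_stripes() */
+	static void lock_pair(spinlock_t *a, spinlock_t *b)
+	{
+		if (a > b)		/* always take the lower address first */
+			swap(a, b);
+		spin_lock(a);
+		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
+	}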
+
+/* Only a freshly initialized, full-stripe normal write can be added to a batch list */
+static bool stripe_can_batch(struct stripe_head *sh)
+{
+ return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+ is_full_stripe_write(sh);
+}
+
+/* we only search backwards (toward lower sectors) for a batch head */
+static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
+{
+ struct stripe_head *head;
+ sector_t head_sector, tmp_sec;
+ int hash;
+ int dd_idx;
+
+ if (!stripe_can_batch(sh))
+ return;
+ /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
+ tmp_sec = sh->sector;
+ if (!sector_div(tmp_sec, conf->chunk_sectors))
+ return;
+ head_sector = sh->sector - STRIPE_SECTORS;
+
+ hash = stripe_hash_locks_hash(head_sector);
+ spin_lock_irq(conf->hash_locks + hash);
+ head = __find_stripe(conf, head_sector, conf->generation);
+ if (head && !atomic_inc_not_zero(&head->count)) {
+ spin_lock(&conf->device_lock);
+ if (!atomic_read(&head->count)) {
+ if (!test_bit(STRIPE_HANDLE, &head->state))
+ atomic_inc(&conf->active_stripes);
+ BUG_ON(list_empty(&head->lru) &&
+ !test_bit(STRIPE_EXPANDING, &head->state));
+ list_del_init(&head->lru);
+ if (head->group) {
+ head->group->stripes_cnt--;
+ head->group = NULL;
+ }
+ }
+ atomic_inc(&head->count);
+ spin_unlock(&conf->device_lock);
+ }
+ spin_unlock_irq(conf->hash_locks + hash);
+
+ if (!head)
+ return;
+ if (!stripe_can_batch(head))
+ goto out;
+
+ lock_two_stripes(head, sh);
+ /* clear_batch_ready clears the flag */
+ if (!stripe_can_batch(head) || !stripe_can_batch(sh))
+ goto unlock_out;
+
+ if (sh->batch_head)
+ goto unlock_out;
+
+ dd_idx = 0;
+ while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ dd_idx++;
+ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+ goto unlock_out;
+
+ if (head->batch_head) {
+ spin_lock(&head->batch_head->batch_lock);
+ /* This batch list is already running */
+ if (!stripe_can_batch(head)) {
+ spin_unlock(&head->batch_head->batch_lock);
+ goto unlock_out;
+ }
+
+ /*
+ * at this point, head's BATCH_READY could be cleared, but we
+ * can still add the stripe to the batch list
+ */
+ list_add(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_head->batch_lock);
+
+ sh->batch_head = head->batch_head;
+ } else {
+ head->batch_head = head;
+ sh->batch_head = head->batch_head;
+ spin_lock(&head->batch_lock);
+ list_add_tail(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_lock);
+ }
+
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ if (atomic_dec_return(&conf->preread_active_stripes)
+ < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+
+ atomic_inc(&sh->count);
+unlock_out:
+ unlock_two_stripes(head, sh);
+out:
+ release_stripe(head);
+}
+
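+
Several handlers reworked by this patch (ops_run_io(), ops_run_biodrain(), both reconstruct paths) walk a batch with the same goto-again idiom; stripped down, it is a circular traversal of batch_list starting at the batch head:

	/* sketch: visit each member of a stripe batch exactly once */
	struct stripe_head *sh = head_sh;
	do {
		process_one(sh);	/* hypothetical per-stripe work */
		sh = list_first_entry(&sh->batch_list,
				      struct stripe_head, batch_list);
	} while (sh != head_sh);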
/* Determine if 'data_offset' or 'new_data_offset' should be used
* in this stripe_head.
*/
@@ -738,6 +874,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
struct r5conf *conf = sh->raid_conf;
int i, disks = sh->disks;
+ struct stripe_head *head_sh = sh;
might_sleep();
@@ -746,6 +883,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
+
+ sh = head_sh;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
@@ -764,6 +903,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
rw |= REQ_SYNC;
+again:
bi = &sh->dev[i].req;
rbi = &sh->dev[i].rreq; /* For writing to replacement */
@@ -782,7 +922,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
/* We raced and saw duplicates */
rrdev = NULL;
} else {
- if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+ if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
rdev = rrdev;
rrdev = NULL;
}
@@ -853,13 +993,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
+ if (sh != head_sh)
+ atomic_inc(&head_sh->count);
if (use_new_offset(conf, sh))
bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
@@ -903,6 +1045,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
rbi->bi_rw, i);
atomic_inc(&sh->count);
+ if (sh != head_sh)
+ atomic_inc(&head_sh->count);
if (use_new_offset(conf, sh))
rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
@@ -934,8 +1078,18 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ if (sh->batch_head)
+ set_bit(STRIPE_BATCH_ERR,
+ &sh->batch_head->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
+
+ if (!head_sh->batch_head)
+ continue;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ if (sh != head_sh)
+ goto again;
}
}
@@ -1051,6 +1205,7 @@ static void ops_run_biofill(struct stripe_head *sh)
struct async_submit_ctl submit;
int i;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1109,16 +1264,28 @@ static void ops_complete_compute(void *stripe_head_ref)
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
- struct raid5_percpu *percpu)
+ struct raid5_percpu *percpu, int i)
{
- return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
+ void *addr;
+
+ addr = flex_array_get(percpu->scribble, i);
+ return addr + sizeof(struct page *) * (sh->disks + 2);
+}
+
+/* return a pointer to the page list region of the scribble buffer */
+static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
+{
+ void *addr;
+
+ addr = flex_array_get(percpu->scribble, i);
+ return addr;
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target];
struct page *xor_dest = tgt->page;
@@ -1127,6 +1294,8 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
struct async_submit_ctl submit;
int i;
+ BUG_ON(sh->batch_head);
+
pr_debug("%s: stripe %llu block: %d\n",
__func__, (unsigned long long)sh->sector, target);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
@@ -1138,7 +1307,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
- ops_complete_compute, sh, to_addr_conv(sh, percpu));
+ ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
@@ -1156,7 +1325,9 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
* destination buffer is recorded in srcs[count] and the Q destination
* is recorded in srcs[count+1].
*/
-static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
+static int set_syndrome_sources(struct page **srcs,
+ struct stripe_head *sh,
+ int srctype)
{
int disks = sh->disks;
int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
@@ -1171,8 +1342,15 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
i = d0_idx;
do {
int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+ struct r5dev *dev = &sh->dev[i];
- srcs[slot] = sh->dev[i].page;
+ if (i == sh->qd_idx || i == sh->pd_idx ||
+ (srctype == SYNDROME_SRC_ALL) ||
+ (srctype == SYNDROME_SRC_WANT_DRAIN &&
+ test_bit(R5_Wantdrain, &dev->flags)) ||
+ (srctype == SYNDROME_SRC_WRITTEN &&
+ dev->written))
+ srcs[slot] = sh->dev[i].page;
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
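
set_syndrome_sources() now selects its sources by srctype: the P and Q devices are always included, and the selector decides which data devices join them. The three selectors come from raid5.h in this series, roughly:

	enum {
		SYNDROME_SRC_ALL,	 /* every data device */
		SYNDROME_SRC_WANT_DRAIN, /* only devices with R5_Wantdrain set */
		SYNDROME_SRC_WRITTEN,	 /* only devices with ->written bios */
	};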
@@ -1183,7 +1361,7 @@ static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
- struct page **blocks = percpu->scribble;
+ struct page **blocks = to_addr_page(percpu, 0);
int target;
int qd_idx = sh->qd_idx;
struct dma_async_tx_descriptor *tx;
@@ -1193,6 +1371,7 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
int i;
int count;
+ BUG_ON(sh->batch_head);
if (sh->ops.target < 0)
target = sh->ops.target2;
else if (sh->ops.target2 < 0)
@@ -1211,12 +1390,12 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
if (target == qd_idx) {
- count = set_syndrome_sources(blocks, sh);
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
blocks[count] = NULL; /* regenerating p is not necessary */
BUG_ON(blocks[count+1] != dest); /* q should already be set */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
} else {
/* Compute any data- or p-drive using XOR */
@@ -1229,7 +1408,7 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
}
@@ -1248,9 +1427,10 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
struct r5dev *tgt = &sh->dev[target];
struct r5dev *tgt2 = &sh->dev[target2];
struct dma_async_tx_descriptor *tx;
- struct page **blocks = percpu->scribble;
+ struct page **blocks = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu block1: %d block2: %d\n",
__func__, (unsigned long long)sh->sector, target, target2);
BUG_ON(target < 0 || target2 < 0);
@@ -1290,7 +1470,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
/* Missing P+Q, just recompute */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, syndrome_disks+2,
STRIPE_SIZE, &submit);
} else {
@@ -1314,21 +1494,21 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit,
ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, NULL, NULL,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
&submit);
- count = set_syndrome_sources(blocks, sh);
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
init_async_submit(&submit, ASYNC_TX_FENCE, tx,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, count+2,
STRIPE_SIZE, &submit);
}
} else {
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
if (failb == syndrome_disks) {
/* We're missing D+P. */
return async_raid6_datap_recov(syndrome_disks+2,
@@ -1352,17 +1532,18 @@ static void ops_complete_prexor(void *stripe_head_ref)
}
static struct dma_async_tx_descriptor *
-ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
- struct dma_async_tx_descriptor *tx)
+ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit;
/* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1374,31 +1555,56 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
}
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
- ops_complete_prexor, sh, to_addr_conv(sh, percpu));
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
return tx;
}
static struct dma_async_tx_descriptor *
+ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
+{
+ struct page **blocks = to_addr_page(percpu, 0);
+ int count;
+ struct async_submit_ctl submit;
+
+ pr_debug("%s: stripe %llu\n", __func__,
+ (unsigned long long)sh->sector);
+
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
+
+ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
+ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
int i;
+ struct stripe_head *head_sh = sh;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
+ struct r5dev *dev;
struct bio *chosen;
- if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
+ sh = head_sh;
+ if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
struct bio *wbi;
+again:
+ dev = &sh->dev[i];
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
+ sh->overwrite_disks = 0;
BUG_ON(dev->written);
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
@@ -1423,6 +1629,15 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
}
wbi = r5_next_bio(wbi, dev->sector);
}
+
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+ batch_list);
+ if (sh == head_sh)
+ continue;
+ goto again;
+ }
}
}
@@ -1478,12 +1693,15 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs;
struct async_submit_ctl submit;
- int count = 0, pd_idx = sh->pd_idx, i;
+ int count, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
int prexor = 0;
unsigned long flags;
+ int j = 0;
+ struct stripe_head *head_sh = sh;
+ int last_stripe;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1500,15 +1718,18 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
ops_complete_reconstruct(sh);
return;
}
+again:
+ count = 0;
+ xor_srcs = to_addr_page(percpu, j);
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (written)
*/
- if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
prexor = 1;
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->written)
+ if (head_sh->dev[i].written)
xor_srcs[count++] = dev->page;
}
} else {
@@ -1525,17 +1746,32 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
- flags = ASYNC_TX_ACK |
- (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
-
- atomic_inc(&sh->count);
+ last_stripe = !head_sh->batch_head ||
+ list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list) == head_sh;
+ if (last_stripe) {
+ flags = ASYNC_TX_ACK |
+ (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
+
+ atomic_inc(&head_sh->count);
+ init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
+ to_addr_conv(sh, percpu, j));
+ } else {
+ flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
+ init_async_submit(&submit, flags, tx, NULL, NULL,
+ to_addr_conv(sh, percpu, j));
+ }
- init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
- to_addr_conv(sh, percpu));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+ if (!last_stripe) {
+ j++;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ goto again;
+ }
}
static void
@@ -1543,8 +1779,12 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
struct async_submit_ctl submit;
- struct page **blocks = percpu->scribble;
- int count, i;
+ struct page **blocks;
+ int count, i, j = 0;
+ struct stripe_head *head_sh = sh;
+ int last_stripe;
+ int synflags;
+ unsigned long txflags;
pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
@@ -1562,13 +1802,36 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
return;
}
- count = set_syndrome_sources(blocks, sh);
+again:
+ blocks = to_addr_page(percpu, j);
- atomic_inc(&sh->count);
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ synflags = SYNDROME_SRC_WRITTEN;
+ txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
+ } else {
+ synflags = SYNDROME_SRC_ALL;
+ txflags = ASYNC_TX_ACK;
+ }
+
+ count = set_syndrome_sources(blocks, sh, synflags);
+ last_stripe = !head_sh->batch_head ||
+ list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list) == head_sh;
- init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
- sh, to_addr_conv(sh, percpu));
+ if (last_stripe) {
+ atomic_inc(&head_sh->count);
+ init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
+ head_sh, to_addr_conv(sh, percpu, j));
+ } else
+ init_async_submit(&submit, 0, tx, NULL, NULL,
+ to_addr_conv(sh, percpu, j));
async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ if (!last_stripe) {
+ j++;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ goto again;
+ }
}
static void ops_complete_check(void *stripe_head_ref)
@@ -1589,7 +1852,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
struct page *xor_dest;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
int count;
@@ -1598,6 +1861,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+ BUG_ON(sh->batch_head);
count = 0;
xor_dest = sh->dev[pd_idx].page;
xor_srcs[count++] = xor_dest;
@@ -1608,7 +1872,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
}
init_async_submit(&submit, 0, NULL, NULL, NULL,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
&sh->ops.zero_sum_result, &submit);
@@ -1619,20 +1883,21 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
- struct page **srcs = percpu->scribble;
+ struct page **srcs = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
int count;
pr_debug("%s: stripe %llu checkp: %d\n", __func__,
(unsigned long long)sh->sector, checkp);
- count = set_syndrome_sources(srcs, sh);
+ BUG_ON(sh->batch_head);
+ count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
if (!checkp)
srcs[count] = NULL;
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
- sh, to_addr_conv(sh, percpu));
+ sh, to_addr_conv(sh, percpu, 0));
async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
@@ -1667,8 +1932,12 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
async_tx_ack(tx);
}
- if (test_bit(STRIPE_OP_PREXOR, &ops_request))
- tx = ops_run_prexor(sh, percpu, tx);
+ if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
+ if (level < 6)
+ tx = ops_run_prexor5(sh, percpu, tx);
+ else
+ tx = ops_run_prexor6(sh, percpu, tx);
+ }
if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
tx = ops_run_biodrain(sh, tx);
@@ -1693,7 +1962,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
BUG();
}
- if (overlap_clear)
+ if (overlap_clear && !sh->batch_head)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
@@ -1702,10 +1971,10 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf, int hash)
+static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
{
struct stripe_head *sh;
- sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
+ sh = kmem_cache_zalloc(conf->slab_cache, gfp);
if (!sh)
return 0;
@@ -1713,17 +1982,23 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
spin_lock_init(&sh->stripe_lock);
- if (grow_buffers(sh)) {
+ if (grow_buffers(sh, gfp)) {
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
- sh->hash_lock_index = hash;
+ sh->hash_lock_index =
+ conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
INIT_LIST_HEAD(&sh->lru);
+
+ spin_lock_init(&sh->batch_lock);
+ INIT_LIST_HEAD(&sh->batch_list);
+ sh->batch_head = NULL;
release_stripe(sh);
+ conf->max_nr_stripes++;
return 1;
}
@@ -1731,7 +2006,6 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1749,13 +2023,10 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
- while (num--) {
- if (!grow_one_stripe(conf, hash))
+ while (num--)
+ if (!grow_one_stripe(conf, GFP_KERNEL))
return 1;
- conf->max_nr_stripes++;
- hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
- }
+
return 0;
}
@@ -1772,13 +2043,21 @@ static int grow_stripes(struct r5conf *conf, int num)
* calculate over all devices (not just the data blocks), using zeros in place
* of the P and Q blocks.
*/
-static size_t scribble_len(int num)
+static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
{
+ struct flex_array *ret;
size_t len;
len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
-
- return len;
+ ret = flex_array_alloc(len, cnt, flags);
+ if (!ret)
+ return NULL;
+ /* always prealloc all elements, so no locking is required */
+ if (flex_array_prealloc(ret, 0, cnt, flags)) {
+ flex_array_free(ret);
+ return NULL;
+ }
+ return ret;
}
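
scribble_alloc() swaps the flat kmalloc() buffer for a flex_array holding one scribble region per stripe of a chunk, so each member of a batch gets its own region. A minimal round-trip with the same API, under the same prealloc-everything rule:

	#include <linux/flex_array.h>

	/* sketch: cnt elements of len bytes, prefaulted so gets never fail */
	struct flex_array *fa = flex_array_alloc(len, cnt, GFP_KERNEL);
	if (fa && flex_array_prealloc(fa, 0, cnt, GFP_KERNEL)) {
		flex_array_free(fa);		/* prealloc failed */
		fa = NULL;
	}
	if (fa)
		memset(flex_array_get(fa, 0), 0, len);	/* element 0 */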
static int resize_stripes(struct r5conf *conf, int newsize)
@@ -1896,16 +2175,16 @@ static int resize_stripes(struct r5conf *conf, int newsize)
err = -ENOMEM;
get_online_cpus();
- conf->scribble_len = scribble_len(newsize);
for_each_present_cpu(cpu) {
struct raid5_percpu *percpu;
- void *scribble;
+ struct flex_array *scribble;
percpu = per_cpu_ptr(conf->percpu, cpu);
- scribble = kmalloc(conf->scribble_len, GFP_NOIO);
+ scribble = scribble_alloc(newsize, conf->chunk_sectors /
+ STRIPE_SECTORS, GFP_NOIO);
if (scribble) {
- kfree(percpu->scribble);
+ flex_array_free(percpu->scribble);
percpu->scribble = scribble;
} else {
err = -ENOMEM;
@@ -1937,9 +2216,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return err;
}
-static int drop_one_stripe(struct r5conf *conf, int hash)
+static int drop_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
+ int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
spin_lock_irq(conf->hash_locks + hash);
sh = get_free_stripe(conf, hash);
@@ -1950,15 +2230,15 @@ static int drop_one_stripe(struct r5conf *conf, int hash)
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
+ conf->max_nr_stripes--;
return 1;
}
static void shrink_stripes(struct r5conf *conf)
{
- int hash;
- for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
- while (drop_one_stripe(conf, hash))
- ;
+ while (conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -2154,10 +2434,16 @@ static void raid5_end_write_request(struct bio *bi, int error)
}
rdev_dec_pending(rdev, conf->mddev);
+ if (sh->batch_head && !uptodate)
+ set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
+
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
+
+ if (sh->batch_head && sh != sh->batch_head)
+ release_stripe(sh->batch_head);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
@@ -2535,7 +2821,7 @@ static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
{
- int i, pd_idx = sh->pd_idx, disks = sh->disks;
+ int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
@@ -2571,13 +2857,15 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&conf->pending_full_writes);
} else {
- BUG_ON(level == 6);
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
+ BUG_ON(level == 6 &&
+ (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
+ test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (i == pd_idx)
+ if (i == pd_idx || i == qd_idx)
continue;
if (dev->towrite &&
@@ -2624,7 +2912,8 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
* toread/towrite point to the first in a chain.
* The bi_next chain must be in order.
*/
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
+static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
+ int forwrite, int previous)
{
struct bio **bip;
struct r5conf *conf = sh->raid_conf;
@@ -2643,6 +2932,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
* protect it.
*/
spin_lock_irq(&sh->stripe_lock);
+ /* Don't allow new IO added to stripes in batch list */
+ if (sh->batch_head)
+ goto overlap;
if (forwrite) {
bip = &sh->dev[dd_idx].towrite;
if (*bip == NULL)
@@ -2657,6 +2949,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
+ if (!forwrite || previous)
+ clear_bit(STRIPE_BATCH_READY, &sh->state);
+
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip)
bi->bi_next = *bip;
@@ -2674,7 +2969,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector = bio_end_sector(bi);
}
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
- set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
+ if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
+ sh->overwrite_disks++;
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
@@ -2688,6 +2984,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sh->bm_seq = conf->seq_flush+1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
+
+ if (stripe_can_batch(sh))
+ stripe_add_to_batch_list(conf, sh);
return 1;
overlap:
@@ -2720,6 +3019,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio **return_bi)
{
int i;
+ BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
int bitmap_end = 0;
@@ -2746,6 +3046,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
/* fail all writes first */
bi = sh->dev[i].towrite;
sh->dev[i].towrite = NULL;
+ sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
if (bi)
bitmap_end = 1;
@@ -2834,6 +3135,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
int abort = 0;
int i;
+ BUG_ON(sh->batch_head);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
wake_up(&conf->wait_for_overlap);
@@ -3064,6 +3366,7 @@ static void handle_stripe_fill(struct stripe_head *sh,
{
int i;
+ BUG_ON(sh->batch_head);
/* look for blocks to read/compute, skip this if a compute
* is already in flight, or if the stripe contents are in the
* midst of changing due to a write
@@ -3087,6 +3390,9 @@ static void handle_stripe_clean_event(struct r5conf *conf,
int i;
struct r5dev *dev;
int discard_pending = 0;
+ struct stripe_head *head_sh = sh;
+ bool do_endio = false;
+ int wakeup_nr = 0;
for (i = disks; i--; )
if (sh->dev[i].written) {
@@ -3102,8 +3408,11 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
- dev->page = dev->orig_page;
}
+ do_endio = true;
+
+returnbi:
+ dev->page = dev->orig_page;
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_iter.bi_sector <
@@ -3120,6 +3429,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+ batch_list);
+ if (sh != head_sh) {
+ dev = &sh->dev[i];
+ goto returnbi;
+ }
+ }
+ sh = head_sh;
+ dev = &sh->dev[i];
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
@@ -3141,8 +3461,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
* will be reinitialized
*/
spin_lock_irq(&conf->device_lock);
+unhash:
remove_hash(sh);
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ if (sh != head_sh)
+ goto unhash;
+ }
spin_unlock_irq(&conf->device_lock);
+ sh = head_sh;
+
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3151,6 +3480,45 @@ static void handle_stripe_clean_event(struct r5conf *conf,
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
md_wakeup_thread(conf->mddev->thread);
+
+ if (!head_sh->batch_head || !do_endio)
+ return;
+ for (i = 0; i < head_sh->disks; i++) {
+ if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
+ wakeup_nr++;
+ }
+ while (!list_empty(&head_sh->batch_list)) {
+ int i;
+ sh = list_first_entry(&head_sh->batch_list,
+ struct stripe_head, batch_list);
+ list_del_init(&sh->batch_list);
+
+ set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
+ head_sh->state & ~((1 << STRIPE_ACTIVE) |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+ STRIPE_EXPAND_SYNC_FLAG));
+ sh->check_state = head_sh->check_state;
+ sh->reconstruct_state = head_sh->reconstruct_state;
+ for (i = 0; i < sh->disks; i++) {
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wakeup_nr++;
+ sh->dev[i].flags = head_sh->dev[i].flags;
+ }
+
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
+ if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+ }
+
+ spin_lock_irq(&head_sh->stripe_lock);
+ head_sh->batch_head = NULL;
+ spin_unlock_irq(&head_sh->stripe_lock);
+ wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
+ if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
+ set_bit(STRIPE_HANDLE, &head_sh->state);
}
static void handle_stripe_dirtying(struct r5conf *conf,
@@ -3161,28 +3529,27 @@ static void handle_stripe_dirtying(struct r5conf *conf,
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
- /* RAID6 requires 'rcw' in current implementation.
- * Otherwise, check whether resync is now happening or should start.
+ /* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
* initial creation), so parity in some stripes might be inconsistent.
* In this case, we need to always do reconstruct-write, to ensure
* that in case of drive failure or read-error correction, we
* generate correct data from the parity.
*/
- if (conf->max_degraded == 2 ||
+ if (conf->rmw_level == PARITY_DISABLE_RMW ||
(recovery_cp < MaxSector && sh->sector >= recovery_cp &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
- pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
- conf->max_degraded, (unsigned long long)recovery_cp,
+ pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
+ conf->rmw_level, (unsigned long long)recovery_cp,
(unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
@@ -3192,7 +3559,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
rmw += 2*disks; /* cannot read it */
}
/* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
+ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
+ i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
@@ -3205,7 +3573,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0) {
+ if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
if (conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue,
@@ -3213,7 +3581,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
@@ -3232,7 +3600,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
- if (rcw <= rmw && rcw > 0) {
+ if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
/* want reconstruct write, but need to get some data */
int qread =0;
rcw = 0;
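
A worked example of the accounting above: on a 4+2 RAID6 overwriting a single data block with nothing cached, rmw must read the old data block plus P and Q (3 reads), while rcw must read the 3 untouched data blocks (also 3 reads); with the counts tied, conf->rmw_level breaks the tie.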
@@ -3290,6 +3658,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
{
struct r5dev *dev = NULL;
+ BUG_ON(sh->batch_head);
set_bit(STRIPE_HANDLE, &sh->state);
switch (sh->check_state) {
@@ -3380,6 +3749,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
int qd_idx = sh->qd_idx;
struct r5dev *dev;
+ BUG_ON(sh->batch_head);
set_bit(STRIPE_HANDLE, &sh->state);
BUG_ON(s->failed > 2);
@@ -3543,6 +3913,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
* copy some of them into a target stripe for expand.
*/
struct dma_async_tx_descriptor *tx = NULL;
+ BUG_ON(sh->batch_head);
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
if (i != sh->pd_idx && i != sh->qd_idx) {
@@ -3615,8 +3986,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
memset(s, 0, sizeof(*s));
- s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
+ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
s->failed_num[0] = -1;
s->failed_num[1] = -1;
@@ -3786,6 +4157,80 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
rcu_read_unlock();
}
+static int clear_batch_ready(struct stripe_head *sh)
+{
+ struct stripe_head *tmp;
+ if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
+ return 0;
+ spin_lock(&sh->stripe_lock);
+ if (!sh->batch_head) {
+ spin_unlock(&sh->stripe_lock);
+ return 0;
+ }
+
+ /*
+ * this stripe could be added to a batch list before we check
+ * BATCH_READY, skips it
+ */
+ if (sh->batch_head != sh) {
+ spin_unlock(&sh->stripe_lock);
+ return 1;
+ }
+ spin_lock(&sh->batch_lock);
+ list_for_each_entry(tmp, &sh->batch_list, batch_list)
+ clear_bit(STRIPE_BATCH_READY, &tmp->state);
+ spin_unlock(&sh->batch_lock);
+ spin_unlock(&sh->stripe_lock);
+
+ /*
+ * BATCH_READY is cleared, no new stripes can be added.
+ * batch_list can be accessed without lock
+ */
+ return 0;
+}
+
+static void check_break_stripe_batch_list(struct stripe_head *sh)
+{
+ struct stripe_head *head_sh, *next;
+ int i;
+
+ if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
+ return;
+
+ head_sh = sh;
+ do {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ BUG_ON(sh == head_sh);
+ } while (!test_bit(STRIPE_DEGRADED, &sh->state));
+
+ while (sh != head_sh) {
+ next = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ list_del_init(&sh->batch_list);
+
+ set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
+ head_sh->state & ~((1 << STRIPE_ACTIVE) |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+ (1 << STRIPE_DEGRADED) |
+ STRIPE_EXPAND_SYNC_FLAG));
+ sh->check_state = head_sh->check_state;
+ sh->reconstruct_state = head_sh->reconstruct_state;
+ for (i = 0; i < sh->disks; i++)
+ sh->dev[i].flags = head_sh->dev[i].flags &
+ (~((1 << R5_WriteError) | (1 << R5_Overlap)));
+
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+
+ sh = next;
+ }
+}
+
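+
check_break_stripe_batch_list() above, like the batch teardown in handle_stripe_clean_event(), propagates the head's state into each member with set_mask_bits(). Modulo atomicity (the real helper loops on cmpxchg), that call is equivalent to:

	/* non-atomic equivalent of set_mask_bits(ptr, mask, bits) */
	*ptr = (*ptr & ~mask) | bits;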
static void handle_stripe(struct stripe_head *sh)
{
struct stripe_head_state s;
@@ -3803,7 +4248,14 @@ static void handle_stripe(struct stripe_head *sh)
return;
}
- if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ if (clear_batch_ready(sh)) {
+ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
+ return;
+ }
+
+ check_break_stripe_batch_list(sh);
+
+ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */
if (!test_bit(STRIPE_DISCARD, &sh->state) &&
@@ -4158,7 +4610,7 @@ static int raid5_congested(struct mddev *mddev, int bits)
* how busy the stripe_cache is
*/
- if (conf->inactive_blocked)
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
if (conf->quiesce)
return 1;
@@ -4180,8 +4632,12 @@ static int raid5_mergeable_bvec(struct mddev *mddev,
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
- if ((bvm->bi_rw & 1) == WRITE)
- return biovec->bv_len; /* always allow writes to be mergeable */
+ /*
+ * always allow writes to be mergeable, read as well if array
+ * is degraded as we'll go through stripe cache anyway.
+ */
+ if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
+ return biovec->bv_len;
if (mddev->new_chunk_sectors < mddev->chunk_sectors)
chunk_sectors = mddev->new_chunk_sectors;
@@ -4603,12 +5059,14 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
set_bit(STRIPE_DISCARD, &sh->state);
finish_wait(&conf->wait_for_overlap, &w);
+ sh->overwrite_disks = 0;
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
continue;
sh->dev[d].towrite = bi;
set_bit(R5_OVERWRITE, &sh->dev[d].flags);
raid5_inc_bi_active_stripes(bi);
+ sh->overwrite_disks++;
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
@@ -4656,7 +5114,12 @@ static void make_request(struct mddev *mddev, struct bio * bi)
md_write_start(mddev, bi);
- if (rw == READ &&
+ /*
+ * If array is degraded, better not do chunk aligned read because
+ * later we might have to read it again in order to reconstruct
+ * data on failed drives.
+ */
+ if (rw == READ && mddev->degraded == 0 &&
mddev->reshape_position == MaxSector &&
chunk_aligned_read(mddev,bi))
return;
@@ -4772,7 +5235,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, rw)) {
+ !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
/* Stripe is busy expanding or
* add failed due to overlap. Flush everything
* and wait a while
@@ -4785,7 +5248,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
- if ((bi->bi_rw & REQ_SYNC) &&
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
release_stripe_plug(mddev, sh);
@@ -5050,8 +5514,7 @@ ret:
return reshape_sectors;
}
-/* FIXME go_faster isn't used */
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
@@ -5121,12 +5584,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
schedule_timeout_uninterruptible(1);
}
/* Need to check if array will still be degraded after recovery/resync
- * We don't need to check the 'failed' flag as when that gets set,
- * recovery aborts.
+ * Note that with more than one drive failure it's possible we're
+ * rebuilding one drive while leaving another faulty drive in the array.
*/
- for (i = 0; i < conf->raid_disks; i++)
- if (conf->disks[i].rdev == NULL)
+ rcu_read_lock();
+ for (i = 0; i < conf->raid_disks; i++) {
+ struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
+ }
+ rcu_read_unlock();
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
@@ -5181,7 +5649,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
- if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+ if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
release_stripe(sh);
raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
@@ -5307,6 +5775,8 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
released = release_stripe_list(conf, conf->temp_inactive_list);
+ if (released)
+ clear_bit(R5_DID_ALLOC, &conf->cache_state);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5345,6 +5815,13 @@ static void raid5d(struct md_thread *thread)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+ grow_one_stripe(conf, __GFP_NOWARN);
+ /* Set flag even if allocation failed. This helps
+ * slow down allocation requests when mem is short
+ */
+ set_bit(R5_DID_ALLOC, &conf->cache_state);
+ }
async_tx_issue_pending_all();
blk_finish_plug(&plug);
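
The grow path added to raid5d() pairs with the producer in get_active_stripe(), which sets R5_ALLOC_MORE when the inactive lists run dry; R5_DID_ALLOC then throttles further growth until a release makes progress. The cache_state bits used by this handshake are introduced in raid5.h by this series:

	/* r5conf->cache_state bit numbers (from raid5.h in this series) */
	#define R5_INACTIVE_BLOCKED	1 /* waiting for 25% of stripes to be free */
	#define R5_ALLOC_MORE		2 /* ask raid5d to allocate another stripe */
	#define R5_DID_ALLOC		4 /* allocated one; wait for a release first */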
@@ -5360,7 +5837,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
- ret = sprintf(page, "%d\n", conf->max_nr_stripes);
+ ret = sprintf(page, "%d\n", conf->min_nr_stripes);
spin_unlock(&mddev->lock);
return ret;
}
@@ -5370,30 +5847,24 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
- int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
- hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
- while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf, hash))
- conf->max_nr_stripes--;
- else
- break;
- hash--;
- if (hash < 0)
- hash = NR_STRIPE_HASH_LOCKS - 1;
- }
+
+ conf->min_nr_stripes = size;
+ while (size < conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+ ;
+
err = md_allow_write(mddev);
if (err)
return err;
- hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
- while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf, hash))
- conf->max_nr_stripes++;
- else break;
- hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
- }
+
+ while (size > conf->max_nr_stripes)
+ if (!grow_one_stripe(conf, GFP_KERNEL))
+ break;
+
return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
@@ -5428,6 +5899,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
raid5_store_stripe_cache_size);
static ssize_t
+raid5_show_rmw_level(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->rmw_level);
+ else
+ return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+
+ if (!conf)
+ return -ENODEV;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+ return -EINVAL;
+
+ if (new != PARITY_DISABLE_RMW &&
+ new != PARITY_ENABLE_RMW &&
+ new != PARITY_PREFER_RMW)
+ return -EINVAL;
+
+ conf->rmw_level = new;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+ raid5_show_rmw_level,
+ raid5_store_rmw_level);
+
+
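+
The store handler above accepts exactly the three policies defined in raid5.h by this series; their effect can be read off handle_stripe_dirtying() earlier in the patch:

	#define PARITY_DISABLE_RMW	0 /* always reconstruct-write (no xor_syndrome) */
	#define PARITY_ENABLE_RMW	1 /* read-modify-write when it is not more expensive */
	#define PARITY_PREFER_RMW	2 /* prefer rmw; also accepted by this handler */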
+static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
struct r5conf *conf;
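
The three accepted values map onto the PARITY_* enum added in raid5.h below: 0 disables read-modify-write, 1 enables it where it needs fewer reads, 2 prefers it outright. Note the extra gate in the store hook: any value other than PARITY_DISABLE_RMW is refused while the selected raid6 algorithm (raid6_call) lacks an xor_syndrome() helper, since RAID6 RMW must be able to update the Q syndrome incrementally. A hedged userspace-side illustration of toggling the knob (the md device name is hypothetical):

	/* Sketch: set rmw_level from userspace; assumes the array is md0. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/block/md0/md/rmw_level", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 2);	/* PARITY_PREFER_RMW */
		return fclose(f) ? 1 : 0;
	}
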
@@ -5458,7 +5972,7 @@ raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
conf = mddev->private;
if (!conf)
err = -ENODEV;
- else if (new > conf->max_nr_stripes)
+ else if (new > conf->min_nr_stripes)
err = -EINVAL;
else
conf->bypass_threshold = new;
@@ -5613,6 +6127,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_preread_bypass_threshold.attr,
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
+ &raid5_rmw_level.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -5694,7 +6209,8 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (percpu->scribble)
+ flex_array_free(percpu->scribble);
percpu->spare_page = NULL;
percpu->scribble = NULL;
}
@@ -5704,7 +6220,9 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
if (conf->level == 6 && !percpu->spare_page)
percpu->spare_page = alloc_page(GFP_KERNEL);
if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+ percpu->scribble = scribble_alloc(max(conf->raid_disks,
+ conf->previous_raid_disks), conf->chunk_sectors /
+ STRIPE_SECTORS, GFP_KERNEL);
if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
free_scratch_buffer(conf, percpu);
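
The scribble buffer moves from a single kmalloc'd region to a flex_array so that large arrays (many devices times big chunks) no longer require a high-order contiguous allocation. scribble_alloc() itself is outside this excerpt; a plausible sketch, assuming the flex_array API of this kernel generation (flex_array_alloc/flex_array_prealloc/flex_array_free) and one element per call's worth of scratch space:

	/* Sketch only: each element holds the page list plus address-
	 * conversion space for num+2 devices (data + P + Q).
	 */
	static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
	{
		size_t len = sizeof(struct page *) * (num + 2) +
			     sizeof(addr_conv_t) * (num + 2);
		struct flex_array *ret = flex_array_alloc(len, cnt, flags);

		if (!ret)
			return NULL;
		/* prealloc every element so readers need no locking */
		if (flex_array_prealloc(ret, 0, cnt, flags)) {
			flex_array_free(ret);
			return NULL;
		}
		return ret;
	}
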
@@ -5735,6 +6253,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ if (conf->shrinker.seeks)
+ unregister_shrinker(&conf->shrinker);
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -5802,6 +6322,30 @@ static int raid5_alloc_percpu(struct r5conf *conf)
return err;
}
+static unsigned long raid5_cache_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+ int ret = 0;
+ while (ret < sc->nr_to_scan) {
+ if (drop_one_stripe(conf) == 0)
+ return SHRINK_STOP;
+ ret++;
+ }
+ return ret;
+}
+
+static unsigned long raid5_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+
+ if (conf->max_nr_stripes < conf->min_nr_stripes)
+ /* unlikely, but not impossible */
+ return 0;
+ return conf->max_nr_stripes - conf->min_nr_stripes;
+}
+
static struct r5conf *setup_conf(struct mddev *mddev)
{
struct r5conf *conf;
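
The two callbacks above follow the standard shrinker contract: count_objects() reports how much could be freed, here the headroom between the current pool and the sysfs floor, and scan_objects() frees up to sc->nr_to_scan of it, returning SHRINK_STOP once drop_one_stripe() finds nothing idle. drop_one_stripe() itself never checks min_nr_stripes; the floor holds because the VM sizes its scan requests from count_objects(). A toy standalone model of that pairing (not kernel code):

	#include <stdio.h>

	static int max_nr = 256, min_nr = 64;	/* pool size and floor */

	static long cache_count(void)
	{
		return max_nr > min_nr ? max_nr - min_nr : 0;
	}

	static long cache_scan(long nr)
	{
		long done = 0;

		while (done < nr && max_nr > 0) {	/* no floor check here */
			max_nr--;
			done++;
		}
		return done;
	}

	int main(void)
	{
		long want = cache_count();	/* the VM sizes its request */

		printf("reclaimed %ld, pool now %d (floor %d)\n",
		       cache_scan(want), max_nr, min_nr);
		return 0;
	}
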
@@ -5874,7 +6418,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
max_disks = max(conf->raid_disks, conf->previous_raid_disks);
- conf->scribble_len = scribble_len(max_disks);
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
@@ -5902,6 +6445,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
conf->level = mddev->new_level;
+ conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
@@ -5934,12 +6478,17 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->fullsync = 1;
}
- conf->chunk_sectors = mddev->new_chunk_sectors;
conf->level = mddev->new_level;
- if (conf->level == 6)
+ if (conf->level == 6) {
conf->max_degraded = 2;
- else
+ if (raid6_call.xor_syndrome)
+ conf->rmw_level = PARITY_ENABLE_RMW;
+ else
+ conf->rmw_level = PARITY_DISABLE_RMW;
+ } else {
conf->max_degraded = 1;
+ conf->rmw_level = PARITY_ENABLE_RMW;
+ }
conf->algorithm = mddev->new_layout;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
@@ -5947,10 +6496,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->prev_algo = mddev->layout;
}
- memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
+ conf->min_nr_stripes = NR_STRIPES;
+ memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
- if (grow_stripes(conf, NR_STRIPES)) {
+ if (grow_stripes(conf, conf->min_nr_stripes)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -5958,6 +6508,17 @@ static struct r5conf *setup_conf(struct mddev *mddev)
} else
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
+ /*
+ * Losing a stripe head costs more than the time to refill it,
+ * it reduces the queue depth and so can hurt throughput.
+ * So set it rather large, scaled by number of devices.
+ */
+ conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
+ conf->shrinker.scan_objects = raid5_cache_scan;
+ conf->shrinker.count_objects = raid5_cache_count;
+ conf->shrinker.batch = 128;
+ conf->shrinker.flags = 0;
+ register_shrinker(&conf->shrinker);
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
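
The seeks value scaled by raid_disks * 4 tells the VM that refilling a dropped stripe is expensive, so the cache is reclaimed only under real pressure. The boot-time "allocated %dkB" figure is easy to sanity-check by hand; an illustrative fragment mirroring the computation (both struct sizes are stand-ins and vary by kernel config):

	/* Illustrative arithmetic only. */
	enum { STRIPE_HEAD_SZ = 1024, BIO_SZ = 192, PAGE_SZ = 4096 };
	int max_disks = 4, min_nr_stripes = 256;
	int memory = min_nr_stripes *
		     (STRIPE_HEAD_SZ + max_disks * (BIO_SZ + PAGE_SZ)) / 1024;
	/* memory == 4544 (kB) with these stand-in numbers */
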
@@ -6599,9 +7160,9 @@ static int check_stripe_cache(struct mddev *mddev)
*/
struct r5conf *conf = mddev->private;
if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
- > conf->max_nr_stripes ||
+ > conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
- > conf->max_nr_stripes) {
+ > conf->min_nr_stripes) {
printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
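
check_stripe_cache() now tests the floor rather than the instantaneous pool size, which is the right bound since the shrinker may take the pool down to min_nr_stripes mid-reshape. The requirement is four chunks' worth of stripe heads; an illustrative worked example:

	/* 512 KiB chunks = 1024 sectors, STRIPE_SIZE = 4096 */
	int needed = (1024 << 9) / 4096 * 4;	/* == 512 stripe heads */
	/* the default floor of 256 (NR_STRIPES) would refuse this
	 * reshape until stripe_cache_size is raised to at least 512
	 */
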
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 983e18a83db1..7dc0dd86074b 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -210,11 +210,19 @@ struct stripe_head {
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
int disks; /* disks in stripe */
+ int overwrite_disks; /* total overwrite disks in stripe,
+ * this is only checked when stripe
+ * has STRIPE_BATCH_READY
+ */
enum check_states check_state;
enum reconstruct_states reconstruct_state;
spinlock_t stripe_lock;
int cpu;
struct r5worker_group *group;
+
+ struct stripe_head *batch_head; /* protected by stripe lock */
+ spinlock_t batch_lock; /* only header's lock is useful */
+ struct list_head batch_list; /* protected by head's batch lock */
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
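
The field comments encode a deliberately asymmetric locking scheme: every member of a batch points at the same batch_head, but only the head's batch_lock and batch_list carry meaning, and members are reached solely through the head. A hedged sketch of appending a stripe under that discipline (helper name hypothetical, lock ordering simplified versus the real raid5.c code):

	/* Sketch: attach sh to head's batch.  The head's batch_lock
	 * protects batch_list; sh->batch_head is published under
	 * sh->stripe_lock.
	 */
	static void stripe_add_to_batch(struct stripe_head *head,
					struct stripe_head *sh)
	{
		spin_lock(&head->batch_lock);
		list_add_tail(&sh->batch_list, &head->batch_list);
		spin_unlock(&head->batch_lock);

		spin_lock(&sh->stripe_lock);
		sh->batch_head = head;
		spin_unlock(&sh->stripe_lock);
	}
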
@@ -327,8 +335,15 @@ enum {
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
STRIPE_ON_RELEASE_LIST,
+ STRIPE_BATCH_READY,
+ STRIPE_BATCH_ERR,
};
+#define STRIPE_EXPAND_SYNC_FLAG \
+ ((1 << STRIPE_EXPAND_SOURCE) |\
+ (1 << STRIPE_EXPAND_READY) |\
+ (1 << STRIPE_EXPANDING) |\
+ (1 << STRIPE_SYNC_REQUESTED))
/*
* Operation request flags
*/
@@ -340,6 +355,24 @@ enum {
STRIPE_OP_RECONSTRUCT,
STRIPE_OP_CHECK,
};
+
+/*
+ * RAID parity calculation preferences
+ */
+enum {
+ PARITY_DISABLE_RMW = 0,
+ PARITY_ENABLE_RMW,
+ PARITY_PREFER_RMW,
+};
+
+/*
+ * Pages requested from set_syndrome_sources()
+ */
+enum {
+ SYNDROME_SRC_ALL,
+ SYNDROME_SRC_WANT_DRAIN,
+ SYNDROME_SRC_WRITTEN,
+};
/*
* Plugging:
*
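
PARITY_ENABLE_RMW becomes the default for RAID4/5, while RAID6 gets it only when the parity algorithm can update Q incrementally (the xor_syndrome hook checked in the sysfs store above); the SYNDROME_SRC_* values then tell set_syndrome_sources() which blocks feed that computation. A hedged sketch of the kind of read-cost comparison rmw_level feeds into (illustrative only; the real accounting in raid5.c's handle_stripe_dirtying() counts per-device flags, not totals):

	static int want_rmw(int disks, int parity, int towrite, int rmw_level)
	{
		int rmw_reads = towrite + parity;	  /* old data + old P/Q */
		int rcw_reads = disks - parity - towrite; /* untouched data */

		if (rmw_level == PARITY_DISABLE_RMW)
			return 0;
		if (rmw_level == PARITY_PREFER_RMW)
			return 1;
		return rmw_reads < rcw_reads;		  /* PARITY_ENABLE_RMW */
	}

	/* e.g. 8 devices, 2 parity: writing 3 data blocks favours
	 * reconstruct-write (3 reads vs 5); writing 1 favours RMW (3 vs 5
	 * the other way).
	 */
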
@@ -396,10 +429,11 @@ struct r5conf {
spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
- int level, algorithm;
+ int level, algorithm, rmw_level;
int max_degraded;
int raid_disks;
int max_nr_stripes;
+ int min_nr_stripes;
/* reshape_progress is the leading edge of a 'reshape'
* It has value MaxSector when no reshape is happening
@@ -458,15 +492,11 @@ struct r5conf {
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
- void *scribble; /* space for constructing buffer
+ struct flex_array *scribble; /* space for constructing buffer
* lists and performing address
* conversions
*/
} __percpu *percpu;
- size_t scribble_len; /* size of scribble region must be
- * associated with conf to handle
- * cpu hotplug while reshaping
- */
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -480,9 +510,19 @@ struct r5conf {
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
- int inactive_blocked; /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
+ unsigned long cache_state;
+#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+#define R5_ALLOC_MORE 2 /* It might help to allocate another
+ * stripe.
+ */
+#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
struct disk_info *disks;
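
R5_INACTIVE_BLOCKED keeps its old meaning, merely relocated from the dropped inactive_blocked int into a shared flags word so the two new bits can sit beside it. A hedged sketch of the producer side, i.e. where R5_ALLOC_MORE would be raised when a stripe request comes up empty (condensed, not the real get_active_stripe()):

	/* Sketch: a stalled stripe request hints raid5d to grow the
	 * cache, but only if the last grown stripe has been absorbed;
	 * R5_DID_ALLOC still set means: wait for a release first.
	 */
	if (!sh && !test_bit(R5_DID_ALLOC, &conf->cache_state))
		set_bit(R5_ALLOC_MORE, &conf->cache_state);
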
@@ -497,6 +537,7 @@ struct r5conf {
int worker_cnt_per_group;
};
+
/*
* Our supported algorithms
*/