Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-log-userspace-base.c      |  1
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c  |  1
-rw-r--r--  drivers/md/dm-region-hash.c             |  1
-rw-r--r--  drivers/md/dm-service-time.c            |  2
-rw-r--r--  drivers/md/dm-target.c                  |  1
-rw-r--r--  drivers/md/faulty.c                     |  1
-rw-r--r--  drivers/md/linear.c                     | 13
-rw-r--r--  drivers/md/md.c                         | 11
-rw-r--r--  drivers/md/multipath.c                  | 21
-rw-r--r--  drivers/md/raid0.c                      | 14
-rw-r--r--  drivers/md/raid1.c                      | 29
-rw-r--r--  drivers/md/raid10.c                     | 29
-rw-r--r--  drivers/md/raid5.c                      | 55
-rw-r--r--  drivers/md/raid6algos.c                 |  1
14 files changed, 107 insertions, 73 deletions
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 7ac2c1450d10..1ed0094f064b 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -5,6 +5,7 @@
  */
 #include <linux/bio.h>
+#include <linux/slab.h>
 #include <linux/dm-dirty-log.h>
 #include <linux/device-mapper.h>
 #include <linux/dm-log-userspace.h>
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index f1c8cae70b4b..075cbcf8a9f5 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -6,6 +6,7 @@

 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <net/sock.h>
 #include <linux/workqueue.h>
 #include <linux/connector.h>
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 168bd38f5006..bd5c58b28868 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -11,6 +11,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>

 #include "dm.h"
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index cfa668f46c40..9c6c2e47ad62 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -11,6 +11,8 @@
 #include "dm.h"
 #include "dm-path-selector.h"

+#include <linux/slab.h>
+
 #define DM_MSG_PREFIX	"multipath service-time"
 #define ST_MIN_IO	1
 #define ST_MAX_RELATIVE_THROUGHPUT	100
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 04feccf2a997..11dea11dc0b6 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -10,7 +10,6 @@
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/bio.h>
-#include <linux/slab.h>

 #define DM_MSG_PREFIX "target"

diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 713acd02ab39..8e3850b98cca 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -64,6 +64,7 @@
 #define MaxFault	50
 #include <linux/blkdev.h>
 #include <linux/raid/md_u.h>
+#include <linux/slab.h>
 #include "md.h"
 #include <linux/seq_file.h>
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index af2d39d603c7..09437e958235 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -19,6 +19,7 @@
 #include <linux/blkdev.h>
 #include <linux/raid/md_u.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "linear.h"
@@ -172,12 +173,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit max_segments to 1 lying within
+		 * a single page.
 		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}

 		conf->array_sectors += rdev->sectors;
 		cnt++;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fdc1890b6ac5..cefd63daff31 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -49,6 +49,7 @@
 #include <linux/delay.h>
 #include <linux/raid/md_p.h>
 #include <linux/raid/md_u.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "bitmap.h"
@@ -2108,12 +2109,18 @@ repeat:
 	if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
 		/* .. if the array isn't clean, an 'even' event must also go
 		 * to spares. */
-		if ((mddev->events&1)==0)
+		if ((mddev->events&1)==0) {
 			nospares = 0;
+			sync_req = 2; /* force a second update to get the
+				       * even/odd in sync */
+		}
 	} else {
 		/* otherwise an 'odd' event must go to spares */
-		if ((mddev->events&1))
+		if ((mddev->events&1)) {
 			nospares = 0;
+			sync_req = 2; /* force a second update to get the
+				       * even/odd in sync */
+		}
 	}
 }
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4b323f45ad74..789bf535d29c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -22,6 +22,7 @@
 #include <linux/blkdev.h>
 #include <linux/raid/md_u.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "multipath.h"
@@ -301,14 +302,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 				   rdev->data_offset << 9);

 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit ->max_segments to one, lying
+		 * within a single page.
 		 * (Note: it is very unlikely that a device with
 		 * merge_bvec_fn will be involved in multipath.)
 		 */
-		if (q->merge_bvec_fn &&
-		    queue_max_sectors(q) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (q->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}

 		conf->working_disks++;
 		mddev->degraded--;
@@ -476,9 +479,11 @@ static int multipath_run (mddev_t *mddev)
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}

 		if (!test_bit(Faulty, &rdev->flags))
 			conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a1f7147b757f..c3bec024612e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -20,6 +20,7 @@

 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "raid0.h"

@@ -176,14 +177,15 @@ static int create_strip_zones(mddev_t *mddev)
 		disk_stack_limits(mddev->gendisk, rdev1->bdev,
 				  rdev1->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit ->max_segments to 1, lying within
+		 * a single page.
 		 */
-		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
-
+		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5a06122abd3b..e59b10e66edb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -31,6 +31,7 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
@@ -1152,13 +1153,17 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					   rdev->data_offset << 9);
-			/* as we don't honour merge_bvec_fn, we must never risk
-			 * violating it, so limit ->max_sector to one PAGE, as
-			 * a one page request is never in violation.
+			/* as we don't honour merge_bvec_fn, we must
+			 * never risk violating it, so limit
+			 * ->max_segments to one lying with a single
+			 * page, as a one page request is never in
+			 * violation.
 			 */
-			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-				blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+				blk_queue_max_segments(mddev->queue, 1);
+				blk_queue_segment_boundary(mddev->queue,
+							   PAGE_CACHE_SIZE - 1);
+			}

 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2098,12 +2103,14 @@ static int run(mddev_t *mddev)
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit ->max_segments to 1 lying within
+		 * a single page, as a one page request is never in violation.
 		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 	}

 	mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7584f9ab9bcf..e2766d8251a1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -18,6 +18,7 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
@@ -1155,13 +1156,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					   rdev->data_offset << 9);
-			/* as we don't honour merge_bvec_fn, we must never risk
-			 * violating it, so limit ->max_sector to one PAGE, as
-			 * a one page request is never in violation.
+			/* as we don't honour merge_bvec_fn, we must
+			 * never risk violating it, so limit
+			 * ->max_segments to one lying with a single
+			 * page, as a one page request is never in
+			 * violation.
 			 */
-			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-				blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+				blk_queue_max_segments(mddev->queue, 1);
+				blk_queue_segment_boundary(mddev->queue,
+							   PAGE_CACHE_SIZE - 1);
+			}

 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2255,12 +2260,14 @@ static int run(mddev_t *mddev)
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit max_segments to 1 lying
+		 * within a single page.
 		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}

 		disk->head_position = 0;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 70ffbd071b2e..15348c393b5d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -50,6 +50,7 @@
 #include <linux/async.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "raid5.h"
 #include "bitmap.h"
@@ -1526,7 +1527,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
 		atomic_inc(&rdev->read_errors);
-		if (conf->mddev->degraded)
+		if (conf->mddev->degraded >= conf->max_degraded)
 			printk_rl(KERN_WARNING
 				  "raid5:%s: read error not correctable "
 				  "(sector %llu on %s).\n",
@@ -1649,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 				     int previous, int *dd_idx,
 				     struct stripe_head *sh)
 {
-	long stripe;
-	unsigned long chunk_number;
+	sector_t stripe, stripe2;
+	sector_t chunk_number;
 	unsigned int chunk_offset;
 	int pd_idx, qd_idx;
 	int ddf_layout = 0;
@@ -1670,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	 */
 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
 	chunk_number = r_sector;
-	BUG_ON(r_sector != chunk_number);

 	/*
 	 * Compute the stripe number
 	 */
-	stripe = chunk_number / data_disks;
-
-	/*
-	 * Compute the data disk and parity disk indexes inside the stripe
-	 */
-	*dd_idx = chunk_number % data_disks;
-
+	stripe = chunk_number;
+	*dd_idx = sector_div(stripe, data_disks);
+	stripe2 = stripe;
 	/*
 	 * Select the parity disk based on the user selected algorithm.
 	 */
@@ -1693,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	case 5:
 		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
-			pd_idx = data_disks - stripe % raid_disks;
+			pd_idx = data_disks - sector_div(stripe2, raid_disks);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
-			pd_idx = data_disks - stripe % raid_disks;
+			pd_idx = data_disks - sector_div(stripe2, raid_disks);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_PARITY_0:
@@ -1727,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,

 		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
-			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1736,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1745,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
-			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = (pd_idx + 1) % raid_disks;
 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			qd_idx = (pd_idx + 1) % raid_disks;
 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
@@ -1769,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 			/* Exactly the same as RIGHT_ASYMMETRIC, but or
 			 * of blocks for computing Q is different.
 			 */
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1784,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 			 * D D D P Q rather than
 			 * Q D D D P
 			 */
-			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+			stripe2 += 1;
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1796,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,

 		case ALGORITHM_ROTATING_N_CONTINUE:
 			/* Same as left_symmetric but Q is before P */
-			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			ddf_layout = 1;
@@ -1804,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,

 		case ALGORITHM_LEFT_ASYMMETRIC_6:
 			/* RAID5 left_asymmetric, with Q on last device */
-			pd_idx = data_disks - stripe % (raid_disks-1);
+			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			qd_idx = raid_disks - 1;
 			break;

 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
-			pd_idx = stripe % (raid_disks-1);
+			pd_idx = sector_div(stripe2, raid_disks-1);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			qd_idx = raid_disks - 1;
 			break;

 		case ALGORITHM_LEFT_SYMMETRIC_6:
-			pd_idx = data_disks - stripe % (raid_disks-1);
+			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
 			qd_idx = raid_disks - 1;
 			break;

 		case ALGORITHM_RIGHT_SYMMETRIC_6:
-			pd_idx = stripe % (raid_disks-1);
+			pd_idx = sector_div(stripe2, raid_disks-1);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
 			qd_idx = raid_disks - 1;
 			break;
@@ -1869,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 		: conf->algorithm;
 	sector_t stripe;
 	int chunk_offset;
-	int chunk_number, dummy1, dd_idx = i;
+	sector_t chunk_number;
+	int dummy1, dd_idx = i;
 	sector_t r_sector;
 	struct stripe_head sh2;

 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
 	stripe = new_sector;
-	BUG_ON(new_sector != stripe);

 	if (i == sh->pd_idx)
 		return 0;
@@ -1969,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 	}

 	chunk_number = stripe * data_disks + i;
-	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
+	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

 	check = raid5_compute_sector(conf, r_sector,
 				     previous, &dummy1, &sh2);
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index bffc61bff5ab..1f8784bfd44d 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -17,6 +17,7 @@
  */

 #include <linux/raid/pq.h>
+#include <linux/gfp.h>
 #ifndef __KERNEL__
 #include <sys/mman.h>
 #include <stdio.h>
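
A note on the raid5.c arithmetic above: stripe and chunk_number become sector_t (typically a 64-bit type even on 32-bit builds), so the open-coded '/' and '%' are replaced with sector_div(), which divides its first argument in place and returns the remainder without relying on 64-bit division helpers. Because each call consumes its dividend, the patch keeps a scratch copy (stripe2) for the per-algorithm parity calculation. The sketch below is a minimal user-space illustration of that pattern, not kernel code: sector_div() here is a stand-in macro modeled on the kernel's interface, and the chunk size and disk counts are made-up example values.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Stand-in for the kernel's sector_div(): divide n in place, return the
 * remainder.  (GCC statement expression; in the kernel this avoids 64-bit
 * '%' and '/' on 32-bit hosts.) */
#define sector_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	__rem;						\
})

int main(void)
{
	sector_t r_sector = 1000003;		/* arbitrary example sector */
	unsigned int sectors_per_chunk = 128;	/* hypothetical 64KiB chunk */
	int data_disks = 3, raid_disks = 4;	/* hypothetical 4-disk RAID5 */

	/* Split the sector into chunk number and offset within the chunk. */
	unsigned int chunk_offset = sector_div(r_sector, sectors_per_chunk);
	sector_t chunk_number = r_sector;	/* r_sector now holds the quotient */

	/* Stripe number and data-disk index inside the stripe. */
	sector_t stripe = chunk_number;
	int dd_idx = sector_div(stripe, data_disks);

	/* sector_div() destroys its dividend, hence the stripe2 copy before
	 * the parity-disk step (mirroring the ALGORITHM_LEFT_ASYMMETRIC branch). */
	sector_t stripe2 = stripe;
	int pd_idx = data_disks - sector_div(stripe2, raid_disks);
	if (dd_idx >= pd_idx)
		dd_idx++;

	printf("chunk_offset=%u stripe=%llu dd_idx=%d pd_idx=%d\n",
	       chunk_offset, (unsigned long long)stripe, dd_idx, pd_idx);
	return 0;
}

Widening chunk_number to sector_t is also why the BUG_ON() truncation checks in the old code could be dropped: the value no longer has to fit in an unsigned long.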