author    Jens Axboe <axboe@fb.com>    2017-02-17 14:08:19 -0700
committer Jens Axboe <axboe@fb.com>    2017-02-17 14:08:19 -0700
commit    818551e2b2c662a1b26de6b4f7d6b8411a838d18 (patch)
tree      f38b4c951df4d33db81ae7b7765a56bce491c2a8
parent    6010720da8aab51f33beee63b73cf88016e9b250 (diff)
parent    7520872c0cf4d3df6d74242c6edfb9e70a47df4d (diff)
Merge branch 'for-4.11/next' into for-4.11/linus-merge
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  block/Kconfig | 5
-rw-r--r--  block/Makefile | 5
-rw-r--r--  block/bio.c | 10
-rw-r--r--  block/blk-cgroup.c | 10
-rw-r--r--  block/blk-core.c | 325
-rw-r--r--  block/blk-exec.c | 19
-rw-r--r--  block/blk-flush.c | 14
-rw-r--r--  block/blk-integrity.c | 4
-rw-r--r--  block/blk-ioc.c | 22
-rw-r--r--  block/blk-map.c | 13
-rw-r--r--  block/blk-merge.c | 58
-rw-r--r--  block/blk-mq-debugfs.c | 88
-rw-r--r--  block/blk-mq-sched.c | 86
-rw-r--r--  block/blk-mq-sched.h | 9
-rw-r--r--  block/blk-mq-sysfs.c | 2
-rw-r--r--  block/blk-mq.c | 73
-rw-r--r--  block/blk-mq.h | 5
-rw-r--r--  block/blk-settings.c | 22
-rw-r--r--  block/blk-sysfs.c | 68
-rw-r--r--  block/blk-wbt.c | 8
-rw-r--r--  block/blk.h | 21
-rw-r--r--  block/bsg-lib.c | 49
-rw-r--r--  block/bsg.c | 64
-rw-r--r--  block/cfq-iosched.c | 4
-rw-r--r--  block/compat_ioctl.c | 7
-rw-r--r--  block/deadline-iosched.c | 12
-rw-r--r--  block/elevator.c | 16
-rw-r--r--  block/genhd.c | 25
-rw-r--r--  block/ioctl.c | 7
-rw-r--r--  block/mq-deadline.c | 15
-rw-r--r--  block/scsi_ioctl.c | 83
-rw-r--r--  drivers/ata/libata-scsi.c | 4
-rw-r--r--  drivers/block/Kconfig | 13
-rw-r--r--  drivers/block/aoe/aoeblk.c | 4
-rw-r--r--  drivers/block/cciss.c | 77
-rw-r--r--  drivers/block/drbd/drbd_main.c | 6
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 12
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 2
-rw-r--r--  drivers/block/drbd/drbd_req.c | 2
-rw-r--r--  drivers/block/floppy.c | 4
-rw-r--r--  drivers/block/hd.c | 45
-rw-r--r--  drivers/block/mg_disk.c | 31
-rw-r--r--  drivers/block/nbd.c | 258
-rw-r--r--  drivers/block/null_blk.c | 4
-rw-r--r--  drivers/block/osdblk.c | 6
-rw-r--r--  drivers/block/paride/Kconfig | 1
-rw-r--r--  drivers/block/paride/pd.c | 15
-rw-r--r--  drivers/block/pktcdvd.c | 12
-rw-r--r--  drivers/block/ps3disk.c | 15
-rw-r--r--  drivers/block/rbd.c | 24
-rw-r--r--  drivers/block/skd_main.c | 15
-rw-r--r--  drivers/block/sx8.c | 4
-rw-r--r--  drivers/block/virtio_blk.c | 205
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/block/xsysace.c | 2
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 34
-rw-r--r--  drivers/cdrom/gdrom.c | 29
-rw-r--r--  drivers/ide/Kconfig | 1
-rw-r--r--  drivers/ide/ide-atapi.c | 78
-rw-r--r--  drivers/ide/ide-cd.c | 190
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 5
-rw-r--r--  drivers/ide/ide-cd_verbose.c | 6
-rw-r--r--  drivers/ide/ide-devsets.c | 13
-rw-r--r--  drivers/ide/ide-disk.c | 12
-rw-r--r--  drivers/ide/ide-eh.c | 8
-rw-r--r--  drivers/ide/ide-floppy.c | 37
-rw-r--r--  drivers/ide/ide-io.c | 13
-rw-r--r--  drivers/ide/ide-ioctls.c | 14
-rw-r--r--  drivers/ide/ide-park.c | 20
-rw-r--r--  drivers/ide/ide-pm.c | 20
-rw-r--r--  drivers/ide/ide-probe.c | 36
-rw-r--r--  drivers/ide/ide-tape.c | 41
-rw-r--r--  drivers/ide/ide-taskfile.c | 8
-rw-r--r--  drivers/ide/sis5513.c | 2
-rw-r--r--  drivers/md/bcache/request.c | 10
-rw-r--r--  drivers/md/bcache/super.c | 8
-rw-r--r--  drivers/md/dm-cache-target.c | 2
-rw-r--r--  drivers/md/dm-core.h | 1
-rw-r--r--  drivers/md/dm-era-target.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 132
-rw-r--r--  drivers/md/dm-rq.c | 268
-rw-r--r--  drivers/md/dm-rq.h | 2
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/md/dm-target.c | 7
-rw-r--r--  drivers/md/dm-thin.c | 2
-rw-r--r--  drivers/md/dm.c | 49
-rw-r--r--  drivers/md/dm.h | 3
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/md/multipath.c | 2
-rw-r--r--  drivers/md/raid0.c | 6
-rw-r--r--  drivers/md/raid1.c | 11
-rw-r--r--  drivers/md/raid10.c | 10
-rw-r--r--  drivers/md/raid5.c | 12
-rw-r--r--  drivers/memstick/core/ms_block.c | 11
-rw-r--r--  drivers/memstick/core/mspro_block.c | 13
-rw-r--r--  drivers/message/fusion/mptsas.c | 8
-rw-r--r--  drivers/mmc/core/queue.c | 9
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 13
-rw-r--r--  drivers/mtd/ubi/block.c | 15
-rw-r--r--  drivers/nvme/host/core.c | 56
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/nvme/host/rdma.c | 6
-rw-r--r--  drivers/nvme/host/scsi.c | 7
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/s390/block/scm_blk.c | 7
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 247
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 222
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 174
-rw-r--r--  drivers/scsi/hosts.c | 24
-rw-r--r--  drivers/scsi/hpsa.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 38
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 22
-rw-r--r--  drivers/scsi/osst.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 354
-rw-r--r--  drivers/scsi/scsi_error.c | 43
-rw-r--r--  drivers/scsi/scsi_lib.c | 264
-rw-r--r--  drivers/scsi/scsi_priv.h | 5
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 34
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 14
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 5
-rw-r--r--  drivers/scsi/sd.c | 48
-rw-r--r--  drivers/scsi/sg.c | 33
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 2
-rw-r--r--  drivers/scsi/sr.c | 9
-rw-r--r--  drivers/scsi/st.c | 28
-rw-r--r--  drivers/scsi/sun3_scsi.c | 2
-rw-r--r--  drivers/target/Kconfig | 1
-rw-r--r--  drivers/target/target_core_pscsi.c | 14
-rw-r--r--  fs/block_dev.c | 22
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 2
-rw-r--r--  fs/debugfs/inode.c | 36
-rw-r--r--  fs/gfs2/ops_fstype.c | 2
-rw-r--r--  fs/nfsd/Kconfig | 1
-rw-r--r--  fs/nfsd/blocklayout.c | 19
-rw-r--r--  fs/nilfs2/super.c | 2
-rw-r--r--  fs/super.c | 2
-rw-r--r--  fs/xfs/xfs_buf.c | 3
-rw-r--r--  fs/xfs/xfs_buf.h | 1
-rw-r--r--  include/linux/backing-dev-defs.h | 2
-rw-r--r--  include/linux/backing-dev.h | 12
-rw-r--r--  include/linux/blk_types.h | 7
-rw-r--r--  include/linux/blkdev.h | 96
-rw-r--r--  include/linux/blktrace_api.h | 18
-rw-r--r--  include/linux/bsg-lib.h | 5
-rw-r--r--  include/linux/debugfs.h | 8
-rw-r--r--  include/linux/device-mapper.h | 3
-rw-r--r--  include/linux/elevator.h | 31
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/genhd.h | 8
-rw-r--r--  include/linux/ide.h | 58
-rw-r--r--  include/linux/nvme.h | 2
-rw-r--r--  include/scsi/scsi_cmnd.h | 4
-rw-r--r--  include/scsi/scsi_host.h | 5
-rw-r--r--  include/scsi/scsi_request.h | 30
-rw-r--r--  include/scsi/scsi_transport.h | 2
-rw-r--r--  include/trace/events/block.h | 27
-rw-r--r--  kernel/trace/blktrace.c | 78
-rw-r--r--  mm/backing-dev.c | 43
-rw-r--r--  mm/page-writeback.c | 4
171 files changed, 2494 insertions, 2850 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 1aef809affae..a2a92e57a87d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -49,9 +49,13 @@ config LBDAF
If unsure, say Y.
+config BLK_SCSI_REQUEST
+ bool
+
config BLK_DEV_BSG
bool "Block layer SG support v4"
default y
+ select BLK_SCSI_REQUEST
help
Saying Y here will enable generic SG (SCSI generic) v4 support
for any block device.
@@ -71,6 +75,7 @@ config BLK_DEV_BSGLIB
bool "Block layer SG support v4 helper lib"
default n
select BLK_DEV_BSG
+ select BLK_SCSI_REQUEST
help
Subsystems will normally enable this if needed. Users will not
normally need to manually enable this.
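[With the new BLK_SCSI_REQUEST symbol, the SCSI passthrough plumbing (block/scsi_ioctl.c and the scsi_request infrastructure) is only compiled in when something selects it, rather than unconditionally. A driver that keeps issuing SCSI CDBs through the block layer would select the symbol from its own Kconfig entry; a minimal sketch, with BLK_DEV_MYDRV as a hypothetical example:

    config BLK_DEV_MYDRV
        tristate "Hypothetical SCSI-passthrough block driver"
        depends on BLOCK
        select BLK_SCSI_REQUEST
        help
          Example only: any driver that accepts or issues SCSI CDBs
          via the block layer now needs BLK_SCSI_REQUEST to pull in
          block/scsi_ioctl.c.]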
diff --git a/block/Makefile b/block/Makefile
index 6ba1b1bc9529..2ad7c304e3f5 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -7,10 +7,11 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
- genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
+ genhd.o partition-generic.o ioprio.o \
badblocks.o partitions/
-obj-$(CONFIG_BOUNCE) += bounce.o
+obj-$(CONFIG_BOUNCE) += bounce.o
+obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
diff --git a/block/bio.c b/block/bio.c
index d3c26d1cb1da..4b564d0c3e29 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1227,9 +1227,6 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (!bio)
goto out_bmd;
- if (iter->type & WRITE)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
ret = 0;
if (map_data) {
@@ -1394,12 +1391,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
kfree(pages);
- /*
- * set data direction, and check if mapped pages need bouncing
- */
- if (iter->type & WRITE)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
bio_set_flag(bio, BIO_USER_MAPPED);
/*
@@ -1590,7 +1581,6 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
bio->bi_private = data;
} else {
bio->bi_end_io = bio_copy_kern_endio;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}
return bio;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fb59a3edc778..295e98c2c8cc 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
goto err_free_blkg;
}
- wb_congested = wb_congested_get_create(&q->backing_dev_info,
+ wb_congested = wb_congested_get_create(q->backing_dev_info,
blkcg->css.id,
GFP_NOWAIT | __GFP_NOWARN);
if (!wb_congested) {
@@ -469,8 +469,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
/* some drivers (floppy) instantiate a queue w/o disk registered */
- if (blkg->q->backing_dev_info.dev)
- return dev_name(blkg->q->backing_dev_info.dev);
+ if (blkg->q->backing_dev_info->dev)
+ return dev_name(blkg->q->backing_dev_info->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);
@@ -1079,10 +1079,8 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
- if (IS_ERR(blkg)) {
- blkg_free(new_blkg);
+ if (IS_ERR(blkg))
return PTR_ERR(blkg);
- }
q->root_blkg = blkg;
q->root_rl.blkg = blkg;
diff --git a/block/blk-core.c b/block/blk-core.c
index b2df55a65250..b9e857f4afe8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,6 +33,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/debugfs.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -42,6 +43,10 @@
#include "blk-mq-sched.h"
#include "blk-wbt.h"
+#ifdef CONFIG_DEBUG_FS
+struct dentry *blk_debugfs_root;
+#endif
+
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -75,7 +80,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
* flip its congestion state for events on other blkcgs.
*/
if (rl == &rl->q->root_rl)
- clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+ clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
@@ -86,7 +91,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
#else
/* see blk_clear_congested() */
if (rl == &rl->q->root_rl)
- set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+ set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
@@ -105,22 +110,6 @@ void blk_queue_congestion_threshold(struct request_queue *q)
q->nr_congestion_off = nr;
}
-/**
- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
- * @bdev: device
- *
- * Locates the passed device's request queue and returns the address of its
- * backing_dev_info. This function can only be called if @bdev is opened
- * and the return value is never NULL.
- */
-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- return &q->backing_dev_info;
-}
-EXPORT_SYMBOL(blk_get_backing_dev_info);
-
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
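[The queue's backing_dev_info becomes a separately allocated, refcounted object reached through a pointer (see bdi_alloc_node()/bdi_put() below), so the exported blk_get_backing_dev_info() helper that returned &q->backing_dev_info is dropped. Callers updated elsewhere in this series (fs/block_dev.c and others in the diffstat) reach it through the queue instead; a minimal sketch, assuming an opened block device:

    /* before: bdi = blk_get_backing_dev_info(bdev); */
    struct request_queue *q = bdev_get_queue(bdev);
    struct backing_dev_info *bdi = q->backing_dev_info;  /* now a pointer */]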
@@ -132,8 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
- rq->cmd = rq->__cmd;
- rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1;
rq->internal_tag = -1;
rq->start_time = jiffies;
@@ -160,10 +147,8 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
void blk_dump_rq_flags(struct request *rq, char *msg)
{
- int bit;
-
- printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
- rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+ printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
+ rq->rq_disk ? rq->rq_disk->disk_name : "?",
(unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
@@ -171,13 +156,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
printk(KERN_INFO " bio %p, biotail %p, len %u\n",
rq->bio, rq->biotail, blk_rq_bytes(rq));
-
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- printk(KERN_INFO " cdb: ");
- for (bit = 0; bit < BLK_MAX_CDB; bit++)
- printk("%02x ", rq->cmd[bit]);
- printk("\n");
- }
}
EXPORT_SYMBOL(blk_dump_rq_flags);
@@ -588,7 +566,7 @@ void blk_cleanup_queue(struct request_queue *q)
blk_flush_integrity();
/* @q won't process any more request, flush async actions */
- del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+ del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
if (q->mq_ops)
@@ -600,7 +578,8 @@ void blk_cleanup_queue(struct request_queue *q)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
- bdi_unregister(&q->backing_dev_info);
+ bdi_unregister(q->backing_dev_info);
+ put_disk_devt(q->disk_devt);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
@@ -608,17 +587,41 @@ void blk_cleanup_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_cleanup_queue);
/* Allocate memory local to the request queue */
-static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
- int nid = (int)(long)data;
- return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+ struct request_queue *q = data;
+
+ return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}
-static void free_request_struct(void *element, void *unused)
+static void free_request_simple(void *element, void *data)
{
kmem_cache_free(request_cachep, element);
}
+static void *alloc_request_size(gfp_t gfp_mask, void *data)
+{
+ struct request_queue *q = data;
+ struct request *rq;
+
+ rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
+ q->node);
+ if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
+ kfree(rq);
+ rq = NULL;
+ }
+ return rq;
+}
+
+static void free_request_size(void *element, void *data)
+{
+ struct request_queue *q = data;
+
+ if (q->exit_rq_fn)
+ q->exit_rq_fn(q, element);
+ kfree(element);
+}
+
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask)
{
@@ -631,10 +634,15 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
- free_request_struct,
- (void *)(long)q->node, gfp_mask,
- q->node);
+ if (q->cmd_size) {
+ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+ alloc_request_size, free_request_size,
+ q, gfp_mask, q->node);
+ } else {
+ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+ alloc_request_simple, free_request_simple,
+ q, gfp_mask, q->node);
+ }
if (!rl->rq_pool)
return -ENOMEM;
@@ -697,7 +705,6 @@ static void blk_rq_timed_out_timer(unsigned long data)
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
- int err;
q = kmem_cache_alloc_node(blk_requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
@@ -712,17 +719,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q->bio_split)
goto fail_id;
- q->backing_dev_info.ra_pages =
+ q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+ if (!q->backing_dev_info)
+ goto fail_split;
+
+ q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
- q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
- q->backing_dev_info.name = "block";
+ q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+ q->backing_dev_info->name = "block";
q->node = node_id;
- err = bdi_init(&q->backing_dev_info);
- if (err)
- goto fail_split;
-
- setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+ setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->queue_head);
@@ -772,7 +779,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
fail_ref:
percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
- bdi_destroy(&q->backing_dev_info);
+ bdi_put(q->backing_dev_info);
fail_split:
bioset_free(q->bio_split);
fail_id:
@@ -825,15 +832,19 @@ EXPORT_SYMBOL(blk_init_queue);
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
- struct request_queue *uninit_q, *q;
+ struct request_queue *q;
- uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
- if (!uninit_q)
+ q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ if (!q)
return NULL;
- q = blk_init_allocated_queue(uninit_q, rfn, lock);
- if (!q)
- blk_cleanup_queue(uninit_q);
+ q->request_fn = rfn;
+ if (lock)
+ q->queue_lock = lock;
+ if (blk_init_allocated_queue(q) < 0) {
+ blk_cleanup_queue(q);
+ return NULL;
+ }
return q;
}
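[blk_init_queue_node() now assigns request_fn and the optional lock itself and calls the reworked blk_init_allocated_queue(), which returns an errno rather than the queue. Combined with the cmd_size/init_rq_fn/exit_rq_fn hooks above, a legacy (non-mq) driver can have the block layer allocate driver payload directly behind each request, the same model blk-mq uses. A rough sketch, where the mydrv_* names are hypothetical:

    struct mydrv_cmd {
        int status;                          /* hypothetical per-request state */
    };

    struct request_queue *q;

    q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
    if (!q)
        return -ENOMEM;
    q->request_fn = mydrv_request_fn;        /* hypothetical */
    q->cmd_size = sizeof(struct mydrv_cmd);  /* payload behind each request */
    q->init_rq_fn = mydrv_init_rq;           /* optional, may be left NULL */
    q->exit_rq_fn = mydrv_exit_rq;           /* optional, may be left NULL */
    if (blk_init_allocated_queue(q) < 0) {
        blk_cleanup_queue(q);
        return -ENOMEM;
    }

The payload sits immediately after struct request (see alloc_request_size() above), so it can be reached with blk_mq_rq_to_pdu(rq) on both the legacy and mq paths.]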
@@ -841,30 +852,22 @@ EXPORT_SYMBOL(blk_init_queue_node);
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
-struct request_queue *
-blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
- spinlock_t *lock)
-{
- if (!q)
- return NULL;
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
+int blk_init_allocated_queue(struct request_queue *q)
+{
+ q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
if (!q->fq)
- return NULL;
+ return -ENOMEM;
+
+ if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
+ goto out_free_flush_queue;
if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
- goto fail;
+ goto out_exit_flush_rq;
INIT_WORK(&q->timeout_work, blk_timeout_work);
- q->request_fn = rfn;
- q->prep_rq_fn = NULL;
- q->unprep_rq_fn = NULL;
q->queue_flags |= QUEUE_FLAG_DEFAULT;
- /* Override internal queue lock with supplied lock pointer */
- if (lock)
- q->queue_lock = lock;
-
/*
* This also sets hw/phys segments, boundary and size
*/
@@ -878,17 +881,19 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
- goto fail;
+ goto out_exit_flush_rq;
}
mutex_unlock(&q->sysfs_lock);
+ return 0;
- return q;
-
-fail:
+out_exit_flush_rq:
+ if (q->exit_rq_fn)
+ q->exit_rq_fn(q, q->fq->flush_rq);
+out_free_flush_queue:
blk_free_flush_queue(q->fq);
wbt_exit(q);
- return NULL;
+ return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1024,25 +1029,6 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
return 0;
}
-/*
- * Determine if elevator data should be initialized when allocating the
- * request associated with @bio.
- */
-static bool blk_rq_should_init_elevator(struct bio *bio)
-{
- if (!bio)
- return true;
-
- /*
- * Flush requests do not use the elevator so skip initialization.
- * This allows a request to share the flush and elevator data.
- */
- if (op_is_flush(bio->bi_opf))
- return false;
-
- return true;
-}
-
/**
* __get_request - get a free request
* @rl: request list to allocate from
@@ -1121,10 +1107,13 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
* request is freed. This guarantees icq's won't be destroyed and
* makes creating new ones safe.
*
+ * Flush requests do not use the elevator so skip initialization.
+ * This allows a request to share the flush and elevator data.
+ *
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
- if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
+ if (!op_is_flush(op) && !blk_queue_bypass(q)) {
rq_flags |= RQF_ELVPRIV;
q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
@@ -1184,7 +1173,7 @@ fail_elvpriv:
* disturb iosched and blkcg but weird is better than dead.
*/
printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
- __func__, dev_name(q->backing_dev_info.dev));
+ __func__, dev_name(q->backing_dev_info->dev));
rq->rq_flags &= ~RQF_ELVPRIV;
rq->elv.icq = NULL;
@@ -1278,8 +1267,6 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
{
struct request *rq;
- BUG_ON(rw != READ && rw != WRITE);
-
/* create ioc upfront */
create_io_context(gfp_mask, q->node);
@@ -1309,18 +1296,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
EXPORT_SYMBOL(blk_get_request);
/**
- * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
- * @rq: request to be initialized
- *
- */
-void blk_rq_set_block_pc(struct request *rq)
-{
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
- memset(rq->__cmd, 0, sizeof(rq->__cmd));
-}
-EXPORT_SYMBOL(blk_rq_set_block_pc);
-
-/**
* blk_requeue_request - put a request back on queue
* @q: request queue where request should be inserted
* @rq: request to be inserted
@@ -1510,6 +1485,30 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
return true;
}
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ unsigned short segments = blk_rq_nr_discard_segments(req);
+
+ if (segments >= queue_max_discard_segments(q))
+ goto no_merge;
+ if (blk_rq_sectors(req) + bio_sectors(bio) >
+ blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+ goto no_merge;
+
+ req->biotail->bi_next = bio;
+ req->biotail = bio;
+ req->__data_len += bio->bi_iter.bi_size;
+ req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+ req->nr_phys_segments = segments + 1;
+
+ blk_account_io_start(req, false);
+ return true;
+no_merge:
+ req_set_nomerge(q, req);
+ return false;
+}
+
/**
* blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
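[bio_attempt_discard_merge() chains otherwise non-contiguous discard bios onto a single request, counting each bio as one segment against queue_max_discard_segments(). A driver that advertises more than one discard segment then walks the bio chain to build its multi-range command; a sketch of that walk, where the mydrv_range table is hypothetical:

    struct mydrv_range {
        sector_t sector;
        u32 nr;
    } range[16];                    /* hypothetical, sized to the advertised limit */
    struct bio *bio;
    unsigned short n = 0;

    __rq_for_each_bio(bio, req) {
        range[n].sector = bio->bi_iter.bi_sector;
        range[n].nr = bio_sectors(bio);
        n++;                        /* bounded by queue_max_discard_segments(q) */
    }]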
@@ -1538,12 +1537,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
{
struct blk_plug *plug;
struct request *rq;
- bool ret = false;
struct list_head *plug_list;
plug = current->plug;
if (!plug)
- goto out;
+ return false;
*request_count = 0;
if (q->mq_ops)
@@ -1552,7 +1550,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
plug_list = &plug->list;
list_for_each_entry_reverse(rq, plug_list, queuelist) {
- int el_ret;
+ bool merged = false;
if (rq->q == q) {
(*request_count)++;
@@ -1568,19 +1566,25 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
- el_ret = blk_try_merge(rq, bio);
- if (el_ret == ELEVATOR_BACK_MERGE) {
- ret = bio_attempt_back_merge(q, rq, bio);
- if (ret)
- break;
- } else if (el_ret == ELEVATOR_FRONT_MERGE) {
- ret = bio_attempt_front_merge(q, rq, bio);
- if (ret)
- break;
+ switch (blk_try_merge(rq, bio)) {
+ case ELEVATOR_BACK_MERGE:
+ merged = bio_attempt_back_merge(q, rq, bio);
+ break;
+ case ELEVATOR_FRONT_MERGE:
+ merged = bio_attempt_front_merge(q, rq, bio);
+ break;
+ case ELEVATOR_DISCARD_MERGE:
+ merged = bio_attempt_discard_merge(q, rq, bio);
+ break;
+ default:
+ break;
}
+
+ if (merged)
+ return true;
}
-out:
- return ret;
+
+ return false;
}
unsigned int blk_plug_queued_count(struct request_queue *q)
@@ -1609,7 +1613,6 @@ out:
void init_request_from_bio(struct request *req, struct bio *bio)
{
- req->cmd_type = REQ_TYPE_FS;
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -1623,8 +1626,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
struct blk_plug *plug;
- int el_ret, where = ELEVATOR_INSERT_SORT;
- struct request *req;
+ int where = ELEVATOR_INSERT_SORT;
+ struct request *req, *free;
unsigned int request_count = 0;
unsigned int wb_acct;
@@ -1661,21 +1664,29 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
spin_lock_irq(q->queue_lock);
- el_ret = elv_merge(q, &req, bio);
- if (el_ret == ELEVATOR_BACK_MERGE) {
- if (bio_attempt_back_merge(q, req, bio)) {
- elv_bio_merged(q, req, bio);
- if (!attempt_back_merge(q, req))
- elv_merged_request(q, req, el_ret);
- goto out_unlock;
- }
- } else if (el_ret == ELEVATOR_FRONT_MERGE) {
- if (bio_attempt_front_merge(q, req, bio)) {
- elv_bio_merged(q, req, bio);
- if (!attempt_front_merge(q, req))
- elv_merged_request(q, req, el_ret);
- goto out_unlock;
- }
+ switch (elv_merge(q, &req, bio)) {
+ case ELEVATOR_BACK_MERGE:
+ if (!bio_attempt_back_merge(q, req, bio))
+ break;
+ elv_bio_merged(q, req, bio);
+ free = attempt_back_merge(q, req);
+ if (free)
+ __blk_put_request(q, free);
+ else
+ elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
+ goto out_unlock;
+ case ELEVATOR_FRONT_MERGE:
+ if (!bio_attempt_front_merge(q, req, bio))
+ break;
+ elv_bio_merged(q, req, bio);
+ free = attempt_front_merge(q, req);
+ if (free)
+ __blk_put_request(q, free);
+ else
+ elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
+ goto out_unlock;
+ default:
+ break;
}
get_rq:
@@ -2452,14 +2463,6 @@ void blk_start_request(struct request *req)
wbt_issue(req->q->rq_wb, &req->issue_stat);
}
- /*
- * We are now handing the request to the hardware, initialize
- * resid_len to full count and add the timeout handler.
- */
- req->resid_len = blk_rq_bytes(req);
- if (unlikely(blk_bidi_rq(req)))
- req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
-
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
blk_add_timer(req);
}
@@ -2530,10 +2533,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* TODO: tj: This is too subtle. It would be better to let
* low level drivers do what they see fit.
*/
- if (req->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(req))
req->errors = 0;
- if (error && req->cmd_type == REQ_TYPE_FS &&
+ if (error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)) {
char *error_type;
@@ -2605,7 +2608,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
- if (req->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(req))
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
@@ -2683,8 +2686,8 @@ void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
- if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
- laptop_io_completion(&req->q->backing_dev_info);
+ if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
+ laptop_io_completion(req->q->backing_dev_info);
blk_delete_timer(req);
@@ -3007,8 +3010,6 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
- dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
dst->nr_phys_segments = src->nr_phys_segments;
@@ -3484,5 +3485,9 @@ int __init blk_dev_init(void)
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+#ifdef CONFIG_DEBUG_FS
+ blk_debugfs_root = debugfs_create_dir("block", NULL);
+#endif
+
return 0;
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ed1f10165268..8cd0e9bc8dc8 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -11,11 +11,6 @@
#include "blk.h"
#include "blk-mq-sched.h"
-/*
- * for max sense size
- */
-#include <scsi/scsi_cmnd.h>
-
/**
* blk_end_sync_rq - executes a completion event on a request
* @rq: request to complete
@@ -56,7 +51,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
WARN_ON(irqs_disabled());
- WARN_ON(rq->cmd_type == REQ_TYPE_FS);
+ WARN_ON(!blk_rq_is_passthrough(rq));
rq->rq_disk = bd_disk;
rq->end_io = done;
@@ -101,16 +96,9 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
- char sense[SCSI_SENSE_BUFFERSIZE];
int err = 0;
unsigned long hang_check;
- if (!rq->sense) {
- memset(sense, 0, sizeof(sense));
- rq->sense = sense;
- rq->sense_len = 0;
- }
-
rq->end_io_data = &wait;
blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
@@ -124,11 +112,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
if (rq->errors)
err = -EIO;
- if (rq->sense == sense) {
- rq->sense = NULL;
- rq->sense_len = 0;
- }
-
return err;
}
EXPORT_SYMBOL(blk_execute_rq);
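[The on-stack sense buffer dance disappears because sense data no longer lives in struct request at all; it moves into the new struct scsi_request (include/scsi/scsi_request.h in the diffstat), allocated behind the request through the cmd_size mechanism. A sketch of what a passthrough submitter looks like after this series, assuming the scsi_req()/scsi_req_init() helpers from that header:

    struct request *rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
    struct scsi_request *sreq = scsi_req(rq);   /* payload behind the request */

    scsi_req_init(rq);
    sreq->cmd[0] = WRITE_10;                    /* CDB lives here now */
    sreq->cmd_len = 10;
    blk_execute_rq(q, NULL, rq, 1 /* at_head */);
    if (sreq->sense_len)
        handle_sense(sreq->sense);              /* hypothetical helper */
    blk_put_request(rq);]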
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 4427896641ac..0d5a9c1da1fc 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -297,8 +297,14 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
return false;
- /* C2 and C3 */
+ /* C2 and C3
+ *
+ * For blk-mq + scheduling, we can risk having all driver tags
+ * assigned to empty flushes, and we deadlock if we are expecting
+ * other requests to make progress. Don't defer for that case.
+ */
if (!list_empty(&fq->flush_data_in_flight) &&
+ !(q->mq_ops && q->elevator) &&
time_before(jiffies,
fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
return false;
@@ -327,7 +333,6 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}
- flush_rq->cmd_type = REQ_TYPE_FS;
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
@@ -547,11 +552,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
if (!fq)
goto fail;
- if (q->mq_ops) {
+ if (q->mq_ops)
spin_lock_init(&fq->mq_flush_lock);
- rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
- }
+ rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
if (!fq->flush_rq)
goto fail_rq;
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d69c5c79f98e..9f0ff5ba4f84 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -443,10 +443,10 @@ void blk_integrity_revalidate(struct gendisk *disk)
return;
if (bi->profile)
- disk->queue->backing_dev_info.capabilities |=
+ disk->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
else
- disk->queue->backing_dev_info.capabilities &=
+ disk->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES;
}
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fe186a9eade9..b12f9c87b4c3 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -35,7 +35,10 @@ static void icq_free_icq_rcu(struct rcu_head *head)
kmem_cache_free(icq->__rcu_icq_cache, icq);
}
-/* Exit an icq. Called with both ioc and q locked. */
+/*
+ * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
+ * mq.
+ */
static void ioc_exit_icq(struct io_cq *icq)
{
struct elevator_type *et = icq->q->elevator->type;
@@ -166,6 +169,7 @@ EXPORT_SYMBOL(put_io_context);
*/
void put_io_context_active(struct io_context *ioc)
{
+ struct elevator_type *et;
unsigned long flags;
struct io_cq *icq;
@@ -184,13 +188,19 @@ retry:
hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
if (icq->flags & ICQ_EXITED)
continue;
- if (spin_trylock(icq->q->queue_lock)) {
+
+ et = icq->q->elevator->type;
+ if (et->uses_mq) {
ioc_exit_icq(icq);
- spin_unlock(icq->q->queue_lock);
} else {
- spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
- goto retry;
+ if (spin_trylock(icq->q->queue_lock)) {
+ ioc_exit_icq(icq);
+ spin_unlock(icq->q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ cpu_relax();
+ goto retry;
+ }
}
}
spin_unlock_irqrestore(&ioc->lock, flags);
diff --git a/block/blk-map.c b/block/blk-map.c
index 0acb6640ead7..2f18c2a0be1b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,8 +16,6 @@
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
- rq->cmd_flags &= REQ_OP_MASK;
- rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -62,6 +60,9 @@ static int __blk_rq_map_user_iov(struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
+ bio->bi_opf &= ~REQ_OP_MASK;
+ bio->bi_opf |= req_op(rq);
+
if (map_data && map_data->null_mapped)
bio_set_flag(bio, BIO_NULL_MAPPED);
@@ -90,7 +91,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
}
/**
- * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for passthrough requests
* @q: request queue where request should be inserted
* @rq: request to map data to
* @map_data: pointer to the rq_map_data holding pages (if necessary)
@@ -199,7 +200,7 @@ int blk_rq_unmap_user(struct bio *bio)
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for passthrough requests
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
@@ -234,8 +235,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (!reading)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf &= ~REQ_OP_MASK;
+ bio->bi_opf |= req_op(rq);
if (do_copy)
rq->rq_flags |= RQF_COPY_USER;
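[The mapping helpers no longer guess the data direction themselves (hence the bio_set_op_attrs() removals in bio.c earlier); the freshly built bio now inherits the request's op. The practical rule: the direction must be correct on the request before mapping, which falls out naturally from allocation. A minimal sketch:

    rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);  /* direction set here */
    err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
    /* the helper rewrites bio->bi_opf to req_op(rq) */]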
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6aa43dec5af4..2afa262425d1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -482,13 +482,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_rq_map_sg);
-static void req_set_nomerge(struct request_queue *q, struct request *req)
-{
- req->cmd_flags |= REQ_NOMERGE;
- if (req == q->last_merge)
- q->last_merge = NULL;
-}
-
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
@@ -659,31 +652,32 @@ static void blk_account_io_merge(struct request *req)
}
/*
- * Has to be called with the request spinlock acquired
+ * For non-mq, this has to be called with the request spinlock acquired.
+ * For mq with scheduling, the appropriate queue wide lock should be held.
*/
-static int attempt_merge(struct request_queue *q, struct request *req,
- struct request *next)
+static struct request *attempt_merge(struct request_queue *q,
+ struct request *req, struct request *next)
{
if (!rq_mergeable(req) || !rq_mergeable(next))
- return 0;
+ return NULL;
if (req_op(req) != req_op(next))
- return 0;
+ return NULL;
/*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
- return 0;
+ return NULL;
if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk
|| req_no_special_merge(next))
- return 0;
+ return NULL;
if (req_op(req) == REQ_OP_WRITE_SAME &&
!blk_write_same_mergeable(req->bio, next->bio))
- return 0;
+ return NULL;
/*
* If we are allowed to merge, then append bio list
@@ -692,7 +686,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
* counts here.
*/
if (!ll_merge_requests_fn(q, req, next))
- return 0;
+ return NULL;
/*
* If failfast settings disagree or any of the two is already
@@ -732,42 +726,51 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (blk_rq_cpu_valid(next))
req->cpu = next->cpu;
- /* owner-ship of bio passed from next to req */
+ /*
+ * ownership of bio passed from next to req, return 'next' for
+ * the caller to free
+ */
next->bio = NULL;
- __blk_put_request(q, next);
- return 1;
+ return next;
}
-int attempt_back_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
struct request *next = elv_latter_request(q, rq);
if (next)
return attempt_merge(q, rq, next);
- return 0;
+ return NULL;
}
-int attempt_front_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
struct request *prev = elv_former_request(q, rq);
if (prev)
return attempt_merge(q, prev, rq);
- return 0;
+ return NULL;
}
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
+ struct request *free;
if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
return 0;
- return attempt_merge(q, rq, next);
+ free = attempt_merge(q, rq, next);
+ if (free) {
+ __blk_put_request(q, free);
+ return 1;
+ }
+
+ return 0;
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -798,9 +801,12 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return true;
}
-int blk_try_merge(struct request *rq, struct bio *bio)
+enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
+ if (req_op(rq) == REQ_OP_DISCARD &&
+ queue_max_discard_segments(rq->q) > 1)
+ return ELEVATOR_DISCARD_MERGE;
+ else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 5cd2b435a9f5..f6d917977b33 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -19,6 +19,7 @@
#include <linux/debugfs.h>
#include <linux/blk-mq.h>
+#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
@@ -28,8 +29,6 @@ struct blk_mq_debugfs_attr {
const struct file_operations *fops;
};
-static struct dentry *block_debugfs_root;
-
static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
@@ -88,13 +87,14 @@ static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
struct request *rq = list_entry_rq(v);
- seq_printf(m, "%p {.cmd_type=%u, .cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
- rq, rq->cmd_type, rq->cmd_flags, (unsigned int)rq->rq_flags,
+ seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
+ rq, rq->cmd_flags, (__force unsigned int)rq->rq_flags,
rq->tag, rq->internal_tag);
return 0;
}
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
+ __acquires(&hctx->lock)
{
struct blk_mq_hw_ctx *hctx = m->private;
@@ -110,6 +110,7 @@ static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
}
static void hctx_dispatch_stop(struct seq_file *m, void *v)
+ __releases(&hctx->lock)
{
struct blk_mq_hw_ctx *hctx = m->private;
@@ -176,13 +177,17 @@ static int hctx_tags_show(struct seq_file *m, void *v)
{
struct blk_mq_hw_ctx *hctx = m->private;
struct request_queue *q = hctx->queue;
+ int res;
- mutex_lock(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->sysfs_lock);
+ if (res)
+ goto out;
if (hctx->tags)
blk_mq_debugfs_tags_show(m, hctx->tags);
mutex_unlock(&q->sysfs_lock);
- return 0;
+out:
+ return res;
}
static int hctx_tags_open(struct inode *inode, struct file *file)
@@ -201,12 +206,17 @@ static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
{
struct blk_mq_hw_ctx *hctx = m->private;
struct request_queue *q = hctx->queue;
+ int res;
- mutex_lock(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->sysfs_lock);
+ if (res)
+ goto out;
if (hctx->tags)
sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
mutex_unlock(&q->sysfs_lock);
- return 0;
+
+out:
+ return res;
}
static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
@@ -225,13 +235,17 @@ static int hctx_sched_tags_show(struct seq_file *m, void *v)
{
struct blk_mq_hw_ctx *hctx = m->private;
struct request_queue *q = hctx->queue;
+ int res;
- mutex_lock(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->sysfs_lock);
+ if (res)
+ goto out;
if (hctx->sched_tags)
blk_mq_debugfs_tags_show(m, hctx->sched_tags);
mutex_unlock(&q->sysfs_lock);
- return 0;
+out:
+ return res;
}
static int hctx_sched_tags_open(struct inode *inode, struct file *file)
@@ -250,12 +264,17 @@ static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
{
struct blk_mq_hw_ctx *hctx = m->private;
struct request_queue *q = hctx->queue;
+ int res;
- mutex_lock(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->sysfs_lock);
+ if (res)
+ goto out;
if (hctx->sched_tags)
sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
mutex_unlock(&q->sysfs_lock);
- return 0;
+
+out:
+ return res;
}
static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
@@ -482,6 +501,7 @@ static const struct file_operations hctx_active_fops = {
};
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
+ __acquires(&ctx->lock)
{
struct blk_mq_ctx *ctx = m->private;
@@ -497,6 +517,7 @@ static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
}
static void ctx_rq_list_stop(struct seq_file *m, void *v)
+ __releases(&ctx->lock)
{
struct blk_mq_ctx *ctx = m->private;
@@ -630,6 +651,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
{"queued", 0600, &hctx_queued_fops},
{"run", 0600, &hctx_run_fops},
{"active", 0400, &hctx_active_fops},
+ {},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
@@ -637,14 +659,15 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
{"dispatched", 0600, &ctx_dispatched_fops},
{"merged", 0600, &ctx_merged_fops},
{"completed", 0600, &ctx_completed_fops},
+ {},
};
int blk_mq_debugfs_register(struct request_queue *q, const char *name)
{
- if (!block_debugfs_root)
+ if (!blk_debugfs_root)
return -ENOENT;
- q->debugfs_dir = debugfs_create_dir(name, block_debugfs_root);
+ q->debugfs_dir = debugfs_create_dir(name, blk_debugfs_root);
if (!q->debugfs_dir)
goto err;
@@ -665,27 +688,31 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
q->debugfs_dir = NULL;
}
+static bool debugfs_create_files(struct dentry *parent, void *data,
+ const struct blk_mq_debugfs_attr *attr)
+{
+ for (; attr->name; attr++) {
+ if (!debugfs_create_file(attr->name, attr->mode, parent,
+ data, attr->fops))
+ return false;
+ }
+ return true;
+}
+
static int blk_mq_debugfs_register_ctx(struct request_queue *q,
struct blk_mq_ctx *ctx,
struct dentry *hctx_dir)
{
struct dentry *ctx_dir;
char name[20];
- int i;
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
ctx_dir = debugfs_create_dir(name, hctx_dir);
if (!ctx_dir)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(blk_mq_debugfs_ctx_attrs); i++) {
- const struct blk_mq_debugfs_attr *attr;
-
- attr = &blk_mq_debugfs_ctx_attrs[i];
- if (!debugfs_create_file(attr->name, attr->mode, ctx_dir, ctx,
- attr->fops))
- return -ENOMEM;
- }
+ if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
+ return -ENOMEM;
return 0;
}
@@ -703,14 +730,8 @@ static int blk_mq_debugfs_register_hctx(struct request_queue *q,
if (!hctx_dir)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(blk_mq_debugfs_hctx_attrs); i++) {
- const struct blk_mq_debugfs_attr *attr;
-
- attr = &blk_mq_debugfs_hctx_attrs[i];
- if (!debugfs_create_file(attr->name, attr->mode, hctx_dir, hctx,
- attr->fops))
- return -ENOMEM;
- }
+ if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
+ return -ENOMEM;
hctx_for_each_ctx(hctx, ctx, i) {
if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
@@ -749,8 +770,3 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
debugfs_remove_recursive(q->mq_debugfs_dir);
q->mq_debugfs_dir = NULL;
}
-
-void blk_mq_debugfs_init(void)
-{
- block_debugfs_root = debugfs_create_dir("block", NULL);
-}
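[The open-coded ARRAY_SIZE() loops over the attribute tables are replaced by a walk that stops at the new empty sentinel entries (the {} terminators above). The idiom, sketched with two attributes from this file:

    static const struct blk_mq_debugfs_attr example_attrs[] = {
        {"run", 0600, &hctx_run_fops},
        {"active", 0400, &hctx_active_fops},
        {},     /* sentinel: attr->name == NULL terminates the walk */
    };

    const struct blk_mq_debugfs_attr *attr;

    for (attr = example_attrs; attr->name; attr++)
        debugfs_create_file(attr->name, attr->mode, parent, data, attr->fops);

The shared debugfs root also moves: blk_debugfs_root is now created once in blk_dev_init() (see the blk-core.c hunk above) instead of in a blk-mq-only init hook.]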
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 114814ec3d49..9e8d6795a8c1 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -68,7 +68,9 @@ error:
EXPORT_SYMBOL_GPL(blk_mq_sched_init_hctx_data);
static void __blk_mq_sched_assign_ioc(struct request_queue *q,
- struct request *rq, struct io_context *ioc)
+ struct request *rq,
+ struct bio *bio,
+ struct io_context *ioc)
{
struct io_cq *icq;
@@ -83,7 +85,7 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
}
rq->elv.icq = icq;
- if (!blk_mq_sched_get_rq_priv(q, rq)) {
+ if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
rq->rq_flags |= RQF_ELVPRIV;
get_io_context(icq->ioc);
return;
@@ -99,7 +101,7 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
ioc = rq_ioc(bio);
if (ioc)
- __blk_mq_sched_assign_ioc(q, rq, ioc);
+ __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
}
struct request *blk_mq_sched_get_request(struct request_queue *q,
@@ -173,6 +175,8 @@ void blk_mq_sched_put_request(struct request *rq)
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct elevator_queue *e = hctx->queue->elevator;
+ const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+ bool did_work = false;
LIST_HEAD(rq_list);
if (unlikely(blk_mq_hctx_stopped(hctx)))
@@ -202,11 +206,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart(hctx);
- blk_mq_dispatch_rq_list(hctx, &rq_list);
- } else if (!e || !e->type->ops.mq.dispatch_request) {
+ did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+ } else if (!has_sched_dispatch) {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
blk_mq_dispatch_rq_list(hctx, &rq_list);
- } else {
+ }
+
+ /*
+ * We want to dispatch from the scheduler if we had no work left
+ * on the dispatch list, OR if we did have work but weren't able
+ * to make progress.
+ */
+ if (!did_work && has_sched_dispatch) {
do {
struct request *rq;
@@ -234,31 +245,33 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+ struct request **merged_request)
{
struct request *rq;
- int ret;
- ret = elv_merge(q, &rq, bio);
- if (ret == ELEVATOR_BACK_MERGE) {
+ switch (elv_merge(q, &rq, bio)) {
+ case ELEVATOR_BACK_MERGE:
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
- if (bio_attempt_back_merge(q, rq, bio)) {
- if (!attempt_back_merge(q, rq))
- elv_merged_request(q, rq, ret);
- return true;
- }
- } else if (ret == ELEVATOR_FRONT_MERGE) {
+ if (!bio_attempt_back_merge(q, rq, bio))
+ return false;
+ *merged_request = attempt_back_merge(q, rq);
+ if (!*merged_request)
+ elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
+ return true;
+ case ELEVATOR_FRONT_MERGE:
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
- if (bio_attempt_front_merge(q, rq, bio)) {
- if (!attempt_front_merge(q, rq))
- elv_merged_request(q, rq, ret);
- return true;
- }
+ if (!bio_attempt_front_merge(q, rq, bio))
+ return false;
+ *merged_request = attempt_front_merge(q, rq);
+ if (!*merged_request)
+ elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
+ return true;
+ default:
+ return false;
}
-
- return false;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
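[blk_mq_sched_try_merge() now hands back the request eliminated by a back/front merge through the merged_request out-parameter instead of freeing it internally, so a scheduler can drop its own lock before freeing. mq-deadline (block/mq-deadline.c in the diffstat) is converted to this pattern in the same series; the caller looks roughly like:

    static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
    {
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, &free);
        spin_unlock(&dd->lock);

        if (free)
            blk_mq_free_request(free);   /* freed outside the scheduler lock */
        return ret;
    }]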
@@ -289,7 +302,8 @@ void blk_mq_sched_request_inserted(struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
{
if (rq->tag == -1) {
rq->rq_flags |= RQF_SORTED;
@@ -305,7 +319,6 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
spin_unlock(&hctx->lock);
return true;
}
-EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
@@ -347,7 +360,7 @@ static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
blk_insert_flush(rq);
blk_mq_run_hw_queue(hctx, true);
} else
- blk_mq_add_to_requeue_list(rq, true, true);
+ blk_mq_add_to_requeue_list(rq, false, true);
}
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
@@ -363,6 +376,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
return;
}
+ if (e && blk_mq_sched_bypass_insert(hctx, rq))
+ goto run;
+
if (e && e->type->ops.mq.insert_requests) {
LIST_HEAD(list);
@@ -374,6 +390,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
spin_unlock(&ctx->lock);
}
+run:
if (run_queue)
blk_mq_run_hw_queue(hctx, async);
}
@@ -385,6 +402,23 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
struct elevator_queue *e = hctx->queue->elevator;
+ if (e) {
+ struct request *rq, *next;
+
+ /*
+ * We bypass requests that already have a driver tag assigned,
+ * which should only be flushes. Flushes are only ever inserted
+ * as single requests, so we shouldn't ever hit the
+ * WARN_ON_ONCE() below (but let's handle it just in case).
+ */
+ list_for_each_entry_safe(rq, next, list, queuelist) {
+ if (WARN_ON_ONCE(rq->tag != -1)) {
+ list_del_init(&rq->queuelist);
+ blk_mq_sched_bypass_insert(hctx, rq);
+ }
+ }
+ }
+
if (e && e->type->ops.mq.insert_requests)
e->type->ops.mq.insert_requests(hctx, list, false);
else
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 9478aaeb48c5..7b5f3b95c78e 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -15,8 +15,8 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bi
void blk_mq_sched_put_request(struct request *rq);
void blk_mq_sched_request_inserted(struct request *rq);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+ struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
@@ -49,12 +49,13 @@ blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
}
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
- struct request *rq)
+ struct request *rq,
+ struct bio *bio)
{
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.mq.get_rq_priv)
- return e->type->ops.mq.get_rq_priv(q, rq);
+ return e->type->ops.mq.get_rq_priv(q, rq, bio);
return 0;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 308b3f4fc310..295e69670c39 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -254,7 +254,7 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
kobject_put(&hctx->kobj);
}
- blk_mq_debugfs_unregister(q);
+ blk_mq_debugfs_unregister_hctxs(q);
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 489076e7ae15..b29e7dc7b309 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -199,13 +199,7 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->special = NULL;
/* tag was already set */
rq->errors = 0;
-
- rq->cmd = rq->__cmd;
-
rq->extra_len = 0;
- rq->sense_len = 0;
- rq->resid_len = 0;
- rq->sense = NULL;
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
@@ -487,10 +481,6 @@ void blk_mq_start_request(struct request *rq)
trace_block_rq_issue(q, rq);
- rq->resid_len = blk_rq_bytes(rq);
- if (unlikely(blk_bidi_rq(rq)))
- rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
-
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
blk_stat_set_issue_time(&rq->issue_stat);
rq->rq_flags |= RQF_STATS;
@@ -773,7 +763,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
int checked = 8;
list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
- int el_ret;
+ bool merged = false;
if (!checked--)
break;
@@ -781,26 +771,25 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
if (!blk_rq_merge_ok(rq, bio))
continue;
- el_ret = blk_try_merge(rq, bio);
- if (el_ret == ELEVATOR_NO_MERGE)
- continue;
-
- if (!blk_mq_sched_allow_merge(q, rq, bio))
+ switch (blk_try_merge(rq, bio)) {
+ case ELEVATOR_BACK_MERGE:
+ if (blk_mq_sched_allow_merge(q, rq, bio))
+ merged = bio_attempt_back_merge(q, rq, bio);
break;
-
- if (el_ret == ELEVATOR_BACK_MERGE) {
- if (bio_attempt_back_merge(q, rq, bio)) {
- ctx->rq_merged++;
- return true;
- }
+ case ELEVATOR_FRONT_MERGE:
+ if (blk_mq_sched_allow_merge(q, rq, bio))
+ merged = bio_attempt_front_merge(q, rq, bio);
break;
- } else if (el_ret == ELEVATOR_FRONT_MERGE) {
- if (bio_attempt_front_merge(q, rq, bio)) {
- ctx->rq_merged++;
- return true;
- }
+ case ELEVATOR_DISCARD_MERGE:
+ merged = bio_attempt_discard_merge(q, rq, bio);
break;
+ default:
+ continue;
}
+
+ if (merged)
+ ctx->rq_merged++;
+ return merged;
}
return false;
@@ -1013,7 +1002,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
blk_mq_run_hw_queue(hctx, true);
}
- return ret != BLK_MQ_RQ_QUEUE_BUSY;
+ return queued != 0;
}
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
@@ -1442,12 +1431,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t(data.hctx, rq);
if (unlikely(is_flush_fua)) {
- blk_mq_put_ctx(data.ctx);
+ if (q->elevator)
+ goto elv_insert;
blk_mq_bio_to_request(rq, bio);
- blk_mq_get_driver_tag(rq, NULL, true);
blk_insert_flush(rq);
- blk_mq_run_hw_queue(data.hctx, true);
- goto done;
+ goto run_queue;
}
plug = current->plug;
@@ -1497,6 +1485,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
if (q->elevator) {
+elv_insert:
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true,
@@ -1510,6 +1499,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
* latter allows for merging opportunities and more efficient
* dispatching.
*/
+run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}
blk_mq_put_ctx(data.ctx);
@@ -1565,12 +1555,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t(data.hctx, rq);
if (unlikely(is_flush_fua)) {
- blk_mq_put_ctx(data.ctx);
+ if (q->elevator)
+ goto elv_insert;
blk_mq_bio_to_request(rq, bio);
- blk_mq_get_driver_tag(rq, NULL, true);
blk_insert_flush(rq);
- blk_mq_run_hw_queue(data.hctx, true);
- goto done;
+ goto run_queue;
}
/*
@@ -1608,6 +1597,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
}
if (q->elevator) {
+elv_insert:
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true,
@@ -1621,6 +1611,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
* latter allows for merging opportunities and more efficient
* dispatching.
*/
+run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}
@@ -2637,10 +2628,14 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
+ /*
+ * Manually set the make_request_fn as blk_queue_make_request
+ * resets a lot of the queue settings.
+ */
if (q->nr_hw_queues > 1)
- blk_queue_make_request(q, blk_mq_make_request);
+ q->make_request_fn = blk_mq_make_request;
else
- blk_queue_make_request(q, blk_sq_make_request);
+ q->make_request_fn = blk_sq_make_request;
blk_mq_queue_reinit(q, cpu_online_mask);
}
@@ -2824,8 +2819,6 @@ void blk_mq_enable_hotplug(void)
static int __init blk_mq_init(void)
{
- blk_mq_debugfs_init();
-
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
blk_mq_hctx_notify_dead);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b52abd62b1b0..24b2256186f3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -85,16 +85,11 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
* debugfs helpers
*/
#ifdef CONFIG_BLK_DEBUG_FS
-void blk_mq_debugfs_init(void);
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
-static inline void blk_mq_debugfs_init(void)
-{
-}
-
static inline int blk_mq_debugfs_register(struct request_queue *q,
const char *name)
{
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 529e55f52a03..1e7174ffc9d4 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -88,6 +88,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
void blk_set_default_limits(struct queue_limits *lim)
{
lim->max_segments = BLK_MAX_SEGMENTS;
+ lim->max_discard_segments = 1;
lim->max_integrity_segments = 0;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0;
@@ -128,6 +129,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
/* Inherit limits from component devices */
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX;
+ lim->max_discard_segments = 1;
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
@@ -253,7 +255,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
limits->max_sectors = max_sectors;
- q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
+ q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@@ -337,6 +339,22 @@ void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments
EXPORT_SYMBOL(blk_queue_max_segments);
/**
+ * blk_queue_max_discard_segments - set max segments for discard requests
+ * @q: the request queue for the device
+ * @max_segments: max number of segments
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the number of
+ * segments in a discard request.
+ **/
+void blk_queue_max_discard_segments(struct request_queue *q,
+ unsigned short max_segments)
+{
+ q->limits.max_discard_segments = max_segments;
+}
+EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
+
+/**
* blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
* @q: the request queue for the device
* @max_size: max size of segment in bytes
@@ -553,6 +571,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->virt_boundary_mask);
t->max_segments = min_not_zero(t->max_segments, b->max_segments);
+ t->max_discard_segments = min_not_zero(t->max_discard_segments,
+ b->max_discard_segments);
t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
b->max_integrity_segments);
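The max_discard_segments limit added above defaults to 1 and is stacked with min_not_zero(), so existing drivers keep single-range discards until they opt in. A minimal sketch of a driver opting in at queue-setup time; the function name and the limit of 32 ranges are illustrative, not taken from any real driver:

#include <linux/blkdev.h>

/* Illustrative only: advertise multi-range discard support. */
static void example_setup_discard(struct request_queue *q)
{
	q->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	/* assumed hardware limit of 32 discard ranges per command */
	blk_queue_max_discard_segments(q, 32);
}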
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1dbce057592d..002af836aa87 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -89,7 +89,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
- unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+ unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
@@ -104,7 +104,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
- q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+ q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
@@ -121,6 +121,12 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
return queue_var_show(queue_max_segments(q), (page));
}
+static ssize_t queue_max_discard_segments_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_max_discard_segments(q), (page));
+}
+
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
return queue_var_show(q->limits.max_integrity_segments, (page));
@@ -236,7 +242,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
- q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+ q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(q->queue_lock);
return ret;
@@ -545,6 +551,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
.show = queue_max_segments_show,
};
+static struct queue_sysfs_entry queue_max_discard_segments_entry = {
+ .attr = {.name = "max_discard_segments", .mode = S_IRUGO },
+ .show = queue_max_discard_segments_show,
+};
+
static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
.show = queue_max_integrity_segments_show,
@@ -697,6 +708,7 @@ static struct attribute *default_attrs[] = {
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
+ &queue_max_discard_segments_entry.attr,
&queue_max_integrity_segments_entry.attr,
&queue_max_segment_size_entry.attr,
&queue_iosched_entry.attr,
@@ -799,7 +811,7 @@ static void blk_release_queue(struct kobject *kobj)
container_of(kobj, struct request_queue, kobj);
wbt_exit(q);
- bdi_exit(&q->backing_dev_info);
+ bdi_put(q->backing_dev_info);
blkcg_exit_queue(q);
if (q->elevator) {
@@ -814,13 +826,19 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags)
__blk_queue_free_tags(q);
- if (!q->mq_ops)
+ if (!q->mq_ops) {
+ if (q->exit_rq_fn)
+ q->exit_rq_fn(q, q->fq->flush_rq);
blk_free_flush_queue(q->fq);
- else
+ } else {
blk_mq_release(q);
+ }
blk_trace_shutdown(q);
+ if (q->mq_ops)
+ blk_mq_debugfs_unregister(q);
+
if (q->bio_split)
bioset_free(q->bio_split);
@@ -884,32 +902,36 @@ int blk_register_queue(struct gendisk *disk)
if (ret)
return ret;
+ if (q->mq_ops)
+ blk_mq_register_dev(dev, q);
+
+ /* Prevent changes through sysfs until registration is completed. */
+ mutex_lock(&q->sysfs_lock);
+
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0) {
blk_trace_remove_sysfs(dev);
- return ret;
+ goto unlock;
}
kobject_uevent(&q->kobj, KOBJ_ADD);
- if (q->mq_ops)
- blk_mq_register_dev(dev, q);
-
blk_wb_init(q);
- if (!q->request_fn)
- return 0;
-
- ret = elv_register_queue(q);
- if (ret) {
- kobject_uevent(&q->kobj, KOBJ_REMOVE);
- kobject_del(&q->kobj);
- blk_trace_remove_sysfs(dev);
- kobject_put(&dev->kobj);
- return ret;
+ if (q->request_fn || (q->mq_ops && q->elevator)) {
+ ret = elv_register_queue(q);
+ if (ret) {
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ kobject_del(&q->kobj);
+ blk_trace_remove_sysfs(dev);
+ kobject_put(&dev->kobj);
+ goto unlock;
+ }
}
-
- return 0;
+ ret = 0;
+unlock:
+ mutex_unlock(&q->sysfs_lock);
+ return ret;
}
void blk_unregister_queue(struct gendisk *disk)
@@ -922,7 +944,7 @@ void blk_unregister_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_unregister_dev(disk_to_dev(disk), q);
- if (q->request_fn)
+ if (q->request_fn || (q->mq_ops && q->elevator))
elv_unregister_queue(q);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index f0a9c07b4c7a..1aedb1f7ee0c 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -96,7 +96,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
- struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+ struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
return time_before(jiffies, wb->dirty_sleep + HZ);
}
@@ -279,7 +279,7 @@ enum {
static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
- struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+ struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
u64 thislat;
/*
@@ -339,7 +339,7 @@ static int latency_exceeded(struct rq_wb *rwb)
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
- struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+ struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
rwb->wb_background, rwb->wb_normal, rwb->wb_max);
@@ -423,7 +423,7 @@ static void wb_timer_fn(unsigned long data)
status = latency_exceeded(rwb);
- trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+ trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
inflight);
/*
diff --git a/block/blk.h b/block/blk.h
index 9a716b5925a4..d1ea4bd9b9a3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -14,6 +14,10 @@
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *blk_debugfs_root;
+#endif
+
struct blk_flush_queue {
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
@@ -96,6 +100,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio);
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+ struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count,
struct request **same_queue_rq);
@@ -204,14 +210,14 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio);
-int attempt_back_merge(struct request_queue *q, struct request *rq);
-int attempt_front_merge(struct request_queue *q, struct request *rq);
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
-int blk_try_merge(struct request *rq, struct bio *bio);
+enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
@@ -249,7 +255,14 @@ static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) &&
- (rq->cmd_type == REQ_TYPE_FS);
+ !blk_rq_is_passthrough(rq);
+}
+
+static inline void req_set_nomerge(struct request_queue *q, struct request *req)
+{
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
}
/*
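req_set_nomerge() factors out the bookkeeping for giving up on a request as a merge target: set REQ_NOMERGE and drop it from the one-hit last_merge cache. A hedged sketch of the kind of caller it is meant for, modeled on the back-merge size check in blk-merge.c (the helper is private to block/blk.h, so this only compiles inside the block layer):

static int example_back_merge_ok(struct request_queue *q,
				 struct request *req, struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		/* req is full: stop offering it as a merge target */
		req_set_nomerge(q, req);
		return 0;
	}
	return 1;
}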
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 9d652a992316..cd15f9dbb147 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -71,22 +71,24 @@ void bsg_job_done(struct bsg_job *job, int result,
{
struct request *req = job->req;
struct request *rsp = req->next_rq;
+ struct scsi_request *rq = scsi_req(req);
int err;
err = job->req->errors = result;
if (err < 0)
/* we're only returning the result field in the reply */
- job->req->sense_len = sizeof(u32);
+ rq->sense_len = sizeof(u32);
else
- job->req->sense_len = job->reply_len;
+ rq->sense_len = job->reply_len;
/* we assume all request payload was transferred, residual == 0 */
- req->resid_len = 0;
+ rq->resid_len = 0;
if (rsp) {
- WARN_ON(reply_payload_rcv_len > rsp->resid_len);
+ WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
/* set reply (bidi) residual */
- rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
+ scsi_req(rsp)->resid_len -=
+ min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
}
blk_complete_request(req);
}
@@ -113,6 +115,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
if (!buf->sg_list)
return -ENOMEM;
sg_init_table(buf->sg_list, req->nr_phys_segments);
+ scsi_req(req)->resid_len = blk_rq_bytes(req);
buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
buf->payload_len = blk_rq_bytes(req);
return 0;
@@ -127,6 +130,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
{
struct request *rsp = req->next_rq;
struct request_queue *q = req->q;
+ struct scsi_request *rq = scsi_req(req);
struct bsg_job *job;
int ret;
@@ -140,9 +144,9 @@ static int bsg_create_job(struct device *dev, struct request *req)
job->req = req;
if (q->bsg_job_size)
job->dd_data = (void *)&job[1];
- job->request = req->cmd;
- job->request_len = req->cmd_len;
- job->reply = req->sense;
+ job->request = rq->cmd;
+ job->request_len = rq->cmd_len;
+ job->reply = rq->sense;
job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
* allocated */
if (req->bio) {
@@ -177,7 +181,7 @@ failjob_rls_job:
*
* Drivers/subsys should pass this to the queue init function.
*/
-void bsg_request_fn(struct request_queue *q)
+static void bsg_request_fn(struct request_queue *q)
__releases(q->queue_lock)
__acquires(q->queue_lock)
{
@@ -214,24 +218,30 @@ void bsg_request_fn(struct request_queue *q)
put_device(dev);
spin_lock_irq(q->queue_lock);
}
-EXPORT_SYMBOL_GPL(bsg_request_fn);
/**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
- * @q: request queue setup by caller
* @name: device to give bsg device
* @job_fn: bsg job handler
* @dd_job_size: size of LLD data needed for each job
- *
- * The caller should have setup the reuqest queue with bsg_request_fn
- * as the request_fn.
*/
-int bsg_setup_queue(struct device *dev, struct request_queue *q,
- char *name, bsg_job_fn *job_fn, int dd_job_size)
+struct request_queue *bsg_setup_queue(struct device *dev, char *name,
+ bsg_job_fn *job_fn, int dd_job_size)
{
+ struct request_queue *q;
int ret;
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+ q->cmd_size = sizeof(struct scsi_request);
+ q->request_fn = bsg_request_fn;
+
+ ret = blk_init_allocated_queue(q);
+ if (ret)
+ goto out_cleanup_queue;
+
q->queuedata = dev;
q->bsg_job_size = dd_job_size;
q->bsg_job_fn = job_fn;
@@ -243,9 +253,12 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
if (ret) {
printk(KERN_ERR "%s: bsg interface failed to "
"initialize - register queue\n", dev->kobj.name);
- return ret;
+ goto out_cleanup_queue;
}
- return 0;
+ return q;
+out_cleanup_queue:
+ blk_cleanup_queue(q);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
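bsg_setup_queue() now owns queue allocation: it sets cmd_size for the embedded scsi_request, installs the now-static bsg_request_fn, and returns the initialized queue or an ERR_PTR. A hedged sketch of the new calling convention for an LLD; the device, name, and job handler are placeholders:

#include <linux/err.h>
#include <linux/bsg-lib.h>

static int example_bsg_job_fn(struct bsg_job *job)
{
	/* handle job->request / job->request_len, then complete it */
	bsg_job_done(job, 0, 0);
	return 0;
}

static int example_attach_bsg(struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, "example_bsg", example_bsg_job_fn, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);
	/* tear down later with blk_cleanup_queue(q) */
	return 0;
}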
diff --git a/block/bsg.c b/block/bsg.c
index a57046de2f07..a9a8b8e0446f 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -85,7 +85,6 @@ struct bsg_command {
struct bio *bidi_bio;
int err;
struct sg_io_v4 hdr;
- char sense[SCSI_SENSE_BUFFERSIZE];
};
static void bsg_free_command(struct bsg_command *bc)
@@ -140,18 +139,20 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd,
fmode_t has_write_perm)
{
+ struct scsi_request *req = scsi_req(rq);
+
if (hdr->request_len > BLK_MAX_CDB) {
- rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
- if (!rq->cmd)
+ req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+ if (!req->cmd)
return -ENOMEM;
}
- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+ if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
hdr->request_len))
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
- if (blk_verify_command(rq->cmd, has_write_perm))
+ if (blk_verify_command(req->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -159,7 +160,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
/*
* fill in request structure
*/
- rq->cmd_len = hdr->request_len;
+ req->cmd_len = hdr->request_len;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
@@ -176,7 +177,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
* Check if sg_io_v4 from user is allowed and valid
*/
static int
-bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
{
int ret = 0;
@@ -197,7 +198,7 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
ret = -EINVAL;
}
- *rw = hdr->dout_xfer_len ? WRITE : READ;
+ *op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
return ret;
}
@@ -205,13 +206,12 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
* map sg_io_v4 to a request.
*/
static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
- u8 *sense)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
- int ret, rw;
- unsigned int dxfer_len;
+ int ret;
+ unsigned int op, dxfer_len;
void __user *dxferp = NULL;
struct bsg_class_device *bcd = &q->bsg_dev;
@@ -226,36 +226,35 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
- ret = bsg_validate_sgv4_hdr(hdr, &rw);
+ ret = bsg_validate_sgv4_hdr(hdr, &op);
if (ret)
return ERR_PTR(ret);
/*
* map scatter-gather elements separately and string them to request
*/
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, op, GFP_KERNEL);
if (IS_ERR(rq))
return rq;
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret)
goto out;
- if (rw == WRITE && hdr->din_xfer_len) {
+ if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP;
goto out;
}
- next_rq = blk_get_request(q, READ, GFP_KERNEL);
+ next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(next_rq)) {
ret = PTR_ERR(next_rq);
next_rq = NULL;
goto out;
}
rq->next_rq = next_rq;
- next_rq->cmd_type = rq->cmd_type;
dxferp = (void __user *)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
@@ -280,13 +279,9 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
goto out;
}
- rq->sense = sense;
- rq->sense_len = 0;
-
return rq;
out:
- if (rq->cmd != rq->__cmd)
- kfree(rq->cmd);
+ scsi_req_free_cmd(scsi_req(rq));
blk_put_request(rq);
if (next_rq) {
blk_rq_unmap_user(next_rq->bio);
@@ -393,6 +388,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio)
{
+ struct scsi_request *req = scsi_req(rq);
int ret = 0;
dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
@@ -407,12 +403,12 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0;
- if (rq->sense_len && hdr->response) {
+ if (req->sense_len && hdr->response) {
int len = min_t(unsigned int, hdr->max_response_len,
- rq->sense_len);
+ req->sense_len);
ret = copy_to_user((void __user *)(unsigned long)hdr->response,
- rq->sense, len);
+ req->sense, len);
if (!ret)
hdr->response_len = len;
else
@@ -420,14 +416,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
- hdr->dout_resid = rq->resid_len;
- hdr->din_resid = rq->next_rq->resid_len;
+ hdr->dout_resid = req->resid_len;
+ hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
- hdr->din_resid = rq->resid_len;
+ hdr->din_resid = req->resid_len;
else
- hdr->dout_resid = rq->resid_len;
+ hdr->dout_resid = req->resid_len;
/*
* If the request generated a negative error number, return it
@@ -439,8 +435,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
ret = rq->errors;
blk_rq_unmap_user(bio);
- if (rq->cmd != rq->__cmd)
- kfree(rq->cmd);
+ scsi_req_free_cmd(req);
blk_put_request(rq);
return ret;
@@ -625,7 +620,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
/*
* get a request, fill in the blanks, and add to request queue
*/
- rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
+ rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
@@ -911,12 +906,11 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
int at_head;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
- rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
+ rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f0f29ee731e1..921262770636 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2528,7 +2528,7 @@ static void cfq_remove_request(struct request *rq)
}
}
-static int cfq_merge(struct request_queue *q, struct request **req,
+static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -2544,7 +2544,7 @@ static int cfq_merge(struct request_queue *q, struct request **req,
}
static void cfq_merged_request(struct request_queue *q, struct request *req,
- int type)
+ enum elv_merge type)
{
if (type == ELEVATOR_FRONT_MERGE) {
struct cfq_queue *cfqq = RQ_CFQQ(req);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 556826ac7cb4..570021a0dc1c 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -661,7 +661,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
fmode_t mode = file->f_mode;
- struct backing_dev_info *bdi;
loff_t size;
unsigned int max_sectors;
@@ -708,9 +707,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKFRAGET:
if (!arg)
return -EINVAL;
- bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg,
- (bdi->ra_pages * PAGE_SIZE) / 512);
+ (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -728,8 +726,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- bdi = blk_get_backing_dev_info(bdev);
- bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+ bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 05fc0ea25a98..c68f6bbc0dcd 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -120,12 +120,11 @@ static void deadline_remove_request(struct request_queue *q, struct request *rq)
deadline_del_rq_rb(dd, rq);
}
-static int
+static enum elv_merge
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq;
- int ret;
/*
* check for front merge
@@ -138,20 +137,17 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
BUG_ON(sector != blk_rq_pos(__rq));
if (elv_bio_merge_ok(__rq, bio)) {
- ret = ELEVATOR_FRONT_MERGE;
- goto out;
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
}
}
}
return ELEVATOR_NO_MERGE;
-out:
- *req = __rq;
- return ret;
}
static void deadline_merged_request(struct request_queue *q,
- struct request *req, int type)
+ struct request *req, enum elv_merge type)
{
struct deadline_data *dd = q->elevator->elevator_data;
diff --git a/block/elevator.c b/block/elevator.c
index b2a55167f0c2..699d10f71a2c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -428,11 +428,11 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
-int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
+enum elv_merge elv_merge(struct request_queue *q, struct request **req,
+ struct bio *bio)
{
struct elevator_queue *e = q->elevator;
struct request *__rq;
- int ret;
/*
* Levels of merges:
@@ -447,7 +447,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
* First try one-hit cache.
*/
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
- ret = blk_try_merge(q->last_merge, bio);
+ enum elv_merge ret = blk_try_merge(q->last_merge, bio);
+
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;
@@ -515,7 +516,8 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
return ret;
}
-void elv_merged_request(struct request_queue *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq,
+ enum elv_merge type)
{
struct elevator_queue *e = q->elevator;
@@ -539,7 +541,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
if (e->uses_mq && e->type->ops.mq.requests_merged)
e->type->ops.mq.requests_merged(q, rq, next);
else if (e->type->ops.sq.elevator_merge_req_fn) {
- next_sorted = next->rq_flags & RQF_SORTED;
+ next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
if (next_sorted)
e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
}
@@ -635,7 +637,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (rq->rq_flags & RQF_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
- if (rq->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(rq)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -677,7 +679,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (elv_attempt_insert_merge(q, rq))
break;
case ELEVATOR_INSERT_SORT:
- BUG_ON(rq->cmd_type != REQ_TYPE_FS);
+ BUG_ON(blk_rq_is_passthrough(rq));
rq->rq_flags |= RQF_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
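With elv_merge() returning enum elv_merge, merge results can be dispatched exhaustively instead of compared against bare ints. A caller-side sketch, assuming the usual four values; ELEVATOR_DISCARD_MERGE is the case added in this series for multi-range discards:

#include <linux/blkdev.h>
#include <linux/elevator.h>

static struct request *example_classify(struct request_queue *q,
					struct bio *bio)
{
	struct request *rq = NULL;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		/* append bio to rq */
		break;
	case ELEVATOR_FRONT_MERGE:
		/* prepend bio to rq */
		break;
	case ELEVATOR_DISCARD_MERGE:
		/* fold bio into a multi-range discard request */
		break;
	case ELEVATOR_NO_MERGE:
	default:
		/* fall back to allocating a new request */
		break;
	}
	return rq;
}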
diff --git a/block/genhd.c b/block/genhd.c
index fcd6d4fae657..3631cd480295 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -572,6 +572,20 @@ exit:
disk_part_iter_exit(&piter);
}
+void put_disk_devt(struct disk_devt *disk_devt)
+{
+ if (disk_devt && atomic_dec_and_test(&disk_devt->count))
+ disk_devt->release(disk_devt);
+}
+EXPORT_SYMBOL(put_disk_devt);
+
+void get_disk_devt(struct disk_devt *disk_devt)
+{
+ if (disk_devt)
+ atomic_inc(&disk_devt->count);
+}
+EXPORT_SYMBOL(get_disk_devt);
+
/**
* device_add_disk - add partitioning information to kernel list
* @parent: parent device for the disk
@@ -612,8 +626,15 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
disk_alloc_events(disk);
+ /*
+ * Take a reference on the devt and assign it to the queue, since it
+ * must not be reallocated while the bdi is registered.
+ */
+ disk->queue->disk_devt = disk->disk_devt;
+ get_disk_devt(disk->disk_devt);
+
/* Register BDI before referencing it from bdev */
- bdi = &disk->queue->backing_dev_info;
+ bdi = disk->queue->backing_dev_info;
bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
@@ -648,6 +669,8 @@ void del_gendisk(struct gendisk *disk)
disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
while ((part = disk_part_iter_next(&piter))) {
+ bdev_unhash_inode(MKDEV(disk->major,
+ disk->first_minor + part->partno));
invalidate_partition(disk, part->partno);
delete_partition(disk, part->partno);
}
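get_disk_devt()/put_disk_devt() are a bare refcount around a release callback; device_add_disk() takes one reference for the queue so the devt cannot be recycled while the bdi registered against it is alive. A hedged sketch of a provider, assuming struct disk_devt exposes the count and release members used above and is embedded in a larger object:

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>

/* assumed container; struct disk_devt supplies .count and .release */
struct example_holder {
	struct disk_devt devt;
};

static void example_devt_release(struct disk_devt *disk_devt)
{
	kfree(container_of(disk_devt, struct example_holder, devt));
}

static void example_share(struct request_queue *q, struct gendisk *disk)
{
	/* pin the devt for the queue's lifetime, as device_add_disk() does */
	get_disk_devt(disk->disk_devt);
	q->disk_devt = disk->disk_devt;
	/* the holder later drops its reference with put_disk_devt() */
}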
diff --git a/block/ioctl.c b/block/ioctl.c
index be7f4de3eb3c..7b88820b93d9 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -505,7 +505,6 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct backing_dev_info *bdi;
void __user *argp = (void __user *)arg;
loff_t size;
unsigned int max_sectors;
@@ -532,8 +531,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKFRAGET:
if (!arg)
return -EINVAL;
- bdi = blk_get_backing_dev_info(bdev);
- return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
+ return put_long(arg, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -560,8 +558,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
- bdi = blk_get_backing_dev_info(bdev);
- bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+ bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKBSZSET:
return blkdev_bszset(bdev, mode, argp);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index d93ec713fa62..236121633ca0 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -121,7 +121,7 @@ static void deadline_remove_request(struct request_queue *q, struct request *rq)
}
static void dd_request_merged(struct request_queue *q, struct request *req,
- int type)
+ enum elv_merge type)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -371,12 +371,16 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
- int ret;
+ struct request *free = NULL;
+ bool ret;
spin_lock(&dd->lock);
- ret = blk_mq_sched_try_merge(q, bio);
+ ret = blk_mq_sched_try_merge(q, bio, &free);
spin_unlock(&dd->lock);
+ if (free)
+ blk_mq_free_request(free);
+
return ret;
}
@@ -395,10 +399,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_mq_sched_request_inserted(rq);
- if (blk_mq_sched_bypass_insert(hctx, rq))
- return;
-
- if (at_head || rq->cmd_type != REQ_TYPE_FS) {
+ if (at_head || blk_rq_is_passthrough(rq)) {
if (at_head)
list_add(&rq->queuelist, &dd->dispatch);
else
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c2b64923ab66..2a2fc768b27a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -230,15 +230,17 @@ EXPORT_SYMBOL(blk_verify_command);
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode)
{
- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+ struct scsi_request *req = scsi_req(rq);
+
+ if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+ if (blk_verify_command(req->cmd, mode & FMODE_WRITE))
return -EPERM;
/*
* fill in request structure
*/
- rq->cmd_len = hdr->cmd_len;
+ req->cmd_len = hdr->cmd_len;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
@@ -254,6 +256,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
struct bio *bio)
{
+ struct scsi_request *req = scsi_req(rq);
int r, ret = 0;
/*
@@ -267,13 +270,13 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = rq->resid_len;
+ hdr->resid = req->resid_len;
hdr->sb_len_wr = 0;
- if (rq->sense_len && hdr->sbp) {
- int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+ if (req->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
- if (!copy_to_user(hdr->sbp, rq->sense, len))
+ if (!copy_to_user(hdr->sbp, req->sense, len))
hdr->sb_len_wr = len;
else
ret = -EFAULT;
@@ -294,7 +297,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
int writing = 0;
int at_head = 0;
struct request *rq;
- char sense[SCSI_SENSE_BUFFERSIZE];
+ struct scsi_request *req;
struct bio *bio;
if (hdr->interface_id != 'S')
@@ -318,14 +321,16 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
at_head = 1;
ret = -ENOMEM;
- rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+ rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ GFP_KERNEL);
if (IS_ERR(rq))
return PTR_ERR(rq);
- blk_rq_set_block_pc(rq);
+ req = scsi_req(rq);
+ scsi_req_init(rq);
if (hdr->cmd_len > BLK_MAX_CDB) {
- rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
- if (!rq->cmd)
+ req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+ if (!req->cmd)
goto out_put_request;
}
@@ -357,9 +362,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
goto out_free_cdb;
bio = rq->bio;
- memset(sense, 0, sizeof(sense));
- rq->sense = sense;
- rq->sense_len = 0;
rq->retries = 0;
start_time = jiffies;
@@ -375,8 +377,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
ret = blk_complete_sghdr_rq(rq, hdr, bio);
out_free_cdb:
- if (rq->cmd != rq->__cmd)
- kfree(rq->cmd);
+ scsi_req_free_cmd(req);
out_put_request:
blk_put_request(rq);
return ret;
@@ -420,9 +421,10 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
struct scsi_ioctl_command __user *sic)
{
struct request *rq;
+ struct scsi_request *req;
int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen;
- char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+ char *buffer = NULL;
if (!sic)
return -EINVAL;
@@ -447,12 +449,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
}
- rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
+ rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ __GFP_RECLAIM);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto error_free_buffer;
}
- blk_rq_set_block_pc(rq);
+ req = scsi_req(rq);
+ scsi_req_init(rq);
cmdlen = COMMAND_SIZE(opcode);
@@ -460,14 +464,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
* get command and data to send to device, if any
*/
err = -EFAULT;
- rq->cmd_len = cmdlen;
- if (copy_from_user(rq->cmd, sic->data, cmdlen))
+ req->cmd_len = cmdlen;
+ if (copy_from_user(req->cmd, sic->data, cmdlen))
goto error;
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
+ err = blk_verify_command(req->cmd, mode & FMODE_WRITE);
if (err)
goto error;
@@ -503,18 +507,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
goto error;
}
- memset(sense, 0, sizeof(sense));
- rq->sense = sense;
- rq->sense_len = 0;
-
blk_execute_rq(q, disk, rq, 0);
err = rq->errors & 0xff; /* only 8 bit SCSI status */
if (err) {
- if (rq->sense_len && rq->sense) {
- bytes = (OMAX_SB_LEN > rq->sense_len) ?
- rq->sense_len : OMAX_SB_LEN;
- if (copy_to_user(sic->data, rq->sense, bytes))
+ if (req->sense_len && req->sense) {
+ bytes = (OMAX_SB_LEN > req->sense_len) ?
+ req->sense_len : OMAX_SB_LEN;
+ if (copy_to_user(sic->data, req->sense, bytes))
err = -EFAULT;
}
} else {
@@ -539,14 +539,14 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq;
int err;
- rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
+ rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- rq->cmd[0] = cmd;
- rq->cmd[4] = data;
- rq->cmd_len = 6;
+ scsi_req(rq)->cmd[0] = cmd;
+ scsi_req(rq)->cmd[4] = data;
+ scsi_req(rq)->cmd_len = 6;
err = blk_execute_rq(q, bd_disk, rq, 0);
blk_put_request(rq);
@@ -743,6 +743,17 @@ int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
}
EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
+void scsi_req_init(struct request *rq)
+{
+ struct scsi_request *req = scsi_req(rq);
+
+ memset(req->__cmd, 0, sizeof(req->__cmd));
+ req->cmd = req->__cmd;
+ req->cmd_len = BLK_MAX_CDB;
+ req->sense_len = 0;
+}
+EXPORT_SYMBOL(scsi_req_init);
+
static int __init blk_scsi_ioctl_init(void)
{
blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
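scsi_req_init() resets the CDB pointer, length, and sense_len on the scsi_request that now sits in front of the driver data, and every former rq->cmd/rq->sense access goes through scsi_req(rq). A minimal sketch of issuing a 6-byte command, modeled directly on __blk_send_generic() above; the opcode, disk, and queue are caller-supplied:

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

static int example_send_cmd(struct request_queue *q, struct gendisk *disk,
			    u8 opcode, u8 data)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	scsi_req(rq)->cmd[0] = opcode;
	scsi_req(rq)->cmd[4] = data;
	scsi_req(rq)->cmd_len = 6;
	err = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return err;
}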
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1f863e757ee4..c771d4c341ea 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1265,13 +1265,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
*/
static int atapi_drain_needed(struct request *rq)
{
- if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
+ if (likely(!blk_rq_is_passthrough(rq)))
return 0;
if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
return 0;
- return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+ return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
}
static int ata_scsi_dev_config(struct scsi_device *sdev,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 223ff2fcae7e..f744de7a0f9b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -69,6 +69,7 @@ config AMIGA_Z2RAM
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
+ select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
"GD-ROM" by SEGA to signify it is capable of reading special disks
@@ -114,6 +115,7 @@ config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
select CHECK_SIGNATURE
+ select BLK_SCSI_REQUEST
help
This is the driver for Compaq Smart Array 5xxx controllers.
Everyone using these boards should say Y here.
@@ -386,6 +388,7 @@ config BLK_DEV_RAM_DAX
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ select BLK_SCSI_REQUEST
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
@@ -501,6 +504,16 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+config VIRTIO_BLK_SCSI
+ bool "SCSI passthrough request for the Virtio block driver"
+ depends on VIRTIO_BLK
+ select BLK_SCSI_REQUEST
+ ---help---
+ Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
+ virtio-blk devices. This is only supported for the legacy
+ virtio protocol and not enabled by default by any hypervisor.
+	  You probably want to use virtio-scsi instead.
+
config BLK_DEV_HD
bool "Very old hard disk (MFM/RLL/IDE) driver"
depends on HAVE_IDE
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index ec9d8610b25f..027b876370bc 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
+ q->backing_dev_info->name = "aoe";
+ q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 3a44438a1195..27d613795653 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -52,6 +52,7 @@
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_request.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#include <linux/kthread.h>
@@ -1853,8 +1854,8 @@ static void cciss_softirq_done(struct request *rq)
dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
/* set the residual count for pc requests */
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
- rq->resid_len = c->err_info->ResidualCnt;
+ if (blk_rq_is_passthrough(rq))
+ scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
@@ -1941,9 +1942,16 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
int drv_index)
{
- disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+ disk->queue = blk_alloc_queue(GFP_KERNEL);
if (!disk->queue)
goto init_queue_failure;
+
+ disk->queue->cmd_size = sizeof(struct scsi_request);
+ disk->queue->request_fn = do_cciss_request;
+ disk->queue->queue_lock = &h->lock;
+ if (blk_init_allocated_queue(disk->queue) < 0)
+ goto cleanup_queue;
+
sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
disk->major = h->major;
disk->first_minor = drv_index << NWD_SHIFT;
@@ -3075,7 +3083,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
driver_byte = DRIVER_OK;
msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
- if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ if (blk_rq_is_passthrough(cmd->rq))
host_byte = DID_PASSTHROUGH;
else
host_byte = DID_OK;
@@ -3084,7 +3092,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
host_byte, driver_byte);
if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
- if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+ if (!blk_rq_is_passthrough(cmd->rq))
dev_warn(&h->pdev->dev, "cmd %p "
"has SCSI Status 0x%x\n",
cmd, cmd->err_info->ScsiStatus);
@@ -3095,31 +3103,23 @@ static inline int evaluate_target_status(ctlr_info_t *h,
sense_key = 0xf & cmd->err_info->SenseInfo[2];
/* no status or recovered error */
if (((sense_key == 0x0) || (sense_key == 0x1)) &&
- (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
+ !blk_rq_is_passthrough(cmd->rq))
error_value = 0;
if (check_for_unit_attention(h, cmd)) {
- *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
+ *retry_cmd = !blk_rq_is_passthrough(cmd->rq);
return 0;
}
/* Not SG_IO or similar? */
- if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(cmd->rq)) {
if (error_value != 0)
dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
" sense key = 0x%x\n", cmd, sense_key);
return error_value;
}
- /* SG_IO or similar, copy sense data back */
- if (cmd->rq->sense) {
- if (cmd->rq->sense_len > cmd->err_info->SenseLen)
- cmd->rq->sense_len = cmd->err_info->SenseLen;
- memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
- cmd->rq->sense_len);
- } else
- cmd->rq->sense_len = 0;
-
+ scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
return error_value;
}
@@ -3146,15 +3146,14 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
break;
case CMD_DATA_UNDERRUN:
- if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(cmd->rq)) {
dev_warn(&h->pdev->dev, "cmd %p has"
" completed with data underrun "
"reported\n", cmd);
- cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
- if (cmd->rq->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(cmd->rq))
dev_warn(&h->pdev->dev, "cciss: cmd %p has"
" completed with data overrun "
"reported\n", cmd);
@@ -3164,7 +3163,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"reported invalid\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_PROTOCOL_ERR:
@@ -3172,7 +3171,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"protocol error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_HARDWARE_ERR:
@@ -3180,7 +3179,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
" hardware error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_CONNECTION_LOST:
@@ -3188,7 +3187,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"connection lost\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_ABORTED:
@@ -3196,7 +3195,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"aborted\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_ABORT_FAILED:
@@ -3204,7 +3203,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"abort failed\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_UNSOLICITED_ABORT:
@@ -3219,21 +3218,21 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"%p retried too many times\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_TIMEOUT:
dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_UNABORTABLE:
dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
default:
@@ -3242,7 +3241,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
cmd->err_info->CommandStatus);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
}
@@ -3395,7 +3394,9 @@ static void do_cciss_request(struct request_queue *q)
c->Header.SGList = h->max_cmd_sgentries;
set_performant_mode(h, c);
- if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+ switch (req_op(creq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@@ -3425,12 +3426,16 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
- } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
- c->Request.CDBLen = creq->cmd_len;
- memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
- } else {
+ break;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ c->Request.CDBLen = scsi_req(creq)->cmd_len;
+ memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
+ scsi_req(creq)->sense = c->err_info->SenseInfo;
+ break;
+ default:
dev_warn(&h->pdev->dev, "bad request type %d\n",
- creq->cmd_type);
+ creq->cmd_flags);
BUG();
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 83482721bc01..d305f05be648 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev);
- r = bdi_congested(&q->backing_dev_info, bdi_bits);
+ r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device);
if (r)
reason = 'b';
@@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;
- q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = device;
+ q->backing_dev_info->congested_fn = drbd_congested;
+ q->backing_dev_info->congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_write_cache(q, true, true);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f35db29cac76..908c704e20aa 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
if (b) {
blk_queue_stack_limits(q, b);
- if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+ if (q->backing_dev_info->ra_pages !=
+ b->backing_dev_info->ra_pages) {
drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info.ra_pages,
- b->backing_dev_info.ra_pages);
- q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+ q->backing_dev_info->ra_pages,
+ b->backing_dev_info->ra_pages);
+ q->backing_dev_info->ra_pages =
+ b->backing_dev_info->ra_pages;
}
}
fixup_discard_if_not_supported(q);
@@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s,
s->dev_disk_flags = md->flags;
q = bdev_get_queue(device->ldev->backing_bdev);
s->dev_lower_blocked =
- bdi_congested(&q->backing_dev_info,
+ bdi_congested(q->backing_dev_info,
(1 << WB_async_congested) |
(1 << WB_sync_congested));
put_ldev(device);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be2b93fd2c11..8378142f7a55 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
- bdi_rw_congested(&device->rq_queue->backing_dev_info);
+ bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de279fe4e4fd..cb6bdb75d52d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -938,7 +938,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 184887af4b9f..45b4384f650c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2900,8 +2900,8 @@ static void do_fd_request(struct request_queue *q)
return;
if (WARN(atomic_read(&usage_count) == 0,
- "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
- current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
+ "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
+ current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
return;
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index a9b48ed7a3cd..6043648da1e8 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -626,30 +626,29 @@ repeat:
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, bio_data(req->bio));
#endif
- if (req->cmd_type == REQ_TYPE_FS) {
- switch (rq_data_dir(req)) {
- case READ:
- hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
- &read_intr);
- if (reset)
- goto repeat;
- break;
- case WRITE:
- hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
- &write_intr);
- if (reset)
- goto repeat;
- if (wait_DRQ()) {
- bad_rw_intr();
- goto repeat;
- }
- outsw(HD_DATA, bio_data(req->bio), 256);
- break;
- default:
- printk("unknown hd-command\n");
- hd_end_request_cur(-EIO);
- break;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+ &read_intr);
+ if (reset)
+ goto repeat;
+ break;
+ case REQ_OP_WRITE:
+ hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
+ &write_intr);
+ if (reset)
+ goto repeat;
+ if (wait_DRQ()) {
+ bad_rw_intr();
+ goto repeat;
}
+ outsw(HD_DATA, bio_data(req->bio), 256);
+ break;
+ default:
+ printk("unknown hd-command\n");
+ hd_end_request_cur(-EIO);
+ break;
}
}
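The hd conversion above is the template for the legacy drivers in this series: cmd_type checks disappear and the request handler dispatches on req_op(), failing anything it does not recognize. Reduced to a hedged sketch; the read/write handlers are placeholders:

#include <linux/blkdev.h>

static void example_do_read(struct request *req);	/* placeholder */
static void example_do_write(struct request *req);	/* placeholder */

static void example_dispatch(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_READ:
		example_do_read(req);
		break;
	case REQ_OP_WRITE:
		example_do_write(req);
		break;
	default:
		/* passthrough or unknown op: fail the request */
		__blk_end_request_all(req, -EIO);
		break;
	}
}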
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e937fcf71769..286f276f586e 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -670,15 +670,17 @@ static void mg_request_poll(struct request_queue *q)
break;
}
- if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
- mg_end_request_cur(host, -EIO);
- continue;
- }
-
- if (rq_data_dir(host->req) == READ)
+ switch (req_op(host->req)) {
+ case REQ_OP_READ:
mg_read(host->req);
- else
+ break;
+ case REQ_OP_WRITE:
mg_write(host->req);
+ break;
+ default:
+ mg_end_request_cur(host, -EIO);
+ break;
+ }
}
}
@@ -687,13 +689,15 @@ static unsigned int mg_issue_req(struct request *req,
unsigned int sect_num,
unsigned int sect_cnt)
{
- if (rq_data_dir(req) == READ) {
+ switch (req_op(host->req)) {
+ case REQ_OP_READ:
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
- } else {
+ break;
+ case REQ_OP_WRITE:
/* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
@@ -712,6 +716,10 @@ static unsigned int mg_issue_req(struct request *req,
mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
+ break;
+ default:
+ mg_end_request_cur(host, -EIO);
+ break;
}
return MG_ERR_NONE;
}
@@ -753,11 +761,6 @@ static void mg_request(struct request_queue *q)
continue;
}
- if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
- mg_end_request_cur(host, -EIO);
- continue;
- }
-
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9fd06eeb1a17..0be84a3cb6d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,6 +41,9 @@
#include <linux/nbd.h>
+static DEFINE_IDR(nbd_index_idr);
+static DEFINE_MUTEX(nbd_index_mutex);
+
struct nbd_sock {
struct socket *sock;
struct mutex tx_lock;
@@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
-static struct nbd_device *nbd_dev;
static int max_part;
+static struct workqueue_struct *recv_workqueue;
+static int part_shift;
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
@@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
req->errors++;
- /*
- * If our disconnect packet times out then we're already holding the
- * config_lock and could deadlock here, so just set an error and return,
- * we'll handle shutting everything down later.
- */
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- return BLK_EH_HANDLED;
mutex_lock(&nbd->config_lock);
sock_shutdown(nbd);
mutex_unlock(&nbd->config_lock);
@@ -278,14 +275,29 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
u32 type;
u32 tag = blk_mq_unique_tag(req);
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
type = NBD_CMD_TRIM;
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
type = NBD_CMD_FLUSH;
- else if (rq_data_dir(req) == WRITE)
+ break;
+ case REQ_OP_WRITE:
type = NBD_CMD_WRITE;
- else
+ break;
+ case REQ_OP_READ:
type = NBD_CMD_READ;
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (rq_data_dir(req) == WRITE &&
+ (nbd->flags & NBD_FLAG_READ_ONLY)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Write on read-only\n");
+ return -EIO;
+ }
memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);
@@ -510,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
goto error_out;
}
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_DRV_PRIV)
- goto error_out;
-
- if (req->cmd_type == REQ_TYPE_FS &&
- rq_data_dir(req) == WRITE &&
- (nbd->flags & NBD_FLAG_READ_ONLY)) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Write on read-only\n");
- goto error_out;
- }
-
req->errors = 0;
nsock = nbd->socks[index];
@@ -785,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
INIT_WORK(&args[i].work, recv_work);
args[i].nbd = nbd;
args[i].index = i;
- queue_work(system_long_wq, &args[i].work);
+ queue_work(recv_workqueue, &args[i].work);
}
wait_event_interruptible(nbd->recv_wq,
atomic_read(&nbd->recv_threads) == 0);
@@ -996,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = {
.timeout = nbd_xmit_timeout,
};
+static void nbd_dev_remove(struct nbd_device *nbd)
+{
+ struct gendisk *disk = nbd->disk;
+ nbd->magic = 0;
+ if (disk) {
+ del_gendisk(disk);
+ blk_cleanup_queue(disk->queue);
+ blk_mq_free_tag_set(&nbd->tag_set);
+ put_disk(disk);
+ }
+ kfree(nbd);
+}
+
+static int nbd_dev_add(int index)
+{
+ struct nbd_device *nbd;
+ struct gendisk *disk;
+ struct request_queue *q;
+ int err = -ENOMEM;
+
+ nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
+ if (!nbd)
+ goto out;
+
+ disk = alloc_disk(1 << part_shift);
+ if (!disk)
+ goto out_free_nbd;
+
+ if (index >= 0) {
+ err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
+ GFP_KERNEL);
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ } else {
+ err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
+ if (err >= 0)
+ index = err;
+ }
+ if (err < 0)
+ goto out_free_disk;
+
+ nbd->disk = disk;
+ nbd->tag_set.ops = &nbd_mq_ops;
+ nbd->tag_set.nr_hw_queues = 1;
+ nbd->tag_set.queue_depth = 128;
+ nbd->tag_set.numa_node = NUMA_NO_NODE;
+ nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
+ nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+ nbd->tag_set.driver_data = nbd;
+
+ err = blk_mq_alloc_tag_set(&nbd->tag_set);
+ if (err)
+ goto out_free_idr;
+
+ q = blk_mq_init_queue(&nbd->tag_set);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_free_tags;
+ }
+ disk->queue = q;
+
+ /*
+ * Tell the block layer that we are not a rotational device
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ disk->queue->limits.discard_granularity = 512;
+ blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+ disk->queue->limits.discard_zeroes_data = 0;
+ blk_queue_max_hw_sectors(disk->queue, 65536);
+ disk->queue->limits.max_sectors = 256;
+
+ nbd->magic = NBD_MAGIC;
+ mutex_init(&nbd->config_lock);
+ disk->major = NBD_MAJOR;
+ disk->first_minor = index << part_shift;
+ disk->fops = &nbd_fops;
+ disk->private_data = nbd;
+ sprintf(disk->disk_name, "nbd%d", index);
+ init_waitqueue_head(&nbd->recv_wq);
+ nbd_reset(nbd);
+ add_disk(disk);
+ return index;
+
+out_free_tags:
+ blk_mq_free_tag_set(&nbd->tag_set);
+out_free_idr:
+ idr_remove(&nbd_index_idr, index);
+out_free_disk:
+ put_disk(disk);
+out_free_nbd:
+ kfree(nbd);
+out:
+ return err;
+}
+
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
@@ -1003,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = {
static int __init nbd_init(void)
{
- int err = -ENOMEM;
int i;
- int part_shift;
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
@@ -1034,111 +1129,38 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
-
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
+ recv_workqueue = alloc_workqueue("knbd-recv",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!recv_workqueue)
return -ENOMEM;
- for (i = 0; i < nbds_max; i++) {
- struct request_queue *q;
- struct gendisk *disk = alloc_disk(1 << part_shift);
- if (!disk)
- goto out;
- nbd_dev[i].disk = disk;
-
- nbd_dev[i].tag_set.ops = &nbd_mq_ops;
- nbd_dev[i].tag_set.nr_hw_queues = 1;
- nbd_dev[i].tag_set.queue_depth = 128;
- nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
- nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
- nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
-
- err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
- if (err) {
- put_disk(disk);
- goto out;
- }
-
- /*
- * The new linux 2.5 block layer implementation requires
- * every gendisk to have its very own request_queue struct.
- * These structs are big so we dynamically allocate them.
- */
- q = blk_mq_init_queue(&nbd_dev[i].tag_set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- put_disk(disk);
- goto out;
- }
- disk->queue = q;
-
- /*
- * Tell the block layer that we are not a rotational device
- */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
- disk->queue->limits.discard_granularity = 512;
- blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
- disk->queue->limits.discard_zeroes_data = 0;
- blk_queue_max_hw_sectors(disk->queue, 65536);
- disk->queue->limits.max_sectors = 256;
- }
-
- if (register_blkdev(NBD_MAJOR, "nbd")) {
- err = -EIO;
- goto out;
- }
-
- printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
+ if (register_blkdev(NBD_MAJOR, "nbd"))
+ return -EIO;
nbd_dbg_init();
- for (i = 0; i < nbds_max; i++) {
- struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].magic = NBD_MAGIC;
- mutex_init(&nbd_dev[i].config_lock);
- disk->major = NBD_MAJOR;
- disk->first_minor = i << part_shift;
- disk->fops = &nbd_fops;
- disk->private_data = &nbd_dev[i];
- sprintf(disk->disk_name, "nbd%d", i);
- init_waitqueue_head(&nbd_dev[i].recv_wq);
- nbd_reset(&nbd_dev[i]);
- add_disk(disk);
- }
+ mutex_lock(&nbd_index_mutex);
+ for (i = 0; i < nbds_max; i++)
+ nbd_dev_add(i);
+ mutex_unlock(&nbd_index_mutex);
+ return 0;
+}
+static int nbd_exit_cb(int id, void *ptr, void *data)
+{
+ struct nbd_device *nbd = ptr;
+ nbd_dev_remove(nbd);
return 0;
-out:
- while (i--) {
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- blk_cleanup_queue(nbd_dev[i].disk->queue);
- put_disk(nbd_dev[i].disk);
- }
- kfree(nbd_dev);
- return err;
}
static void __exit nbd_cleanup(void)
{
- int i;
-
nbd_dbg_close();
- for (i = 0; i < nbds_max; i++) {
- struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].magic = 0;
- if (disk) {
- del_gendisk(disk);
- blk_cleanup_queue(disk->queue);
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- put_disk(disk);
- }
- }
+ idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
+ idr_destroy(&nbd_index_idr);
+ destroy_workqueue(recv_workqueue);
unregister_blkdev(NBD_MAJOR, "nbd");
- kfree(nbd_dev);
- printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
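
The rework above replaces the fixed nbd_dev[] array with an IDR keyed by device index, which is what lets nbd_dev_add() and nbd_dev_remove() create and destroy devices one at a time. A minimal sketch of that allocation pattern, using illustrative names (my_dev, dev_idr) rather than the driver's own:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_dev {
	int index;
};

static DEFINE_IDR(dev_idr);
static DEFINE_MUTEX(dev_idr_lock);

static int dev_add(struct my_dev *dev, int index)
{
	int id;

	mutex_lock(&dev_idr_lock);
	if (index >= 0)
		/* claim exactly the requested slot: [index, index + 1) */
		id = idr_alloc(&dev_idr, dev, index, index + 1, GFP_KERNEL);
	else
		/* end == 0 means no upper bound: take the first free slot */
		id = idr_alloc(&dev_idr, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&dev_idr_lock);
	return id;	/* >= 0 on success, negative errno on failure */
}

static int dev_remove_cb(int id, void *ptr, void *data)
{
	kfree(ptr);
	return 0;
}

static void dev_remove_all(void)
{
	idr_for_each(&dev_idr, dev_remove_cb, NULL);
	idr_destroy(&dev_idr);
}
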
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a67b7ea1e3bf..6f2e565bccc5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -432,11 +432,11 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct request *rq;
struct bio *bio = rqd->bio;
- rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
+ rq = blk_mq_alloc_request(q,
+ op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return -ENOMEM;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio);
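
With REQ_TYPE_DRV_PRIV gone, a blk-mq driver now states the private nature of a request at allocation time through the op code, as null_blk does above. A sketch of that call pattern under the 4.11 API, with an illustrative function name:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Allocate a driver-private request; the direction is encoded in the op. */
static struct request *alloc_private_rq(struct request_queue *q, bool to_dev)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q,
			to_dev ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return rq;
	/* no rq->cmd_type assignment is needed anymore */
	return rq;
}
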
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 92900f5f0b47..8127b8201a01 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -308,12 +308,6 @@ static void osdblk_rq_fn(struct request_queue *q)
if (!rq)
break;
- /* filter out block requests we don't understand */
- if (rq->cmd_type != REQ_TYPE_FS) {
- blk_end_request_all(rq, 0);
- continue;
- }
-
/* deduce our operation (read, write, flush) */
/* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
* into a clearly defined set of RPC commands:
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index efefb5ac3004..3a15247942e4 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -25,6 +25,7 @@ config PARIDE_PD
config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
+ select BLK_SCSI_REQUEST # only for the generic cdrom code
---help---
This option enables the high-level driver for ATAPI CD-ROM devices
connected through a parallel port. If you chose to build PARIDE
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c3ed2fc72daa..644ba0888bd4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -439,18 +439,16 @@ static int pd_retries = 0; /* i/o error retry count */
static int pd_block; /* address of next requested block */
static int pd_count; /* number of blocks still to do */
static int pd_run; /* sectors in current cluster */
-static int pd_cmd; /* current command READ/WRITE */
static char *pd_buf; /* buffer for request in progress */
static enum action do_pd_io_start(void)
{
- if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) {
+ switch (req_op(pd_req)) {
+ case REQ_OP_DRV_IN:
phase = pd_special;
return pd_special();
- }
-
- pd_cmd = rq_data_dir(pd_req);
- if (pd_cmd == READ || pd_cmd == WRITE) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
@@ -458,7 +456,7 @@ static enum action do_pd_io_start(void)
pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
pd_retries = 0;
- if (pd_cmd == READ)
+ if (req_op(pd_req) == REQ_OP_READ)
return do_pd_read_start();
else
return do_pd_write_start();
@@ -723,11 +721,10 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq;
int err = 0;
- rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->special = func;
err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
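
On a legacy (non-mq) queue the same idea goes through blk_get_request(), as in the pd change above: the op replaces both the old READ/WRITE hint and the cmd_type assignment. A sketch of the synchronous form, with an illustrative payload handed over via rq->special:

#include <linux/blkdev.h>

static int run_private_cmd(struct request_queue *q, struct gendisk *gd,
			   void *payload)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->special = payload;	/* interpreted by the driver's request handler */
	err = blk_execute_rq(q, gd, rq, 0);
	blk_put_request(rq);
	return err;
}
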
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1b94c1ca5c5f..66d846ba85a9 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,10 +704,10 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- WRITE : READ, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,8 +716,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
goto out;
}
- rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+ scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+ memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ;
if (cgc->quiet)
@@ -1243,7 +1243,7 @@ try_next_bio:
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
if (wakeup) {
- clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+ clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC);
}
@@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+ set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
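
The pkt_generic_packet() change above shows the new SCSI passthrough shape: the request carries REQ_OP_SCSI_IN/OUT, and the CDB and its length move from struct request into the scsi_request attached to it. A condensed sketch of a no-data command sent this way (the opcode is spelled as a raw byte to keep the example self-contained):

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

static int send_test_unit_ready(struct request_queue *q, struct gendisk *gd)
{
	struct request *rq;
	struct scsi_request *req;
	int err;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);
	req = scsi_req(rq);
	req->cmd[0] = 0x00;	/* TEST UNIT READY */
	req->cmd_len = 6;
	rq->timeout = 60 * HZ;
	err = blk_execute_rq(q, gd, rq, 0);
	blk_put_request(rq);
	return err;
}
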
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 76f33c84ce3d..a809e3e9feb8 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -196,16 +196,19 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (req_op(req) == REQ_OP_FLUSH) {
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
if (ps3disk_submit_flush_request(dev, req))
- break;
- } else if (req->cmd_type == REQ_TYPE_FS) {
+ return;
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
if (ps3disk_submit_request_sg(dev, req))
- break;
- } else {
+ return;
+ break;
+ default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO);
- continue;
}
}
}
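
ps3disk's fetch loop is now a single switch on req_op(): the old "is this a filesystem request" test becomes explicit READ/WRITE cases, and a default arm fails anything unrecognized. The same dispatch skeleton, reduced to its shape with the hardware calls stubbed out:

#include <linux/blkdev.h>

static void do_request_loop(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q))) {
		switch (req_op(req)) {
		case REQ_OP_FLUSH:
			/* submit a cache flush to the hardware */
			break;
		case REQ_OP_READ:
		case REQ_OP_WRITE:
			/* submit normal I/O */
			break;
		default:
			blk_dump_rq_flags(req, "bad request");
			__blk_end_request_all(req, -EIO);
		}
	}
}
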
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 36d2b9f4e836..588721f30a22 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4099,19 +4099,21 @@ static void rbd_queue_workfn(struct work_struct *work)
bool must_be_locked;
int result;
- if (rq->cmd_type != REQ_TYPE_FS) {
- dout("%s: non-fs request type %d\n", __func__,
- (int) rq->cmd_type);
- result = -EIO;
- goto err;
- }
-
- if (req_op(rq) == REQ_OP_DISCARD)
+ switch (req_op(rq)) {
+ case REQ_OP_DISCARD:
op_type = OBJ_OP_DISCARD;
- else if (req_op(rq) == REQ_OP_WRITE)
+ break;
+ case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
- else
+ break;
+ case REQ_OP_READ:
op_type = OBJ_OP_READ;
+ break;
+ default:
+ dout("%s: non-fs request type %d\n", __func__, req_op(rq));
+ result = -EIO;
+ goto err;
+ }
/* Ignore/skip any zero-length requests */
@@ -4524,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_zeroes_data = 1;
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
disk->queue = q;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index abf805e332e2..27833e4dae2a 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1204,10 +1204,11 @@ static void skd_complete_special(struct skd_device *skdev,
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
uint cmd_in, ulong arg)
{
- int rc = 0;
+ static const int sg_version_num = 30527;
+ int rc = 0, timeout;
struct gendisk *disk = bdev->bd_disk;
struct skd_device *skdev = disk->private_data;
- void __user *p = (void *)arg;
+ int __user *p = (int __user *)arg;
pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
skdev->name, __func__, __LINE__,
@@ -1218,12 +1219,18 @@ static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd_in) {
case SG_SET_TIMEOUT:
+ rc = get_user(timeout, p);
+ if (!rc)
+ disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
+ break;
case SG_GET_TIMEOUT:
+ rc = jiffies_to_clock_t(disk->queue->sg_timeout);
+ break;
case SG_GET_VERSION_NUM:
- rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
+ rc = put_user(sg_version_num, p);
break;
case SG_IO:
- rc = skd_ioctl_sg_io(skdev, mode, p);
+ rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
break;
default:
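
The skd change above stops routing SG_SET_TIMEOUT, SG_GET_TIMEOUT and SG_GET_VERSION_NUM through scsi_cmd_ioctl() and open-codes them instead. A sketch of those three cases in isolation, assuming a queue with the standard sg_timeout field:

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <scsi/sg.h>

static int sg_simple_ioctl(struct request_queue *q, unsigned int cmd,
			   int __user *p)
{
	int timeout;

	switch (cmd) {
	case SG_SET_TIMEOUT:
		if (get_user(timeout, p))
			return -EFAULT;
		q->sg_timeout = clock_t_to_jiffies(timeout);
		return 0;
	case SG_GET_TIMEOUT:
		/* returned as the (non-negative) ioctl result */
		return jiffies_to_clock_t(q->sg_timeout);
	case SG_GET_VERSION_NUM:
		return put_user(30527, p);
	default:
		return -ENOTTY;
	}
}
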
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 0e93ad7b8511..c8e072caf56f 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
if (!crq)
return NULL;
- rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
+ rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
if (IS_ERR(rq)) {
spin_lock_irqsave(&host->lock, flags);
carm_put_request(host, crq);
@@ -620,7 +620,6 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
@@ -661,7 +660,6 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 10332c24f961..a363170e45b1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -52,11 +52,13 @@ struct virtio_blk {
};
struct virtblk_req {
- struct request *req;
- struct virtio_blk_outhdr out_hdr;
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ struct scsi_request sreq; /* for SCSI passthrough, must be first */
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
struct virtio_scsi_inhdr in_hdr;
+#endif
+ struct virtio_blk_outhdr out_hdr;
u8 status;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
struct scatterlist sg[];
};
@@ -72,28 +74,88 @@ static inline int virtblk_result(struct virtblk_req *vbr)
}
}
-static int __virtblk_add_req(struct virtqueue *vq,
- struct virtblk_req *vbr,
- struct scatterlist *data_sg,
- bool have_data)
+/*
+ * If this is a packet command we need a couple of additional headers. Behind
+ * the normal outhdr we put a segment with the scsi command block, and before
+ * the normal inhdr we put the sense data and the inhdr with additional status
+ * information.
+ */
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
+ struct scatterlist *data_sg, bool have_data)
{
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0;
- __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
+ sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
+ sgs[num_out++] = &cmd;
+
+ if (have_data) {
+ if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
+ sgs[num_out++] = data_sg;
+ else
+ sgs[num_out + num_in++] = data_sg;
+ }
+
+ sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
+ sgs[num_out + num_in++] = &sense;
+ sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
+ sgs[num_out + num_in++] = &inhdr;
+ sg_init_one(&status, &vbr->status, sizeof(vbr->status));
+ sgs[num_out + num_in++] = &status;
+
+ return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
+}
+
+static inline void virtblk_scsi_request_done(struct request *req)
+{
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtio_blk *vblk = req->q->queuedata;
+ struct scsi_request *sreq = &vbr->sreq;
+
+ sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
+ sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
+ req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
+}
+
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long data)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct virtio_blk *vblk = disk->private_data;
/*
- * If this is a packet command we need a couple of additional headers.
- * Behind the normal outhdr we put a segment with the scsi command
- * block, and before the normal inhdr we put the sense data and the
- * inhdr with additional status information.
+ * Only allow the generic SCSI ioctls if the host can support it.
*/
- if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
- sgs[num_out++] = &cmd;
- }
+ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ return -ENOTTY;
+
+ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
+ (void __user *)data);
+}
+#else
+static inline int virtblk_add_req_scsi(struct virtqueue *vq,
+ struct virtblk_req *vbr, struct scatterlist *data_sg,
+ bool have_data)
+{
+ return -EIO;
+}
+static inline void virtblk_scsi_request_done(struct request *req)
+{
+}
+#define virtblk_ioctl NULL
+#endif /* CONFIG_VIRTIO_BLK_SCSI */
+
+static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
+ struct scatterlist *data_sg, bool have_data)
+{
+ struct scatterlist hdr, status, *sgs[3];
+ unsigned int num_out = 0, num_in = 0;
+
+ sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+ sgs[num_out++] = &hdr;
if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
@@ -102,14 +164,6 @@ static int __virtblk_add_req(struct virtqueue *vq,
sgs[num_out + num_in++] = data_sg;
}
- if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
- sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
- sgs[num_out + num_in++] = &sense;
- sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
- sgs[num_out + num_in++] = &inhdr;
- }
-
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
sgs[num_out + num_in++] = &status;
@@ -119,15 +173,16 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- struct virtio_blk *vblk = req->q->queuedata;
int error = virtblk_result(vbr);
- if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
- req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
- req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
- req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
- } else if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
+ switch (req_op(req)) {
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ virtblk_scsi_request_done(req);
+ break;
+ case REQ_OP_DRV_IN:
req->errors = (error != 0);
+ break;
}
blk_mq_end_request(req, error);
@@ -146,7 +201,9 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- blk_mq_complete_request(vbr->req, vbr->req->errors);
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+
+ blk_mq_complete_request(req, req->errors);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -170,49 +227,50 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
int qid = hctx->queue_num;
int err;
bool notify = false;
+ u32 type;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
- vbr->req = req;
- if (req_op(req) == REQ_OP_FLUSH) {
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- } else {
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
- vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- break;
- case REQ_TYPE_BLOCK_PC:
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- break;
- case REQ_TYPE_DRV_PRIV:
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- break;
- default:
- /* We don't put anything else in the queue. */
- BUG();
- }
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ type = 0;
+ break;
+ case REQ_OP_FLUSH:
+ type = VIRTIO_BLK_T_FLUSH;
+ break;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ type = VIRTIO_BLK_T_SCSI_CMD;
+ break;
+ case REQ_OP_DRV_IN:
+ type = VIRTIO_BLK_T_GET_ID;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
}
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
+ vbr->out_hdr.sector = type ?
+ 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
+
blk_mq_start_request(req);
- num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
+ num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
if (num) {
- if (rq_data_dir(vbr->req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
+ err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ else
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
@@ -242,10 +300,9 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;
- req = blk_get_request(q, READ, GFP_KERNEL);
+ req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
if (IS_ERR(req))
return PTR_ERR(req);
- req->cmd_type = REQ_TYPE_DRV_PRIV;
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
@@ -257,22 +314,6 @@ out:
return err;
}
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long data)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct virtio_blk *vblk = disk->private_data;
-
- /*
- * Only allow the generic SCSI ioctls if the host can support it.
- */
- if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
- return -ENOTTY;
-
- return scsi_cmd_blk_ioctl(bdev, mode, cmd,
- (void __user *)data);
-}
-
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
@@ -538,6 +579,9 @@ static int virtblk_init_request(void *data, struct request *rq,
struct virtio_blk *vblk = data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ vbr->sreq.sense = vbr->sense;
+#endif
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}
@@ -821,7 +865,10 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
- VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
+ VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ VIRTIO_BLK_F_SCSI,
+#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
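
Note the stub pattern virtio_blk uses so SCSI passthrough can be compiled out without sprinkling #ifdefs over the I/O path: real helpers under CONFIG_VIRTIO_BLK_SCSI, inline stubs plus a NULL .ioctl otherwise. The pattern in miniature, with illustrative names:

#include <linux/blkdev.h>

#ifdef CONFIG_MY_SCSI_PASSTHROUGH
static int my_add_req_scsi(struct request *rq)
{
	/* real SCSI setup: extra cmd/sense/inhdr segments, etc. */
	return 0;
}
#else
static inline int my_add_req_scsi(struct request *rq)
{
	return -EIO;	/* unreachable: no SCSI ops get queued without the option */
}
#define my_ioctl NULL	/* leaves block_device_operations.ioctl unset */
#endif
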
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 265f1a7072e9..5067a0a952cb 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -865,7 +865,7 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
- return ((req->cmd_type != REQ_TYPE_FS) ||
+ return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c4328d9d9981..757dce2147e0 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -468,7 +468,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
struct request *req;
while ((req = blk_peek_request(q)) != NULL) {
- if (req->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(req))
break;
blk_start_request(req);
__blk_end_request_all(req, -EIO);
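
blk_rq_is_passthrough() is the new spelling of "not a normal filesystem request"; together with blk_rq_is_scsi() it replaces nearly every cmd_type comparison in this series. The helpers are one-liners over req_op(); paraphrased here with illustrative names, assuming the 4.11 <linux/blkdev.h> definitions:

#include <linux/blkdev.h>

static inline bool my_rq_is_scsi(struct request *rq)
{
	return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
}

static inline bool my_rq_is_private(struct request *rq)
{
	return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
}

static inline bool my_rq_is_passthrough(struct request *rq)
{
	return my_rq_is_scsi(rq) || my_rq_is_private(rq);
}
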
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e5ab7d9e8c45..3cd7856156b4 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -117,7 +117,7 @@ static void zram_revalidate_disk(struct zram *zram)
{
revalidate_disk(zram->disk);
/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
- zram->disk->queue->backing_dev_info.capabilities |=
+ zram->disk->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index bbbd3caa927c..87739649eac2 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -281,8 +281,8 @@
#include <linux/fcntl.h>
#include <linux/blkdev.h>
#include <linux/times.h>
-
#include <linux/uaccess.h>
+#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */
static bool debug;
@@ -2170,6 +2170,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
{
struct request_queue *q = cdi->disk->queue;
struct request *rq;
+ struct scsi_request *req;
struct bio *bio;
unsigned int len;
int nr, ret = 0;
@@ -2188,12 +2189,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
- rq = blk_get_request(q, READ, GFP_KERNEL);
+ rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
break;
}
- blk_rq_set_block_pc(rq);
+ req = scsi_req(rq);
+ scsi_req_init(rq);
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {
@@ -2201,23 +2203,23 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
break;
}
- rq->cmd[0] = GPCMD_READ_CD;
- rq->cmd[1] = 1 << 2;
- rq->cmd[2] = (lba >> 24) & 0xff;
- rq->cmd[3] = (lba >> 16) & 0xff;
- rq->cmd[4] = (lba >> 8) & 0xff;
- rq->cmd[5] = lba & 0xff;
- rq->cmd[6] = (nr >> 16) & 0xff;
- rq->cmd[7] = (nr >> 8) & 0xff;
- rq->cmd[8] = nr & 0xff;
- rq->cmd[9] = 0xf8;
-
- rq->cmd_len = 12;
+ req->cmd[0] = GPCMD_READ_CD;
+ req->cmd[1] = 1 << 2;
+ req->cmd[2] = (lba >> 24) & 0xff;
+ req->cmd[3] = (lba >> 16) & 0xff;
+ req->cmd[4] = (lba >> 8) & 0xff;
+ req->cmd[5] = lba & 0xff;
+ req->cmd[6] = (nr >> 16) & 0xff;
+ req->cmd[7] = (nr >> 8) & 0xff;
+ req->cmd[8] = nr & 0xff;
+ req->cmd[9] = 0xf8;
+
+ req->cmd_len = 12;
rq->timeout = 60 * HZ;
bio = rq->bio;
if (blk_execute_rq(q, cdi->disk, rq, 0)) {
- struct request_sense *s = rq->sense;
+ struct request_sense *s = req->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
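
The cdrom_read_cdda_bpc() conversion above is the fullest example of the new passthrough flow: map a user buffer, fill the CDB through scsi_req(), and read sense data back from the same structure on failure. A sketch of that pattern with illustrative names and trimmed error paths:

#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <scsi/scsi_request.h>

static int pc_read_user(struct request_queue *q, struct gendisk *gd,
			const u8 *cdb, void __user *ubuf, unsigned int len,
			u8 *sense_key)
{
	struct request *rq;
	struct scsi_request *req;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = scsi_req(rq);
	scsi_req_init(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	memcpy(req->cmd, cdb, 12);
	req->cmd_len = 12;
	rq->timeout = 60 * HZ;

	if (blk_execute_rq(q, gd, rq, 0)) {
		struct request_sense *s = req->sense;

		*sense_key = s->sense_key;
		ret = -EIO;
	}
out:
	blk_put_request(rq);
	return ret;
}
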
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1afab6558d0c..1372763a948f 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -659,23 +659,24 @@ static void gdrom_request(struct request_queue *rq)
struct request *req;
while ((req = blk_fetch_request(rq)) != NULL) {
- if (req->cmd_type != REQ_TYPE_FS) {
- printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
- __blk_end_request_all(req, -EIO);
- continue;
- }
- if (rq_data_dir(req) != READ) {
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ /*
+ * Add to list of deferred work and then schedule
+ * workqueue.
+ */
+ list_add_tail(&req->queuelist, &gdrom_deferred);
+ schedule_work(&work);
+ break;
+ case REQ_OP_WRITE:
pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO);
- continue;
+ break;
+ default:
+ printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+ __blk_end_request_all(req, -EIO);
+ break;
}
-
- /*
- * Add to list of deferred work and then schedule
- * workqueue.
- */
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
}
}
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 39ea67f9b066..c99a25c075bc 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -10,6 +10,7 @@ menuconfig IDE
tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
depends on HAVE_IDE
depends on BLOCK
+ select BLK_SCSI_REQUEST
---help---
If you say Y here, your kernel will be able to manage ATA/(E)IDE and
ATAPI units. The most common cases are IDE hard drives and ATAPI
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index f90ea221f7f2..feb30061123b 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -92,8 +92,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct request *rq;
int error;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = (char *)pc;
if (buf && bufflen) {
@@ -103,9 +104,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
goto put_req;
}
- memcpy(rq->cmd, pc->c, 12);
+ memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape)
- rq->cmd[13] = REQ_IDETAPE_PC1;
+ scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
put_req:
blk_put_request(rq);
@@ -171,7 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
struct request_sense *sense = &drive->sense_data;
- struct request *sense_rq = &drive->sense_rq;
+ struct request *sense_rq = drive->sense_rq;
+ struct scsi_request *req = scsi_req(sense_rq);
unsigned int cmd_len, sense_len;
int err;
@@ -191,12 +193,13 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
BUG_ON(sense_len > sizeof(*sense));
- if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed)
+ if (ata_sense_request(rq) || drive->sense_rq_armed)
return;
memset(sense, 0, sizeof(*sense));
blk_rq_init(rq->q, sense_rq);
+ scsi_req_init(sense_rq);
err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
GFP_NOIO);
@@ -208,13 +211,14 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
}
sense_rq->rq_disk = rq->rq_disk;
- sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
- sense_rq->cmd[4] = cmd_len;
- sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
+ sense_rq->cmd_flags = REQ_OP_DRV_IN;
+ ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT;
+ req->cmd[0] = GPCMD_REQUEST_SENSE;
+ req->cmd[4] = cmd_len;
if (drive->media == ide_tape)
- sense_rq->cmd[13] = REQ_IDETAPE_PC1;
+ req->cmd[13] = REQ_IDETAPE_PC1;
drive->sense_rq_armed = true;
}
@@ -229,12 +233,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
return -ENOMEM;
}
- drive->sense_rq.special = special;
+ drive->sense_rq->special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
- elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
+ elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@@ -247,14 +251,14 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
void ide_retry_pc(ide_drive_t *drive)
{
struct request *failed_rq = drive->hwif->rq;
- struct request *sense_rq = &drive->sense_rq;
+ struct request *sense_rq = drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive);
/* init pc from sense_rq */
ide_init_pc(pc);
- memcpy(pc->c, sense_rq->cmd, 12);
+ memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);
if (drive->media == ide_tape)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
@@ -286,7 +290,7 @@ int ide_cd_expiry(ide_drive_t *drive)
* commands/drives support that. Let ide_timer_expiry keep polling us
* for these.
*/
- switch (rq->cmd[0]) {
+ switch (scsi_req(rq)->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
@@ -297,7 +301,7 @@ int ide_cd_expiry(ide_drive_t *drive)
default:
if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
- rq->cmd[0]);
+ scsi_req(rq)->cmd[0]);
wait = 0;
break;
}
@@ -307,15 +311,21 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);
int ide_cd_get_xferlen(struct request *rq)
{
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
return 32768;
- case REQ_TYPE_ATA_SENSE:
- case REQ_TYPE_BLOCK_PC:
- case REQ_TYPE_ATA_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
return blk_rq_bytes(rq);
- default:
- return 0;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ switch (ide_req(rq)->type) {
+ case ATA_PRIV_PC:
+ case ATA_PRIV_SENSE:
+ return blk_rq_bytes(rq);
+ default:
+ return 0;
+ }
}
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
@@ -374,7 +384,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
drive->name, __func__, ireason);
}
- if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
+ if (dev_is_idecd(drive) && ata_pc_request(rq))
rq->rq_flags |= RQF_FAILED;
return 1;
@@ -420,7 +430,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
} else
- rq->resid_len = 0;
+ scsi_req(rq)->resid_len = 0;
debug_log("%s: DMA finished\n", drive->name);
}
@@ -436,7 +446,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
local_irq_enable_in_hardirq();
if (drive->media == ide_tape &&
- (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE)
+ (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
stat &= ~ATA_ERR;
if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
@@ -446,7 +456,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (drive->media != ide_tape)
pc->rq->errors++;
- if (rq->cmd[0] == REQUEST_SENSE) {
+ if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR PFX "%s: I/O error in request "
"sense command\n", drive->name);
return ide_do_reset(drive);
@@ -477,12 +487,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (uptodate == 0)
drive->failed_pc = NULL;
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
+ if (ata_misc_request(rq)) {
rq->errors = 0;
error = 0;
} else {
- if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
+ if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}
@@ -512,7 +522,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pio_bytes(drive, cmd, write, done);
/* Update transferred byte count */
- rq->resid_len -= done;
+ scsi_req(rq)->resid_len -= done;
bcount -= done;
@@ -520,7 +530,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
- rq->cmd[0], done, bcount, rq->resid_len);
+ rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
@@ -603,7 +613,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
if (dev_is_idecd(drive)) {
/* ATAPI commands get padded out to 12 bytes minimum */
- cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES;
@@ -650,7 +660,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
/* Send the actual packet */
if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
- hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
+ hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
/* Begin DMA, if necessary */
if (dev_is_idecd(drive)) {
@@ -695,7 +705,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
bytes, 63 * 1024));
/* We haven't transferred any data yet */
- rq->resid_len = bcount;
+ scsi_req(rq)->resid_len = bcount;
if (pc->flags & PC_FLAG_DMA_ERROR) {
pc->flags &= ~PC_FLAG_DMA_ERROR;
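
Across the IDE changes the old REQ_TYPE_ATA_* values become an ide-private sub-type stored in the request payload and queried through ide_req(). The helpers referenced above (ata_sense_request(), ata_misc_request(), and friends) all follow the same one-line shape; a paraphrase with illustrative names, assuming the 4.11 <linux/ide.h> definitions:

#include <linux/blkdev.h>
#include <linux/ide.h>

/* A request is "ours" if it is a private op and carries the right type. */
static inline bool my_is_sense_rq(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
}

static inline bool my_is_misc_rq(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
}
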
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ab9232e1e16f..aef00511ca86 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -121,7 +121,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
* don't log START_STOP unit with LoEj set, since we cannot
* reliably check if drive can auto-close
*/
- if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
+ if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
break;
log = 1;
break;
@@ -163,7 +163,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
* toc has not been recorded yet, it will fail with 05/24/00 (which is a
* confusing error)
*/
- if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
+ if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (sense->sense_key == 0x05 && sense->asc == 0x24)
return;
@@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
if (!sense->valid)
break;
if (failed_command == NULL ||
- failed_command->cmd_type != REQ_TYPE_FS)
+ blk_rq_is_passthrough(failed_command))
break;
sector = (sense->information[0] << 24) |
(sense->information[1] << 16) |
@@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{
/*
- * For REQ_TYPE_ATA_SENSE, "rq->special" points to the original
+ * For ATA_PRIV_SENSE, "rq->special" points to the original
* failed request. Also, the sense data should be read
* directly from rq which might be different from the original
* sense buffer if it got copied during mapping.
@@ -219,15 +219,12 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
void *sense = bio_data(rq->bio);
if (failed) {
- if (failed->sense) {
- /*
- * Sense is always read into drive->sense_data.
- * Copy back if the failed request has its
- * sense pointer set.
- */
- memcpy(failed->sense, sense, 18);
- failed->sense_len = rq->sense_len;
- }
+ /*
+ * Sense is always read into drive->sense_data, copy back to the
+ * original request.
+ */
+ memcpy(scsi_req(failed)->sense, sense, 18);
+ scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
cdrom_analyze_sense_data(drive, failed);
if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
@@ -285,7 +282,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
"stat 0x%x",
rq->cmd[0], rq->cmd_type, err, stat);
- if (rq->cmd_type == REQ_TYPE_ATA_SENSE) {
+ if (ata_sense_request(rq)) {
/*
* We got an error trying to get sense info from the drive
* (probably while trying to recover from a former error).
@@ -296,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}
/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
+ if (blk_rq_is_scsi(rq) && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION;
if (blk_noretry_request(rq))
@@ -304,13 +301,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
switch (sense_key) {
case NOT_READY:
- if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
+ if (req_op(rq) == REQ_OP_WRITE) {
if (ide_cd_breathe(drive, rq))
return 1;
} else {
cdrom_saw_media_change(drive);
- if (rq->cmd_type == REQ_TYPE_FS &&
+ if (!blk_rq_is_passthrough(rq) &&
!(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
@@ -320,7 +317,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
case UNIT_ATTENTION:
cdrom_saw_media_change(drive);
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
return 0;
/*
@@ -338,7 +335,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
*
* cdrom_log_sense() knows this!
*/
- if (rq->cmd[0] == GPCMD_START_STOP_UNIT)
+ if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
break;
/* fall-through */
case DATA_PROTECT:
@@ -368,7 +365,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
break;
default:
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
break;
if (err & ~ATA_ABORTED) {
/* go to the default handler for other errors */
@@ -379,7 +376,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
}
- if (rq->cmd_type != REQ_TYPE_FS) {
+ if (blk_rq_is_passthrough(rq)) {
rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}
@@ -414,7 +411,7 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
* Some of the trailing request sense fields are optional,
* and some drives don't send them. Sigh.
*/
- if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
+ if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
cmd->nleft > 0 && cmd->nleft <= 5)
cmd->nleft = 0;
}
@@ -425,12 +422,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
- struct request_sense local_sense;
int retries = 10;
- req_flags_t flags = 0;
-
- if (!sense)
- sense = &local_sense;
+ bool failed;
ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
"rq_flags: 0x%x",
@@ -440,12 +433,13 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
do {
struct request *rq;
int error;
+ bool delay = false;
- rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
-
- memcpy(rq->cmd, cmd, BLK_MAX_CDB);
- rq->cmd_type = REQ_TYPE_ATA_PC;
- rq->sense = sense;
+ rq = blk_get_request(drive->queue,
+ write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
+ ide_req(rq)->type = ATA_PRIV_PC;
rq->rq_flags |= rq_flags;
rq->timeout = timeout;
if (buffer) {
@@ -460,21 +454,21 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer)
- *bufflen = rq->resid_len;
-
- flags = rq->rq_flags;
- blk_put_request(rq);
+ *bufflen = scsi_req(rq)->resid_len;
+ if (sense)
+ memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
/*
* FIXME: we should probably abort/retry or something in case of
* failure.
*/
- if (flags & RQF_FAILED) {
+ failed = (rq->rq_flags & RQF_FAILED) != 0;
+ if (failed) {
/*
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
*/
- struct request_sense *reqbuf = sense;
+ struct request_sense *reqbuf = scsi_req(rq)->sense;
if (reqbuf->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
@@ -485,19 +479,20 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* a disk. Retry, but wait a little to give
* the drive time to complete the load.
*/
- ssleep(2);
+ delay = true;
} else {
/* otherwise, don't retry */
retries = 0;
}
--retries;
}
-
- /* end of retry loop */
- } while ((flags & RQF_FAILED) && retries >= 0);
+ blk_put_request(rq);
+ if (delay)
+ ssleep(2);
+ } while (failed && retries >= 0);
/* return an error if the command failed */
- return (flags & RQF_FAILED) ? -EIO : 0;
+ return failed ? -EIO : 0;
}
/*
@@ -526,7 +521,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
- int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE);
+ int sense = ata_sense_request(rq);
unsigned int timeout;
u16 len;
u8 ireason, stat;
@@ -569,7 +564,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_read_bcount_and_ireason(drive, &len, &ireason);
- thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
+ thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
if (thislen > len)
thislen = len;
@@ -578,7 +573,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
/* If DRQ is clear, the command has completed. */
if ((stat & ATA_DRQ) == 0) {
- if (rq->cmd_type == REQ_TYPE_FS) {
+ switch (req_op(rq)) {
+ default:
/*
* If we're not done reading/writing, complain.
* Otherwise, complete the command normally.
@@ -592,7 +588,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
- } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+ goto out_end;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
ide_cd_request_sense_fixup(drive, cmd);
uptodate = cmd->nleft ? 0 : 1;
@@ -608,8 +606,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (!uptodate)
rq->rq_flags |= RQF_FAILED;
+ goto out_end;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ goto out_end;
}
- goto out_end;
}
rc = ide_check_ireason(drive, rq, len, ireason, write);
@@ -636,12 +637,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
len -= blen;
if (sense && write == 0)
- rq->sense_len += blen;
+ scsi_req(rq)->sense_len += blen;
}
/* pad, if necessary */
if (len > 0) {
- if (rq->cmd_type != REQ_TYPE_FS || write == 0)
+ if (blk_rq_is_passthrough(rq) || write == 0)
ide_pad_transfer(drive, write, len);
else {
printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -650,12 +651,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
}
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ switch (req_op(rq)) {
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
timeout = rq->timeout;
- } else {
+ break;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ expiry = ide_cd_expiry;
+ /*FALLTHRU*/
+ default:
timeout = ATAPI_WAIT_PC;
- if (rq->cmd_type != REQ_TYPE_FS)
- expiry = ide_cd_expiry;
+ break;
}
hwif->expiry = expiry;
@@ -663,15 +670,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_started;
out_end:
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
- rq->resid_len = 0;
+ if (blk_rq_is_scsi(rq) && rc == 0) {
+ scsi_req(rq)->resid_len = 0;
blk_end_request_all(rq, 0);
hwif->rq = NULL;
} else {
if (sense && uptodate)
ide_cd_complete_failed_rq(drive, rq);
- if (rq->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(rq)) {
if (cmd->nleft == 0)
uptodate = 1;
} else {
@@ -684,10 +691,10 @@ out_end:
return ide_stopped;
/* make sure it's fully ended */
- if (rq->cmd_type != REQ_TYPE_FS) {
- rq->resid_len -= cmd->nbytes - cmd->nleft;
+ if (blk_rq_is_passthrough(rq)) {
+ scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- rq->resid_len += cmd->last_xfer_len;
+ scsi_req(rq)->resid_len += cmd->last_xfer_len;
}
ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
@@ -744,7 +751,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
rq->cmd[0], rq->cmd_type);
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ if (blk_rq_is_scsi(rq))
rq->rq_flags |= RQF_QUIET;
else
rq->rq_flags &= ~RQF_FAILED;
@@ -786,25 +793,31 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
if (drive->debug_mask & IDE_DBG_RQ)
blk_dump_rq_flags(rq, "ide_cd_do_request");
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
if (cdrom_start_rw(drive, rq) == ide_stopped)
goto out_end;
break;
- case REQ_TYPE_ATA_SENSE:
- case REQ_TYPE_BLOCK_PC:
- case REQ_TYPE_ATA_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ handle_pc:
if (!rq->timeout)
rq->timeout = ATAPI_WAIT_PC;
-
cdrom_do_block_pc(drive, rq);
break;
- case REQ_TYPE_DRV_PRIV:
- /* right now this can only be a reset... */
- uptodate = 1;
- goto out_end;
- default:
- BUG();
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ switch (ide_req(rq)->type) {
+ case ATA_PRIV_MISC:
+ /* right now this can only be a reset... */
+ uptodate = 1;
+ goto out_end;
+ case ATA_PRIV_SENSE:
+ case ATA_PRIV_PC:
+ goto handle_pc;
+ default:
+ BUG();
+ }
}
/* prepare sense request for this command */
@@ -817,7 +830,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
- if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+ if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
@@ -1312,28 +1325,29 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
int hard_sect = queue_logical_block_size(q);
long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
+ struct scsi_request *req = scsi_req(rq);
- memset(rq->cmd, 0, BLK_MAX_CDB);
+ memset(req->cmd, 0, BLK_MAX_CDB);
if (rq_data_dir(rq) == READ)
- rq->cmd[0] = GPCMD_READ_10;
+ req->cmd[0] = GPCMD_READ_10;
else
- rq->cmd[0] = GPCMD_WRITE_10;
+ req->cmd[0] = GPCMD_WRITE_10;
/*
* fill in lba
*/
- rq->cmd[2] = (block >> 24) & 0xff;
- rq->cmd[3] = (block >> 16) & 0xff;
- rq->cmd[4] = (block >> 8) & 0xff;
- rq->cmd[5] = block & 0xff;
+ req->cmd[2] = (block >> 24) & 0xff;
+ req->cmd[3] = (block >> 16) & 0xff;
+ req->cmd[4] = (block >> 8) & 0xff;
+ req->cmd[5] = block & 0xff;
/*
* and transfer length
*/
- rq->cmd[7] = (blocks >> 8) & 0xff;
- rq->cmd[8] = blocks & 0xff;
- rq->cmd_len = 10;
+ req->cmd[7] = (blocks >> 8) & 0xff;
+ req->cmd[8] = blocks & 0xff;
+ req->cmd_len = 10;
return BLKPREP_OK;
}
@@ -1343,7 +1357,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
*/
static int ide_cdrom_prep_pc(struct request *rq)
{
- u8 *c = rq->cmd;
+ u8 *c = scsi_req(rq)->cmd;
/* transform 6-byte read/write commands to the 10-byte version */
if (c[0] == READ_6 || c[0] == WRITE_6) {
@@ -1354,7 +1368,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
c[2] = 0;
c[1] &= 0xe0;
c[0] += (READ_10 - READ_6);
- rq->cmd_len = 10;
+ scsi_req(rq)->cmd_len = 10;
return BLKPREP_OK;
}
@@ -1373,9 +1387,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(rq))
return ide_cdrom_prep_fs(q, rq);
- else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ else if (blk_rq_is_scsi(rq))
return ide_cdrom_prep_pc(rq);
return 0;
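
One behavioral detail in the ide_cd_queue_pc() rework above is worth calling out: sense data is now copied out of scsi_req(rq) while the request is still alive, the request is put, and only then does the retry path sleep, so the two-second load delay no longer pins a request. Reduced to its control flow, with the sense inspection stubbed and assuming a queue whose per-request data embeds a struct scsi_request, as the IDE queues do after this series:

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_request.h>

static int queue_pc_with_retry(struct request_queue *q, struct gendisk *gd)
{
	int retries = 10;
	bool failed;

	do {
		struct request *rq;
		bool delay = false;

		rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		scsi_req_init(rq);
		blk_execute_rq(q, gd, rq, 0);

		failed = (rq->rq_flags & RQF_FAILED) != 0;
		if (failed) {
			/* inspect scsi_req(rq)->sense here to decide on retry */
			delay = true;
			--retries;
		}
		blk_put_request(rq);	/* release before sleeping */
		if (delay)
			ssleep(2);
	} while (failed && retries >= 0);

	return failed ? -EIO : 0;
}
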
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index f085e3a2e1d6..9fcefbc8425e 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -303,8 +303,9 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
struct request *rq;
int ret;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index f079ca2f260b..58a6feb74c02 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -315,12 +315,12 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
while (hi > lo) {
mid = (lo + hi) / 2;
if (packet_command_texts[mid].packet_command ==
- failed_command->cmd[0]) {
+ scsi_req(failed_command)->cmd[0]) {
s = packet_command_texts[mid].text;
break;
}
if (packet_command_texts[mid].packet_command >
- failed_command->cmd[0])
+ scsi_req(failed_command)->cmd[0])
hi = mid;
else
lo = mid + 1;
@@ -329,7 +329,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
printk(KERN_ERR " The failed \"%s\" packet command "
"was: \n \"", s);
for (i = 0; i < BLK_MAX_CDB; i++)
- printk(KERN_CONT "%02x ", failed_command->cmd[i]);
+ printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
printk(KERN_CONT "\"\n");
}
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 0dd43b4fcec6..a45dda5386e4 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -165,11 +165,12 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
if (!(setting->flags & DS_SYNC))
return setting->set(drive, arg);
- rq = blk_get_request(q, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_len = 5;
- rq->cmd[0] = REQ_DEVSET_EXEC;
- *(int *)&rq->cmd[1] = arg;
+ rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
+ scsi_req(rq)->cmd_len = 5;
+ scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
+ *(int *)&scsi_req(rq)->cmd[1] = arg;
rq->special = setting->set;
if (blk_execute_rq(q, NULL, rq, 0))
@@ -183,7 +184,7 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
int err, (*setfunc)(ide_drive_t *, int) = rq->special;
- err = setfunc(drive, *(int *)&rq->cmd[1]);
+ err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
if (err)
rq->errors = err;
ide_complete_rq(drive, err, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 5ceace542b77..186159715b71 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_hwif_t *hwif = drive->hwif;
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
- BUG_ON(rq->cmd_type != REQ_TYPE_FS);
+ BUG_ON(blk_rq_is_passthrough(rq));
ledtrig_disk_activity();
@@ -452,8 +452,9 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd->tf_flags = IDE_TFLAG_DYN;
cmd->protocol = ATA_PROT_NODATA;
-
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->cmd_flags &= ~REQ_OP_MASK;
+ rq->cmd_flags |= REQ_OP_DRV_OUT;
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq->special = cmd;
cmd->rq = rq;
@@ -477,8 +478,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index d6da011299f5..cf3af6840368 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -123,8 +123,8 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
return ide_stopped;
/* retry only "normal" I/O: */
- if (rq->cmd_type != REQ_TYPE_FS) {
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ if (blk_rq_is_passthrough(rq)) {
+ if (ata_taskfile_request(rq)) {
struct ide_cmd *cmd = rq->special;
if (cmd)
@@ -147,8 +147,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
struct request *rq = drive->hwif->rq;
- if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
- rq->cmd[0] == REQ_DRIVE_RESET) {
+ if (rq && ata_misc_request(rq) &&
+ scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f079d8d1d856..a69e8013f1df 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
drive->failed_pc = NULL;
if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
uptodate = 1; /* FIXME */
else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
@@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
"Aborting request!\n");
}
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (ata_misc_request(rq))
rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
return uptodate;
@@ -203,7 +203,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
- memcpy(rq->cmd, pc->c, 12);
+ memcpy(scsi_req(rq)->cmd, pc->c, 12);
pc->rq = rq;
if (cmd == WRITE)
@@ -216,7 +216,7 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
struct ide_atapi_pc *pc, struct request *rq)
{
ide_init_pc(pc);
- memcpy(pc->c, rq->cmd, sizeof(pc->c));
+ memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
pc->rq = rq;
if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK;
@@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
} else
printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
+ if (ata_misc_request(rq)) {
rq->errors = 0;
ide_complete_rq(drive, 0, blk_rq_bytes(rq));
return ide_stopped;
@@ -254,8 +254,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
(blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -265,16 +265,21 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
break;
- case REQ_TYPE_DRV_PRIV:
- case REQ_TYPE_ATA_SENSE:
- pc = (struct ide_atapi_pc *)rq->special;
- break;
- case REQ_TYPE_BLOCK_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
pc = &floppy->queued_pc;
idefloppy_blockpc_cmd(floppy, pc, rq);
break;
- default:
- BUG();
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ switch (ide_req(rq)->type) {
+ case ATA_PRIV_MISC:
+ case ATA_PRIV_SENSE:
+ pc = (struct ide_atapi_pc *)rq->special;
+ break;
+ default:
+ BUG();
+ }
}
ide_prep_sense(drive, rq);
@@ -286,7 +291,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
cmd.rq = rq;
- if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+ if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
@@ -296,7 +301,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
drive->failed_pc = NULL;
- if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+ if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
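
The rewritten dispatch keys off req_op(): normal filesystem reads and writes fall through to default:, SG_IO passthrough arrives as REQ_OP_SCSI_IN/OUT, and driver-internal commands as REQ_OP_DRV_IN/OUT, further discriminated by the private subtype. A condensed sketch of that two-level dispatch:

/* Sketch of the two-level dispatch used above. */
static void sketch_dispatch(struct request *rq)
{
        switch (req_op(rq)) {
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                /* block-layer SCSI passthrough (e.g. SG_IO) */
                break;
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
                switch (ide_req(rq)->type) {    /* driver-private subtype */
                case ATA_PRIV_MISC:
                case ATA_PRIV_SENSE:
                        break;
                default:
                        BUG();
                }
                break;
        default:
                /* REQ_OP_READ / REQ_OP_WRITE: regular filesystem I/O */
                break;
        }
}
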
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 201e43fcbc94..043b1fb963cb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -102,7 +102,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
drive->dev_flags |= IDE_DFLAG_PARKED;
}
- if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ if (rq && ata_taskfile_request(rq)) {
struct ide_cmd *orig_cmd = rq->special;
if (cmd->tf_flags & IDE_TFLAG_DYN)
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
- u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk;
+ u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
u8 media = drive->media;
drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
- else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+ else if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO;
}
@@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
- u8 cmd = rq->cmd[0];
+ u8 cmd = scsi_req(rq)->cmd[0];
switch (cmd) {
case REQ_PARK_HEADS:
@@ -340,7 +340,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (drive->current_speed == 0xff)
ide_config_drive_speed(drive, drive->desired_speed);
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ if (ata_taskfile_request(rq))
return execute_drive_cmd(drive, rq);
else if (ata_pm_request(rq)) {
struct ide_pm_state *pm = rq->special;
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
pm->pm_step == IDE_PM_COMPLETED)
ide_complete_pm_rq(drive, rq);
return startstop;
- } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ } else if (!rq->rq_disk && ata_misc_request(rq))
/*
* TODO: Once all ULDs have been modified to
* check for specific op codes rather than
@@ -545,6 +545,7 @@ repeat:
goto plug_device;
}
+ scsi_req(rq)->resid_len = blk_rq_bytes(rq);
hwif->rq = rq;
spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d05db2469209..248a3e0ceb46 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -125,8 +125,9 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
if (NULL == (void *) arg) {
struct request *rq;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
err = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
@@ -221,10 +222,11 @@ static int generic_drive_reset(ide_drive_t *drive)
struct request *rq;
int ret = 0;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_len = 1;
- rq->cmd[0] = REQ_DRIVE_RESET;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
+ scsi_req(rq)->cmd_len = 1;
+ scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
if (blk_execute_rq(drive->queue, NULL, rq, 1))
ret = rq->errors;
blk_put_request(rq);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 2d7dca56dd24..101aed9a61ca 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -31,10 +31,11 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
}
spin_unlock_irq(&hwif->lock);
- rq = blk_get_request(q, READ, __GFP_RECLAIM);
- rq->cmd[0] = REQ_PARK_HEADS;
- rq->cmd_len = 1;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
+ scsi_req(rq)->cmd_len = 1;
+ ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = &timeout;
rc = blk_execute_rq(q, NULL, rq, 1);
blk_put_request(rq);
@@ -45,13 +46,14 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
* Make sure that *some* command is sent to the drive after the
* timeout has expired, so power management will be reenabled.
*/
- rq = blk_get_request(q, READ, GFP_NOWAIT);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
if (IS_ERR(rq))
goto out;
+ scsi_req_init(rq);
- rq->cmd[0] = REQ_UNPARK_HEADS;
- rq->cmd_len = 1;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
+ scsi_req(rq)->cmd_len = 1;
+ ide_req(rq)->type = ATA_PRIV_MISC;
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
out:
@@ -64,7 +66,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
struct ide_taskfile *tf = &cmd.tf;
memset(&cmd, 0, sizeof(cmd));
- if (rq->cmd[0] == REQ_PARK_HEADS) {
+ if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
drive->sleep = *(unsigned long *)rq->special;
drive->dev_flags |= IDE_DFLAG_SLEEPING;
tf->command = ATA_CMD_IDLEIMMEDIATE;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index a015acdffb39..ec951be4b0c8 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -18,8 +18,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
@@ -88,8 +89,9 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_PM_RESUME;
rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
@@ -221,10 +223,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
#ifdef DEBUG_PM
printk("%s: completing PM request, %s\n", drive->name,
- (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume");
+ (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
spin_lock_irqsave(q->queue_lock, flags);
- if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND)
+ if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
blk_stop_queue(q);
else
drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -240,11 +242,13 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
struct ide_pm_state *pm = rq->special;
- if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND &&
+ if (blk_rq_is_private(rq) &&
+ ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
pm->pm_step == IDE_PM_START_SUSPEND)
/* Mark drive blocked when starting the suspend sequence. */
drive->dev_flags |= IDE_DFLAG_BLOCKED;
- else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME &&
+ else if (blk_rq_is_private(rq) &&
+ ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
pm->pm_step == IDE_PM_START_RESUME) {
/*
* The first thing we do on wakeup is to wait for BSY bit to
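
Power-management requests are now identified by the private op plus the ide_req() subtype, which is why both tests appear together above. A hedged sketch of the pair of checks as a helper (hypothetical name; the diff open-codes it):

/* Hypothetical helper equivalent to the open-coded test above. */
static inline bool ide_rq_is_pm_suspend(struct request *rq)
{
        return blk_rq_is_private(rq) &&
               ide_req(rq)->type == ATA_PRIV_PM_SUSPEND;
}
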
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 330e319419e6..a74ae8df4bb8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -741,6 +741,14 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
}
}
+static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+ struct ide_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->sreq.sense = req->sense;
+ return 0;
+}
+
/*
* init request queue
*/
@@ -758,11 +766,18 @@ static int ide_init_queue(ide_drive_t *drive)
* limits and LBA48 we could raise it but as yet
* do not.
*/
-
- q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
+ q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
if (!q)
return 1;
+ q->request_fn = do_ide_request;
+ q->init_rq_fn = ide_init_rq;
+ q->cmd_size = sizeof(struct ide_request);
+ if (blk_init_allocated_queue(q) < 0) {
+ blk_cleanup_queue(q);
+ return 1;
+ }
+
q->queuedata = drive;
blk_queue_segment_boundary(q, 0xffff);
@@ -1131,10 +1146,12 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
ide_port_for_each_dev(i, drive, hwif) {
u8 j = (hwif->index * MAX_DRIVES) + i;
u16 *saved_id = drive->id;
+ struct request *saved_sense_rq = drive->sense_rq;
memset(drive, 0, sizeof(*drive));
memset(saved_id, 0, SECTOR_SIZE);
drive->id = saved_id;
+ drive->sense_rq = saved_sense_rq;
drive->media = ide_disk;
drive->select = (i << 4) | ATA_DEVICE_OBS;
@@ -1241,6 +1258,7 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
int i;
ide_port_for_each_dev(i, drive, hwif) {
+ kfree(drive->sense_rq);
kfree(drive->id);
kfree(drive);
}
@@ -1248,11 +1266,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
{
+ ide_drive_t *drive;
int i;
for (i = 0; i < MAX_DRIVES; i++) {
- ide_drive_t *drive;
-
drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
if (drive == NULL)
goto out_nomem;
@@ -1267,12 +1284,21 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
*/
drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
if (drive->id == NULL)
- goto out_nomem;
+ goto out_free_drive;
+
+ drive->sense_rq = kmalloc(sizeof(struct request) +
+ sizeof(struct ide_request), GFP_KERNEL);
+ if (!drive->sense_rq)
+ goto out_free_id;
hwif->devices[i] = drive;
}
return 0;
+out_free_id:
+ kfree(drive->id);
+out_free_drive:
+ kfree(drive);
out_nomem:
ide_port_free_devices(hwif);
return -ENOMEM;
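
ide-probe now sizes a per-request PDU and wires an init_rq_fn so each request carries its own sense buffer, while the kmalloc'ed sense_rq gives every drive a preallocated request for out-of-band sense fetching. Roughly, the PDU layout these hunks rely on (the authoritative definition is in include/linux/ide.h):

/* Sketch of the per-request PDU implied by ide_init_rq() above. */
struct ide_request {
        struct scsi_request sreq;       /* first member: scsi_req() casts to it */
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        u8 type;                        /* ATA_PRIV_* subtype */
};

static inline struct ide_request *ide_req(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);
}

ide_init_rq() then only has to point sreq.sense at the embedded buffer.
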
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 9ecf4e35adcd..3c1b7974d66d 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -282,7 +282,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
/* correct remaining bytes to transfer */
if (pc->flags & PC_FLAG_DMA_ERROR)
- rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
+ scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
/*
* If error was the result of a zero-length read or write command,
@@ -316,7 +316,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
pc->flags |= PC_FLAG_ABORT;
}
if (!(pc->flags & PC_FLAG_ABORT) &&
- (blk_rq_bytes(rq) - rq->resid_len))
+ (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
}
}
@@ -348,7 +348,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
"itself - Aborting request!\n");
} else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
unsigned int blocks =
- (blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size;
+ (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
tape->avg_size += blocks * tape->blk_size;
@@ -560,7 +560,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
pc->flags |= PC_FLAG_WRITING;
}
- memcpy(rq->cmd, pc->c, 12);
+ memcpy(scsi_req(rq)->cmd, pc->c, 12);
}
static ide_startstop_t idetape_do_request(ide_drive_t *drive,
@@ -570,14 +570,16 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc *pc = NULL;
struct ide_cmd cmd;
+ struct scsi_request *req = scsi_req(rq);
u8 stat;
ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
- rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
+ req->cmd[0], (unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq));
- BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV ||
- rq->cmd_type == REQ_TYPE_ATA_SENSE));
+ BUG_ON(!blk_rq_is_private(rq));
+ BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
+ ide_req(rq)->type != ATA_PRIV_SENSE);
/* Retry a failed packet command */
if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -592,7 +594,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
stat = hwif->tp_ops->read_status(hwif);
if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
- (rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
+ (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
@@ -609,7 +611,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
} else if (time_after(jiffies, tape->dsc_timeout)) {
printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
tape->name);
- if (rq->cmd[13] & REQ_IDETAPE_PC2) {
+ if (req->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive);
return ide_stopped;
} else {
@@ -626,23 +628,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
tape->postponed_rq = false;
}
- if (rq->cmd[13] & REQ_IDETAPE_READ) {
+ if (req->cmd[13] & REQ_IDETAPE_READ) {
pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
goto out;
}
- if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
+ if (req->cmd[13] & REQ_IDETAPE_WRITE) {
pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
goto out;
}
- if (rq->cmd[13] & REQ_IDETAPE_PC1) {
+ if (req->cmd[13] & REQ_IDETAPE_PC1) {
pc = (struct ide_atapi_pc *)rq->special;
- rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
- rq->cmd[13] |= REQ_IDETAPE_PC2;
+ req->cmd[13] &= ~(REQ_IDETAPE_PC1);
+ req->cmd[13] |= REQ_IDETAPE_PC2;
goto out;
}
- if (rq->cmd[13] & REQ_IDETAPE_PC2) {
+ if (req->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive);
return ide_stopped;
}
@@ -852,9 +854,10 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
BUG_ON(size < 0 || size % tape->blk_size);
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd[13] = cmd;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
+ scsi_req(rq)->cmd[13] = cmd;
rq->rq_disk = tape->disk;
rq->__sector = tape->first_frame;
@@ -868,7 +871,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
blk_execute_rq(drive->queue, tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
- size -= rq->resid_len;
+ size -= scsi_req(rq)->resid_len;
tape->cur = tape->buf;
if (cmd == REQ_IDETAPE_READ)
tape->valid = size;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index a716693417a3..247b9faccce1 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -428,10 +428,12 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
{
struct request *rq;
int error;
- int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
- rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq = blk_get_request(drive->queue,
+ (cmd->tf_flags & IDE_TFLAG_WRITE) ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
/*
* (ks) We transfer currently only whole sectors.
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 247853ea1368..c3062b53056f 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -54,7 +54,7 @@
#define DRV_NAME "sis5513"
/* registers layout and init values are chipset family dependent */
-
+#undef ATA_16
#define ATA_16 0x01
#define ATA_33 0x02
#define ATA_66 0x03
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 01035e718c1c..709c9cc34369 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0;
- if (bdi_congested(&q->backing_dev_info, bits))
+ if (bdi_congested(q->backing_dev_info, bits))
return 1;
if (cached_dev_get(dc)) {
@@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
cached_dev_put(dc);
@@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
struct gendisk *g = dc->disk.disk;
g->queue->make_request_fn = cached_dev_make_request;
- g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+ g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
@@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
struct gendisk *g = d->disk;
g->queue->make_request_fn = flash_dev_make_request;
- g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+ g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
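
The bcache and md hunks from here on are mechanical fallout of struct backing_dev_info moving from being embedded in the request_queue to being dynamically allocated and referenced through a pointer, so every &q->backing_dev_info access becomes q->backing_dev_info->. Purely illustrative:

/* Sketch: a congested callback after the bdi became a pointer. */
static int sketch_congested(struct request_queue *q, int bits)
{
        return bdi_congested(q->backing_dev_info, bits);
}
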
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3a19cbc8b230..85e3f21c2514 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
- q->backing_dev_info.congested_data = d;
+ q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
- dc->disk.disk->queue->backing_dev_info.ra_pages =
- max(dc->disk.disk->queue->backing_dev_info.ra_pages,
- q->backing_dev_info.ra_pages);
+ dc->disk.disk->queue->backing_dev_info->ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+ q->backing_dev_info->ra_pages);
bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5b9cf56de8ef..894bc14469c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2284,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1fe8be..136fda3ff9e5 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -92,7 +92,6 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
- mempool_t *rq_pool;
struct bio_set *bs;
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bf2b2676cb8a..9fab33b113c4 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3570bcb7a4a4..7f223dbed49f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -92,12 +92,6 @@ struct multipath {
unsigned queue_mode;
- /*
- * We must use a mempool of dm_mpath_io structs so that we
- * can resubmit bios on error.
- */
- mempool_t *mpio_pool;
-
struct mutex work_mutex;
struct work_struct trigger_event;
@@ -115,8 +109,6 @@ struct dm_mpath_io {
typedef int (*action_fn) (struct pgpath *pgpath);
-static struct kmem_cache *_mpio_cache;
-
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
- m->mpio_pool = NULL;
m->queue_mode = DM_TYPE_NONE;
m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- }
-
- if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
- unsigned min_ios = dm_get_reserved_rq_based_ios();
-
- m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
- if (!m->mpio_pool)
- return -ENOMEM;
- }
- else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
/*
* bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
- mempool_destroy(m->mpio_pool);
kfree(m);
}
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
return info->ptr;
}
-static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
-{
- struct dm_mpath_io *mpio;
-
- if (!m->mpio_pool) {
- /* Use blk-mq pdu memory requested via per_io_data_size */
- mpio = get_mpio(info);
- memset(mpio, 0, sizeof(*mpio));
- return mpio;
- }
-
- mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
- if (!mpio)
- return NULL;
-
- memset(mpio, 0, sizeof(*mpio));
- info->ptr = mpio;
-
- return mpio;
-}
-
-static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
-{
- /* Only needed for non blk-mq (.request_fn) multipath */
- if (m->mpio_pool) {
- struct dm_mpath_io *mpio = info->ptr;
-
- info->ptr = NULL;
- mempool_free(mpio, m->mpio_pool);
- }
-}
-
static size_t multipath_per_bio_data_size(void)
{
return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
/*
* Map cloned requests (request-based multipath)
*/
-static int __multipath_map(struct dm_target *ti, struct request *clone,
- union map_info *map_context,
- struct request *rq, struct request **__clone)
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+ union map_info *map_context,
+ struct request **__clone)
{
struct multipath *m = ti->private;
int r = DM_MAPIO_REQUEUE;
- size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
+ size_t nr_bytes = blk_rq_bytes(rq);
struct pgpath *pgpath;
struct block_device *bdev;
- struct dm_mpath_io *mpio;
+ struct dm_mpath_io *mpio = get_mpio(map_context);
+ struct request *clone;
/* Do we need to select a new pgpath? */
pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return r;
}
- mpio = set_mpio(m, map_context);
- if (!mpio)
- /* ENOMEM, requeue */
- return r;
-
+ memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev;
- if (clone) {
- /*
- * Old request-based interface: allocated clone is passed in.
- * Used by: .request_fn stacked on .request_fn path(s).
- */
- clone->q = bdev_get_queue(bdev);
- clone->rq_disk = bdev->bd_disk;
- clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- } else {
- /*
- * blk-mq request-based interface; used by both:
- * .request_fn stacked on blk-mq path(s) and
- * blk-mq stacked on blk-mq path(s).
- */
- clone = blk_mq_alloc_request(bdev_get_queue(bdev),
- rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(clone)) {
- /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- clear_request_fn_mpio(m, map_context);
- return r;
- }
- clone->bio = clone->biotail = NULL;
- clone->rq_disk = bdev->bd_disk;
- clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- *__clone = clone;
+ clone = blk_get_request(bdev_get_queue(bdev),
+ rq->cmd_flags | REQ_NOMERGE,
+ GFP_ATOMIC);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+ return r;
}
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ *__clone = clone;
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return DM_MAPIO_REMAPPED;
}
-static int multipath_map(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
-{
- return __multipath_map(ti, clone, map_context, NULL, NULL);
-}
-
-static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
- union map_info *map_context,
- struct request **clone)
-{
- return __multipath_map(ti, NULL, map_context, rq, clone);
-}
-
static void multipath_release_clone(struct request *clone)
{
- blk_mq_free_request(clone);
+ blk_put_request(clone);
}
/*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_write_same_bios = 1;
if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
- else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- clear_request_fn_mpio(m, map_context);
return r;
}
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
- .map_rq = multipath_map,
.clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
{
int r;
- /* allocate a slab for the dm_mpath_ios */
- _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
- if (!_mpio_cache)
- return -ENOMEM;
-
r = dm_register_target(&multipath_target);
if (r < 0) {
DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd:
bad_alloc_kmultipathd:
dm_unregister_target(&multipath_target);
bad_register_target:
- kmem_cache_destroy(_mpio_cache);
-
return r;
}
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
destroy_workqueue(kmultipathd);
dm_unregister_target(&multipath_target);
- kmem_cache_destroy(_mpio_cache);
}
module_init(dm_multipath_init);
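
With the old .request_fn stacking gone, multipath no longer keeps its own mpio mempool or a separate map_rq path: the clone is always taken from the underlying queue with blk_get_request() and returned with blk_put_request(). A condensed sketch of the new clone path, with path selection elided:

/* Condensed sketch of the clone-and-map flow shown above. */
static int sketch_clone_and_map(struct request *rq, struct block_device *bdev,
                                struct request **__clone)
{
        struct request *clone;

        clone = blk_get_request(bdev_get_queue(bdev),
                                rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
        if (IS_ERR(clone))
                return DM_MAPIO_REQUEUE;        /* EBUSY/ENODEV/EWOULDBLOCK */

        clone->bio = clone->biotail = NULL;
        clone->rq_disk = bdev->bd_disk;
        clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
        *__clone = clone;
        return DM_MAPIO_REMAPPED;
}
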
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e702fc69a83..67d76f21fecd 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
dm_mq_stop_queue(q);
}
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
- mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
- mempool_free(rq, md->rq_pool);
-}
-
/*
* Partial completion handling for request-based dm
*/
@@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
- return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+ return blk_mq_rq_to_pdu(rq);
}
static void rq_end_stats(struct mapped_device *md, struct request *orig)
@@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md);
}
-static void free_rq_clone(struct request *clone)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
-
- blk_rq_unprep_clone(clone);
-
- /*
- * It is possible for a clone_old_rq() allocated clone to
- * get passed in -- it may not yet have a request_queue.
- * This is known to occur if the error target replaces
- * a multipath target that has a request_fn queue stacked
- * on blk-mq queue(s).
- */
- if (clone->q && clone->q->mq_ops)
- /* stacked on blk-mq queue(s) */
- tio->ti->type->release_clone_rq(clone);
- else if (!md->queue->mq_ops)
- /* request_fn queue stacked on request_fn queue(s) */
- free_old_clone_request(md, clone);
-
- if (!md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
/*
* Complete the clone and the original request.
* Must be called without clone's queue lock held,
@@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- rq->errors = clone->errors;
- rq->resid_len = clone->resid_len;
-
- if (rq->sense)
- /*
- * We are using the sense buffer of the original
- * request.
- * So setting the length of the sense data is enough.
- */
- rq->sense_len = clone->sense_len;
- }
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
- free_rq_clone(clone);
rq_end_stats(md, rq);
if (!rq->q->mq_ops)
blk_end_request_all(rq, error);
@@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
rq_completed(md, rw, true);
}
-static void dm_unprep_request(struct request *rq)
-{
- struct dm_rq_target_io *tio = tio_from_request(rq);
- struct request *clone = tio->clone;
-
- if (!rq->q->mq_ops) {
- rq->special = NULL;
- rq->rq_flags &= ~RQF_DONTPREP;
- }
-
- if (clone)
- free_rq_clone(clone);
- else if (!tio->md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
/*
* Requeue the original request of a clone.
*/
@@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
int rw = rq_data_dir(rq);
rq_end_stats(md, rq);
- dm_unprep_request(rq);
+ if (tio->clone) {
+ blk_rq_unprep_clone(tio->clone);
+ tio->ti->type->release_clone_rq(tio->clone);
+ }
if (!rq->q->mq_ops)
dm_old_requeue_request(rq);
@@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
if (!clone) {
rq_end_stats(tio->md, rq);
rw = rq_data_dir(rq);
- if (!rq->q->mq_ops) {
+ if (!rq->q->mq_ops)
blk_end_request_all(rq, tio->error);
- rq_completed(tio->md, rw, false);
- free_old_rq_tio(tio);
- } else {
+ else
blk_mq_end_request(rq, tio->error);
- rq_completed(tio->md, rw, false);
- }
+ rq_completed(tio->md, rw, false);
return;
}
@@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- if (!clone->q->mq_ops) {
- /*
- * For just cleaning up the information of the queue in which
- * the clone was dispatched.
- * The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (RQF_ALLOCED isn't set).
- */
- __blk_put_request(clone->q, clone);
- }
-
/*
* Actual request completion is done in a softirq context which doesn't
* hold the clone's queue lock. Otherwise, deadlock could occur because:
@@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
if (r)
return r;
- clone->cmd = rq->cmd;
- clone->cmd_len = rq->cmd_len;
- clone->sense = rq->sense;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
@@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
- /*
- * Create clone for use with .request_fn request_queue
- */
- struct request *clone;
-
- clone = alloc_old_clone_request(md, gfp_mask);
- if (!clone)
- return NULL;
-
- blk_rq_init(NULL, clone);
- if (setup_clone(clone, rq, tio, gfp_mask)) {
- /* -ENOMEM */
- free_old_clone_request(md, clone);
- return NULL;
- }
-
- return clone;
-}
-
static void map_tio_request(struct kthread_work *work);
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
kthread_init_work(&tio->work, map_tio_request);
}
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
- struct mapped_device *md,
- gfp_t gfp_mask)
-{
- struct dm_rq_target_io *tio;
- int srcu_idx;
- struct dm_table *table;
-
- tio = alloc_old_rq_tio(md, gfp_mask);
- if (!tio)
- return NULL;
-
- init_tio(tio, rq, md);
-
- table = dm_get_live_table(md, &srcu_idx);
- /*
- * Must clone a request if this .request_fn DM device
- * is stacked on .request_fn device(s).
- */
- if (!dm_table_all_blk_mq_devices(table)) {
- if (!clone_old_rq(rq, md, tio, gfp_mask)) {
- dm_put_live_table(md, srcu_idx);
- free_old_rq_tio(tio);
- return NULL;
- }
- }
- dm_put_live_table(md, srcu_idx);
-
- return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_rq_target_io *tio;
-
- if (unlikely(rq->special)) {
- DMWARN("Already has something in rq->special.");
- return BLKPREP_KILL;
- }
-
- tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
- if (!tio)
- return BLKPREP_DEFER;
-
- rq->special = tio;
- rq->rq_flags |= RQF_DONTPREP;
-
- return BLKPREP_OK;
-}
-
/*
* Returns:
* DM_MAPIO_* : the request has been processed as indicated
@@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
struct request *rq = tio->orig;
struct request *clone = NULL;
- if (tio->clone) {
- clone = tio->clone;
- r = ti->type->map_rq(ti, clone, &tio->info);
- if (r == DM_MAPIO_DELAY_REQUEUE)
- return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
- } else {
- r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
- if (r < 0) {
- /* The target wants to complete the I/O */
- dm_kill_unmapped_request(rq, r);
- return r;
- }
- if (r == DM_MAPIO_REMAPPED &&
- setup_clone(clone, rq, tio, GFP_ATOMIC)) {
- /* -ENOMEM */
- ti->type->release_clone_rq(clone);
- return DM_MAPIO_REQUEUE;
- }
- }
-
+ r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
break;
case DM_MAPIO_REMAPPED:
+ if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ /* -ENOMEM */
+ ti->type->release_clone_rq(clone);
+ return DM_MAPIO_REQUEUE;
+ }
+
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
@@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
+static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+{
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+ /*
+ * Must initialize md member of tio, otherwise it won't
+ * be available in dm_mq_queue_rq.
+ */
+ tio->md = md;
+
+ if (md->init_tio_pdu) {
+ /* target-specific per-io data is immediately after the tio */
+ tio->info.ptr = tio + 1;
+ }
+
+ return 0;
+}
+
+static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+ return __dm_rq_init_rq(q->rq_alloc_data, rq);
+}
+
static void map_tio_request(struct kthread_work *work)
{
struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
@@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
dm_start_request(md, rq);
tio = tio_from_request(rq);
+ init_tio(tio, rq, md);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
@@ -824,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q)
/*
* Fully initialize a .request_fn request-based queue.
*/
-int dm_old_init_request_queue(struct mapped_device *md)
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
+ struct dm_target *immutable_tgt;
+
/* Fully initialize the queue */
- if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+ md->queue->cmd_size = sizeof(struct dm_rq_target_io);
+ md->queue->rq_alloc_data = md;
+ md->queue->request_fn = dm_old_request_fn;
+ md->queue->init_rq_fn = dm_rq_init_rq;
+
+ immutable_tgt = dm_table_get_immutable_target(t);
+ if (immutable_tgt && immutable_tgt->per_io_data_size) {
+ /* any target-specific per-io data is immediately after the tio */
+ md->queue->cmd_size += immutable_tgt->per_io_data_size;
+ md->init_tio_pdu = true;
+ }
+ if (blk_init_allocated_queue(md->queue) < 0)
return -EINVAL;
/* disable dm_old_request_fn's merge heuristic by default */
@@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_old_prep_fn);
/* Initialize the request-based DM worker thread */
kthread_init_worker(&md->kworker);
@@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
- struct mapped_device *md = data;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
- /*
- * Must initialize md member of tio, otherwise it won't
- * be available in dm_mq_queue_rq.
- */
- tio->md = md;
-
- if (md->init_tio_pdu) {
- /* target-specific per-io data is immediately after the tio */
- tio->info.ptr = tio + 1;
- }
-
- return 0;
+ return __dm_rq_init_rq(data, rq);
}
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
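
Both queue flavors now get their struct dm_rq_target_io as the request PDU, blk-mq via dm_mq_init_request() and the legacy path via the new init_rq_fn hook, so tio_from_request() collapses to blk_mq_rq_to_pdu(). A sketch of the shared initialization and the layout it assumes:

/*
 * Sketch: the target's per_io_data_size bytes sit immediately after the
 * tio inside each request's PDU, as sized in dm_old_init_request_queue().
 */
static int sketch_init_rq_pdu(struct mapped_device *md, struct request *rq)
{
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        tio->md = md;                   /* needed later in dm_mq_queue_rq() */
        if (md->init_tio_pdu)
                tio->info.ptr = tio + 1;        /* per-io data follows the tio */
        return 0;
}
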
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 4da06cae7bad..f0020d21b95f 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
bool dm_use_blk_mq_default(void);
bool dm_use_blk_mq(struct mapped_device *md);
-int dm_old_init_request_queue(struct mapped_device *md);
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a427de23ed2..3ad16d9c9d5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
char b[BDEVNAME_SIZE];
if (likely(q))
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ r |= bdi_congested(q->backing_dev_info, bdi_bits);
else
DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md),
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 710ae28fd618..43d3445b121d 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
return -EIO;
}
-static int io_err_map_rq(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
-{
- return -EIO;
-}
-
static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
union map_info *map_context,
struct request **clone)
@@ -161,7 +155,6 @@ static struct target_type error_target = {
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
- .map_rq = io_err_map_rq,
.clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq,
.direct_access = io_err_direct_access,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 110982db4b48..2b266a2b5035 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2711,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
return 1;
q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static void requeue_bios(struct pool *pool)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3086da5664f3..5bd9ab06a562 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
*/
struct dm_md_mempools {
mempool_t *io_pool;
- mempool_t *rq_pool;
struct bio_set *bs;
};
@@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (r > 0) {
/*
- * Target determined this ioctl is being issued against
- * a logical partition of the parent bdev; so extra
- * validation is needed.
+ * Target determined this ioctl is being issued against a
+ * subset of the parent bdev; require extra privileges.
*/
- r = scsi_verify_blk_ioctl(NULL, cmd);
- if (r)
+ if (!capable(CAP_SYS_RAWIO)) {
+ DMWARN_LIMIT(
+ "%s: sending ioctl %x to DM device without required privilege.",
+ current->comm, cmd);
+ r = -ENOIOCTLCMD;
goto out;
+ }
}
r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
@@ -1314,7 +1316,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* With request-based DM we only need to check the
* top-level queue for congestion.
*/
- r = md->queue->backing_dev_info.wb.state & bdi_bits;
+ r = md->queue->backing_dev_info->wb.state & bdi_bits;
} else {
map = dm_get_live_table_fast(md);
if (map)
@@ -1397,7 +1399,7 @@ void dm_init_md_queue(struct mapped_device *md)
* - must do so here (in alloc_dev callchain) before queue is used
*/
md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_data = md;
+ md->queue->backing_dev_info->congested_data = md;
}
void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1408,7 +1410,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
@@ -1419,7 +1421,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->kworker_task)
kthread_stop(md->kworker_task);
mempool_destroy(md->io_pool);
- mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
@@ -1595,12 +1596,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
+ BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
- md->rq_pool = p->rq_pool;
- p->rq_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
@@ -1777,7 +1776,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) {
case DM_TYPE_REQUEST_BASED:
- r = dm_old_init_request_queue(md);
+ r = dm_old_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based mapped device");
return r;
@@ -2493,7 +2492,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
unsigned integrity, unsigned per_io_data_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
- struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0;
unsigned int front_pad;
@@ -2503,20 +2501,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+
+ pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+ if (!pools->io_pool)
+ goto out;
break;
case DM_TYPE_REQUEST_BASED:
- cachep = _rq_tio_cache;
- pool_size = dm_get_reserved_rq_based_ios();
- pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
- if (!pools->rq_pool)
- goto out;
- /* fall through to setup remaining rq-based pools */
case DM_TYPE_MQ_REQUEST_BASED:
- if (!pool_size)
- pool_size = dm_get_reserved_rq_based_ios();
+ pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
@@ -2524,12 +2518,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
BUG();
}
- if (cachep) {
- pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
- if (!pools->io_pool)
- goto out;
- }
-
pools->bs = bioset_create_nobvec(pool_size, front_pad);
if (!pools->bs)
goto out;
@@ -2551,7 +2539,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
return;
mempool_destroy(pools->io_pool);
- mempool_destroy(pools->rq_pool);
if (pools->bs)
bioset_free(pools->bs);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08b9654..f298b01f7ab3 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* To check whether the target type is request-based or not (bio-based).
*/
-#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
- ((t)->type->clone_and_map_rq != NULL))
+#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
/*
* To check whether the target type is a hybrid (capable of being
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5975c9915684..f1c7bbac31a5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 01175dac0db6..ba485dcf1064 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5346,8 +5346,8 @@ int md_run(struct mddev *mddev)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = md_congested;
+ mddev->queue->backing_dev_info->congested_data = mddev;
+ mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5704,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info.congested_fn = NULL;
+ mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index aa8c4e5c1ee2..d457afa672d5 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
* first available device
*/
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 848365d474f3..d6585239bff2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
}
@@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
*/
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2* stripe;
}
dump_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b0f647bcccb..830ff2b20346 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
* non-congested targets, it can be removed
*/
if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
else
- ret &= bdi_congested(&q->backing_dev_info, bits);
+ ret &= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
@@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int i, disks;
struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_opf &
- (REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+ mbio->bi_opf = bio_op(bio) |
+ (bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
!test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1920756828df..6bc5c2a85160 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
@@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
* maybe...
*/
stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
if (md_integrity_register(mddev))
@@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
int stripe = conf->geo.raid_disks *
((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3c7e106c12a2..6214e699342c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6331,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info.capabilities |=
+ mddev->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
else
- mddev->queue->backing_dev_info.capabilities &=
+ mddev->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES;
mddev_resume(mddev);
}
@@ -7153,8 +7153,8 @@ static int raid5_run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -7763,8 +7763,8 @@ static void end_reshape(struct r5conf *conf)
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
}
}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index f3512404bc52..99e651c27fb7 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2000,16 +2000,6 @@ static int msb_bd_getgeo(struct block_device *bdev,
return 0;
}
-static int msb_prepare_req(struct request_queue *q, struct request *req)
-{
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_dump_rq_flags(req, "MS unsupported request");
- return BLKPREP_KILL;
- }
- req->rq_flags |= RQF_DONTPREP;
- return BLKPREP_OK;
-}
-
static void msb_submit_req(struct request_queue *q)
{
struct memstick_dev *card = q->queuedata;
@@ -2132,7 +2122,6 @@ static int msb_init_disk(struct memstick_dev *card)
}
msb->queue->queuedata = card;
- blk_queue_prep_rq(msb->queue, msb_prepare_req);
blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index fa0746d182ff..c00d8a266878 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -827,18 +827,6 @@ static void mspro_block_start(struct memstick_dev *card)
spin_unlock_irqrestore(&msb->q_lock, flags);
}
-static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
-{
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_dump_rq_flags(req, "MSPro unsupported request");
- return BLKPREP_KILL;
- }
-
- req->rq_flags |= RQF_DONTPREP;
-
- return BLKPREP_OK;
-}
-
static void mspro_block_submit_req(struct request_queue *q)
{
struct memstick_dev *card = q->queuedata;
@@ -1228,7 +1216,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
}
msb->queue->queuedata = card;
- blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667acde4..b8c4b2ba7519 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2320,10 +2320,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
- memcpy(req->sense, smprep, sizeof(*smprep));
- req->sense_len = sizeof(*smprep);
- req->resid_len = 0;
- rsp->resid_len -= smprep->ResponseDataLength;
+ memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep));
+ scsi_req(req)->sense_len = sizeof(*smprep);
+ scsi_req(req)->resid_len = 0;
+ scsi_req(rsp)->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index a6496d8027bc..033f641eb8b7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,15 +30,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- /*
- * We only like normal block requests and discards.
- */
- if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
- req_op(req) != REQ_OP_SECURE_ERASE) {
- blk_dump_rq_flags(req, "MMC bad request");
- return BLKPREP_KILL;
- }
-
if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index df8a5ef334c0..6b8d5cd7dbf6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -84,9 +84,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = bio_data(req->bio);
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev);
@@ -94,16 +91,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
get_capacity(req->rq_disk))
return -EIO;
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
return tr->discard(dev, block, nsect);
-
- if (rq_data_dir(req) == READ) {
+ case REQ_OP_READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return -EIO;
rq_flush_dcache_pages(req);
return 0;
- } else {
+ case REQ_OP_WRITE:
if (!tr->writesect)
return -EIO;
@@ -112,6 +109,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
+ default:
+ return -EIO;
}
}
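[Converting the if/else chain to a switch on req_op() gives mtd_blkdevs an explicit default arm: any operation the translation layer does not implement now fails cleanly with -EIO instead of falling through to the write path. The dispatch skeleton shared by several conversions in this series, with illustrative handler names:

    switch (req_op(req)) {
    case REQ_OP_DISCARD:
        return do_discard(dev, block, nsect);
    case REQ_OP_READ:
        return do_read(dev, block, nsect, buf);
    case REQ_OP_WRITE:
        return do_write(dev, block, nsect, buf);
    default:
        return -EIO;    /* passthrough or future ops */
    }
]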
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index d1e6931c132f..c80869e60909 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -323,16 +323,15 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
struct ubiblock *dev = hctx->queue->queuedata;
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
- if (req->cmd_type != REQ_TYPE_FS)
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ ubi_sgl_init(&pdu->usgl);
+ queue_work(dev->wq, &pdu->work);
+ return BLK_MQ_RQ_QUEUE_OK;
+ default:
return BLK_MQ_RQ_QUEUE_ERROR;
+ }
- if (rq_data_dir(req) != READ)
- return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
-
- ubi_sgl_init(&pdu->usgl);
- queue_work(dev->wq, &pdu->work);
-
- return BLK_MQ_RQ_QUEUE_OK;
}
static int ubiblock_init_request(void *data, struct request *req,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 138c6fa00cd5..44a1a257e0b5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -208,18 +208,18 @@ EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags, int qid)
{
+ unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
struct request *req;
if (qid == NVME_QID_ANY) {
- req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+ req = blk_mq_alloc_request(q, op, flags);
} else {
- req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, op, flags,
qid ? qid - 1 : 0);
}
if (IS_ERR(req))
return req;
- req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
nvme_req(req)->cmd = cmd;
@@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
+ unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
struct nvme_dsm_range *range;
- unsigned int nr_bytes = blk_rq_bytes(req);
+ struct bio *bio;
- range = kmalloc(sizeof(*range), GFP_ATOMIC);
+ range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
if (!range)
return BLK_MQ_RQ_QUEUE_BUSY;
- range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ __rq_for_each_bio(bio, req) {
+ u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ n++;
+ }
+
+ if (WARN_ON_ONCE(n != segments)) {
+ kfree(range);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.nr = 0;
+ cmnd->dsm.nr = segments - 1;
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range);
+ req->special_vec.bv_len = sizeof(*range) * segments;
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return BLK_MQ_RQ_QUEUE_OK;
@@ -309,17 +321,27 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
{
int ret = BLK_MQ_RQ_QUEUE_OK;
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
nvme_setup_flush(ns, cmd);
- else if (req_op(req) == REQ_OP_DISCARD)
+ break;
+ case REQ_OP_DISCARD:
ret = nvme_setup_discard(ns, req, cmd);
- else
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
nvme_setup_rw(ns, req, cmd);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
cmd->common.command_id = req->tag;
-
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -868,6 +890,9 @@ static void nvme_config_discard(struct nvme_ns *ns)
struct nvme_ctrl *ctrl = ns->ctrl;
u32 logical_block_size = queue_logical_block_size(ns->queue);
+ BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
+ NVME_DSM_MAX_RANGES);
+
if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
ns->queue->limits.discard_zeroes_data = 1;
else
@@ -876,6 +901,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
+ blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
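[The nvme/core.c changes do two things: passthrough allocation passes REQ_OP_DRV_OUT/REQ_OP_DRV_IN instead of a bare write flag, and nvme_setup_discard() emits one DSM range per bio of a merged discard request, with blk_queue_max_discard_segments() capping merges at NVME_DSM_MAX_RANGES so the range array always fits the single page asserted by the BUILD_BUG_ON. The array rides along as a special payload and must be freed at completion; a sketch of that cleanup, assuming the existing nvme_cleanup_cmd() path:

    static void nvme_cleanup_cmd(struct request *req)
    {
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
            /* the kmalloc_array() from nvme_setup_discard() */
            kfree(page_address(req->special_vec.bv_page) +
                  req->special_vec.bv_offset);
        }
    }
]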
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e65041c640cb..fb51a8de9b29 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1937,7 +1937,7 @@ nvme_fc_complete_rq(struct request *rq)
return;
}
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(rq))
error = rq->errors;
else
error = nvme_error_status(rq->errors);
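[blk_rq_is_passthrough() replaces every open-coded cmd_type comparison (REQ_TYPE_DRV_PRIV, REQ_TYPE_FS, REQ_TYPE_BLOCK_PC) in the hunks that follow. A plausible sketch of the helper in terms of the new request ops; the authoritative definition landed in blkdev.h earlier in the series:

    static inline bool blk_rq_is_passthrough(struct request *rq)
    {
        unsigned int op = req_op(rq);

        return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT ||
               op == REQ_OP_DRV_IN  || op == REQ_OP_DRV_OUT;
    }
]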
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d67d0d0a3bc0..ddc51adb594d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -589,7 +589,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
*/
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
- req->cmd_type != REQ_TYPE_DRV_PRIV) {
+ !blk_rq_is_passthrough(req)) {
blk_mq_end_request(req, -EFAULT);
return BLK_MQ_RQ_QUEUE_OK;
}
@@ -646,7 +646,7 @@ static void nvme_complete_rq(struct request *req)
return;
}
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(req))
error = req->errors;
else
error = nvme_error_status(req->errors);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 557f29b1f1bb..a75e95d42b3f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1423,7 +1423,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
struct nvme_command *cmd = nvme_req(rq)->cmd;
- if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+ if (!blk_rq_is_passthrough(rq) ||
cmd->common.opcode != nvme_fabrics_command ||
cmd->fabrics.fctype != nvme_fabrics_type_connect)
return false;
@@ -1471,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
ib_dma_sync_single_for_device(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
- if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+ if (req_op(rq) == REQ_OP_FLUSH)
flush = true;
ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
@@ -1522,7 +1522,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
return;
}
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(rq))
error = rq->errors;
else
error = nvme_error_status(rq->errors);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index a5c09e703bd8..f49ae2758bb7 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -43,6 +43,7 @@
#include <asm/unaligned.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_request.h>
#include "nvme.h"
@@ -2347,12 +2348,14 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
- u8 cmd[BLK_MAX_CDB];
+ u8 cmd[16];
int retcode;
unsigned int opcode;
if (hdr->cmdp == NULL)
return -EMSGSIZE;
+ if (hdr->cmd_len > sizeof(cmd))
+ return -EINVAL;
if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
@@ -2451,8 +2454,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return -EFAULT;
if (hdr.interface_id != 'S')
return -EINVAL;
- if (hdr.cmd_len > BLK_MAX_CDB)
- return -EINVAL;
/*
* A positive return code means a NVMe status, which has been
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9aaa70071ae5..f3862e38f574 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -104,7 +104,7 @@ static void nvme_loop_complete_rq(struct request *req)
return;
}
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(req))
error = req->errors;
else
error = nvme_error_status(req->errors);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9f16ea6964ec..152de6817875 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -300,13 +300,6 @@ static void scm_blk_request(struct request_queue *rq)
struct request *req;
while ((req = blk_peek_request(rq))) {
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_start_request(req);
- blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- __blk_end_request_all(req, -EIO);
- continue;
- }
-
if (!scm_permit_request(bdev, req))
goto out;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a4f6b0d95515..d4023bf1e739 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -18,6 +18,7 @@ config SCSI
depends on BLOCK
select SCSI_DMA if HAS_DMA
select SG_POOL
+ select BLK_SCSI_REQUEST
---help---
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 5b80746980b8..4a7679f6c73d 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -88,12 +88,6 @@ struct clariion_dh_data {
*/
unsigned char buffer[CLARIION_BUFFER_SIZE];
/*
- * SCSI sense buffer for commands -- assumes serial issuance
- * and completion sequence of all commands for same multipath.
- */
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
- unsigned int senselen;
- /*
* LUN state
*/
int lun_state;
@@ -116,44 +110,38 @@ struct clariion_dh_data {
/*
* Parse MODE_SELECT cmd reply.
*/
-static int trespass_endio(struct scsi_device *sdev, char *sense)
+static int trespass_endio(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr)
{
int err = SCSI_DH_IO;
- struct scsi_sense_hdr sshdr;
-
- if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
- sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
- "0x%2x, 0x%2x while sending CLARiiON trespass "
- "command.\n", CLARIION_NAME, sshdr.sense_key,
- sshdr.asc, sshdr.ascq);
- if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
- (sshdr.ascq == 0x00)) {
- /*
- * Array based copy in progress -- do not send
- * mode_select or copy will be aborted mid-stream.
- */
- sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
- "progress while sending CLARiiON trespass "
- "command.\n", CLARIION_NAME);
- err = SCSI_DH_DEV_TEMP_BUSY;
- } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
- (sshdr.ascq == 0x03)) {
- /*
- * LUN Not Ready - Manual Intervention Required
- * indicates in-progress ucode upgrade (NDU).
- */
- sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
- "ucode upgrade NDU operation while sending "
- "CLARiiON trespass command.\n", CLARIION_NAME);
- err = SCSI_DH_DEV_TEMP_BUSY;
- } else
- err = SCSI_DH_DEV_FAILED;
- } else {
- sdev_printk(KERN_INFO, sdev,
- "%s: failed to send MODE SELECT, no sense available\n",
- CLARIION_NAME);
- }
+ sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
+ "0x%2x, 0x%2x while sending CLARiiON trespass "
+ "command.\n", CLARIION_NAME, sshdr->sense_key,
+ sshdr->asc, sshdr->ascq);
+
+ if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 &&
+ sshdr->ascq == 0x00) {
+ /*
+ * Array based copy in progress -- do not send
+ * mode_select or copy will be aborted mid-stream.
+ */
+ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
+ "progress while sending CLARiiON trespass "
+ "command.\n", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 &&
+ sshdr->ascq == 0x03) {
+ /*
+ * LUN Not Ready - Manual Intervention Required
+ * indicates in-progress ucode upgrade (NDU).
+ */
+ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
+ "ucode upgrade NDU operation while sending "
+ "CLARiiON trespass command.\n", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else
+ err = SCSI_DH_DEV_FAILED;
return err;
}
@@ -257,103 +245,15 @@ out:
return sp_model;
}
-/*
- * Get block request for REQ_BLOCK_PC command issued to path. Currently
- * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
- *
- * Uses data and sense buffers in hardware handler context structure and
- * assumes serial servicing of commands, both issuance and completion.
- */
-static struct request *get_req(struct scsi_device *sdev, int cmd,
- unsigned char *buffer)
-{
- struct request *rq;
- int len = 0;
-
- rq = blk_get_request(sdev->request_queue,
- (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
- if (IS_ERR(rq)) {
- sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
- return NULL;
- }
-
- blk_rq_set_block_pc(rq);
- rq->cmd_len = COMMAND_SIZE(cmd);
- rq->cmd[0] = cmd;
-
- switch (cmd) {
- case MODE_SELECT:
- len = sizeof(short_trespass);
- rq->cmd[1] = 0x10;
- rq->cmd[4] = len;
- break;
- case MODE_SELECT_10:
- len = sizeof(long_trespass);
- rq->cmd[1] = 0x10;
- rq->cmd[8] = len;
- break;
- case INQUIRY:
- len = CLARIION_BUFFER_SIZE;
- rq->cmd[4] = len;
- memset(buffer, 0, len);
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- rq->timeout = CLARIION_TIMEOUT;
- rq->retries = CLARIION_RETRIES;
-
- if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
- blk_put_request(rq);
- return NULL;
- }
-
- return rq;
-}
-
-static int send_inquiry_cmd(struct scsi_device *sdev, int page,
- struct clariion_dh_data *csdev)
-{
- struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
- int err;
-
- if (!rq)
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq->sense = csdev->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = csdev->senselen = 0;
-
- rq->cmd[0] = INQUIRY;
- if (page != 0) {
- rq->cmd[1] = 1;
- rq->cmd[2] = page;
- }
- err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
- if (err == -EIO) {
- sdev_printk(KERN_INFO, sdev,
- "%s: failed to send %s INQUIRY: %x\n",
- CLARIION_NAME, page?"EVPD":"standard",
- rq->errors);
- csdev->senselen = rq->sense_len;
- err = SCSI_DH_IO;
- }
-
- blk_put_request(rq);
-
- return err;
-}
-
static int send_trespass_cmd(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- struct request *rq;
unsigned char *page22;
- int err, len, cmd;
+ unsigned char cdb[COMMAND_SIZE(MODE_SELECT)];
+ int err, res = SCSI_DH_OK, len;
+ struct scsi_sense_hdr sshdr;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
page22 = short_trespass;
@@ -361,40 +261,37 @@ static int send_trespass_cmd(struct scsi_device *sdev,
/* Set Honor Reservations bit */
page22[6] |= 0x80;
len = sizeof(short_trespass);
- cmd = MODE_SELECT;
+ cdb[0] = MODE_SELECT;
+ cdb[1] = 0x10;
+ cdb[4] = len;
} else {
page22 = long_trespass;
if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
/* Set Honor Reservations bit */
page22[10] |= 0x80;
len = sizeof(long_trespass);
- cmd = MODE_SELECT_10;
+ cdb[0] = MODE_SELECT_10;
+ cdb[8] = len;
}
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
- rq = get_req(sdev, cmd, csdev->buffer);
- if (!rq)
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq->sense = csdev->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = csdev->senselen = 0;
-
- err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
- if (err == -EIO) {
- if (rq->sense_len) {
- err = trespass_endio(sdev, csdev->sense);
- } else {
+ err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
+ csdev->buffer, len, &sshdr,
+ CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
+ NULL, req_flags, 0);
+ if (err) {
+ if (scsi_sense_valid(&sshdr))
+ res = trespass_endio(sdev, &sshdr);
+ else {
sdev_printk(KERN_INFO, sdev,
"%s: failed to send MODE SELECT: %x\n",
- CLARIION_NAME, rq->errors);
+ CLARIION_NAME, err);
+ res = SCSI_DH_IO;
}
}
- blk_put_request(rq);
-
- return err;
+ return res;
}
static int clariion_check_sense(struct scsi_device *sdev,
@@ -464,21 +361,7 @@ static int clariion_std_inquiry(struct scsi_device *sdev,
int err;
char *sp_model;
- err = send_inquiry_cmd(sdev, 0, csdev);
- if (err != SCSI_DH_OK && csdev->senselen) {
- struct scsi_sense_hdr sshdr;
-
- if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
- &sshdr)) {
- sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
- "%02x/%02x/%02x\n", CLARIION_NAME,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- }
- err = SCSI_DH_IO;
- goto out;
- }
-
- sp_model = parse_sp_model(sdev, csdev->buffer);
+ sp_model = parse_sp_model(sdev, sdev->inquiry);
if (!sp_model) {
err = SCSI_DH_DEV_UNSUPP;
goto out;
@@ -500,30 +383,12 @@ out:
static int clariion_send_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err, retry = CLARIION_RETRIES;
-
-retry:
- err = send_inquiry_cmd(sdev, 0xC0, csdev);
- if (err != SCSI_DH_OK && csdev->senselen) {
- struct scsi_sense_hdr sshdr;
-
- err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
- &sshdr);
- if (!err)
- return SCSI_DH_IO;
-
- err = clariion_check_sense(sdev, &sshdr);
- if (retry > 0 && err == ADD_TO_MLQUEUE) {
- retry--;
- goto retry;
- }
- sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
- "%02x/%02x/%02x\n", CLARIION_NAME,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- err = SCSI_DH_IO;
- } else {
+ int err = SCSI_DH_IO;
+
+ if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer,
+ CLARIION_BUFFER_SIZE))
err = parse_sp_info_reply(sdev, csdev);
- }
+
return err;
}
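[Net effect for scsi_dh_emc: the hand-built passthrough requests (get_req()/send_inquiry_cmd(), with a handler-private sense buffer that assumed strictly serial command issue) are gone. MODE SELECT goes through scsi_execute_req_flags(), which hands back a decoded scsi_sense_hdr, and the vendor VPD pages come from scsi_get_vpd_page(). The prototype relied on, as inferred from the call sites here (hedged; see <scsi/scsi_device.h>):

    int scsi_execute_req_flags(struct scsi_device *sdev,
                               const unsigned char *cmd, int data_direction,
                               void *buffer, unsigned bufflen,
                               struct scsi_sense_hdr *sshdr, int timeout,
                               int retries, int *resid,
                               u64 flags, req_flags_t rq_flags);

A nonzero return with scsi_sense_valid(&sshdr) true is the cue to decode sense, which is exactly how trespass_endio() is now driven.]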
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 308e87195dc1..be43c940636d 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,13 +38,10 @@
#define HP_SW_PATH_PASSIVE 1
struct hp_sw_dh_data {
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int path_state;
int retries;
int retry_cnt;
struct scsi_device *sdev;
- activate_complete callback_fn;
- void *callback_data;
};
static int hp_sw_start_stop(struct hp_sw_dh_data *);
@@ -56,43 +53,34 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *);
*
* Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
*/
-static int tur_done(struct scsi_device *sdev, unsigned char *sense)
+static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
+ struct scsi_sense_hdr *sshdr)
{
- struct scsi_sense_hdr sshdr;
- int ret;
+ int ret = SCSI_DH_IO;
- ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
- if (!ret) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending tur failed, no sense available\n",
- HP_SW_NAME);
- ret = SCSI_DH_IO;
- goto done;
- }
- switch (sshdr.sense_key) {
+ switch (sshdr->sense_key) {
case UNIT_ATTENTION:
ret = SCSI_DH_IMM_RETRY;
break;
case NOT_READY:
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
+ if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
/*
* LUN not ready - Initialization command required
*
* This is the passive path
*/
- ret = SCSI_DH_DEV_OFFLINED;
+ h->path_state = HP_SW_PATH_PASSIVE;
+ ret = SCSI_DH_OK;
break;
}
/* Fallthrough */
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, sense %x/%x/%x\n",
- HP_SW_NAME, sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
+ HP_SW_NAME, sshdr->sense_key, sshdr->asc,
+ sshdr->ascq);
break;
}
-
-done:
return ret;
}
@@ -105,131 +93,36 @@ done:
*/
static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
- struct request *req;
- int ret;
+ unsigned char cmd[6] = { TEST_UNIT_READY };
+ struct scsi_sense_hdr sshdr;
+ int ret = SCSI_DH_OK, res;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
retry:
- req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
- if (IS_ERR(req))
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
- req->cmd[0] = TEST_UNIT_READY;
- req->timeout = HP_SW_TIMEOUT;
- req->sense = h->sense;
- memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
- req->sense_len = 0;
-
- ret = blk_execute_rq(req->q, NULL, req, 1);
- if (ret == -EIO) {
- if (req->sense_len > 0) {
- ret = tur_done(sdev, h->sense);
- } else {
+ res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
+ HP_SW_TIMEOUT, HP_SW_RETRIES,
+ NULL, req_flags, 0);
+ if (res) {
+ if (scsi_sense_valid(&sshdr))
+ ret = tur_done(sdev, h, &sshdr);
+ else {
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed with %x\n",
- HP_SW_NAME, req->errors);
+ HP_SW_NAME, res);
ret = SCSI_DH_IO;
}
} else {
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
}
- if (ret == SCSI_DH_IMM_RETRY) {
- blk_put_request(req);
+ if (ret == SCSI_DH_IMM_RETRY)
goto retry;
- }
- if (ret == SCSI_DH_DEV_OFFLINED) {
- h->path_state = HP_SW_PATH_PASSIVE;
- ret = SCSI_DH_OK;
- }
-
- blk_put_request(req);
return ret;
}
/*
- * start_done - Handle START STOP UNIT return status
- * @sdev: sdev the command has been sent to
- * @errors: blk error code
- */
-static int start_done(struct scsi_device *sdev, unsigned char *sense)
-{
- struct scsi_sense_hdr sshdr;
- int rc;
-
- rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
- if (!rc) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending start_stop_unit failed, "
- "no sense available\n",
- HP_SW_NAME);
- return SCSI_DH_IO;
- }
- switch (sshdr.sense_key) {
- case NOT_READY:
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
- /*
- * LUN not ready - manual intervention required
- *
- * Switch-over in progress, retry.
- */
- rc = SCSI_DH_RETRY;
- break;
- }
- /* fall through */
- default:
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
- HP_SW_NAME, sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
- rc = SCSI_DH_IO;
- }
-
- return rc;
-}
-
-static void start_stop_endio(struct request *req, int error)
-{
- struct hp_sw_dh_data *h = req->end_io_data;
- unsigned err = SCSI_DH_OK;
-
- if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE) {
- sdev_printk(KERN_WARNING, h->sdev,
- "%s: sending start_stop_unit failed with %x\n",
- HP_SW_NAME, req->errors);
- err = SCSI_DH_IO;
- goto done;
- }
-
- if (req->sense_len > 0) {
- err = start_done(h->sdev, h->sense);
- if (err == SCSI_DH_RETRY) {
- err = SCSI_DH_IO;
- if (--h->retry_cnt) {
- blk_put_request(req);
- err = hp_sw_start_stop(h);
- if (err == SCSI_DH_OK)
- return;
- }
- }
- }
-done:
- req->end_io_data = NULL;
- __blk_put_request(req->q, req);
- if (h->callback_fn) {
- h->callback_fn(h->callback_data, err);
- h->callback_fn = h->callback_data = NULL;
- }
- return;
-
-}
-
-/*
* hp_sw_start_stop - Send START STOP UNIT command
* @sdev: sdev command should be sent to
*
@@ -237,26 +130,48 @@ done:
*/
static int hp_sw_start_stop(struct hp_sw_dh_data *h)
{
- struct request *req;
-
- req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
- if (IS_ERR(req))
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- req->cmd_len = COMMAND_SIZE(START_STOP);
- req->cmd[0] = START_STOP;
- req->cmd[4] = 1; /* Start spin cycle */
- req->timeout = HP_SW_TIMEOUT;
- req->sense = h->sense;
- memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
- req->sense_len = 0;
- req->end_io_data = h;
+ unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdev = h->sdev;
+ int res, rc = SCSI_DH_OK;
+ int retry_cnt = HP_SW_RETRIES;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
- blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
- return SCSI_DH_OK;
+retry:
+ res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
+ HP_SW_TIMEOUT, HP_SW_RETRIES,
+ NULL, req_flags, 0);
+ if (res) {
+ if (!scsi_sense_valid(&sshdr)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "no sense available\n", HP_SW_NAME);
+ return SCSI_DH_IO;
+ }
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ if (--retry_cnt)
+ goto retry;
+ rc = SCSI_DH_RETRY;
+ break;
+ }
+ /* fall through */
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "sense %x/%x/%x\n", HP_SW_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ rc = SCSI_DH_IO;
+ }
+ }
+ return rc;
}
static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
@@ -290,15 +205,8 @@ static int hp_sw_activate(struct scsi_device *sdev,
ret = hp_sw_tur(sdev, h);
- if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
- h->retry_cnt = h->retries;
- h->callback_fn = fn;
- h->callback_data = data;
+ if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE)
ret = hp_sw_start_stop(h);
- if (ret == SCSI_DH_OK)
- return 0;
- h->callback_fn = h->callback_data = NULL;
- }
if (fn)
fn(data, ret);
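[hp_sw drops its asynchronous start_stop_endio() completion and the callback bookkeeping that supported it; START STOP UNIT is now issued synchronously and retried in a plain loop. Passive-path detection also folds into tur_done(), which sets path_state itself rather than bouncing SCSI_DH_DEV_OFFLINED back to the caller. The resulting sense mapping, condensed from the hunk:

    switch (sshdr->sense_key) {
    case UNIT_ATTENTION:
        ret = SCSI_DH_IMM_RETRY;        /* caller re-issues the TUR */
        break;
    case NOT_READY:
        if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
            h->path_state = HP_SW_PATH_PASSIVE;
            ret = SCSI_DH_OK;           /* passive path, not an error */
            break;
        }
        /* fall through */
    default:
        ret = SCSI_DH_IO;
    }
]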
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 00d9c326158e..b64eaae8533d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -205,7 +205,6 @@ struct rdac_dh_data {
#define RDAC_NON_PREFERRED 1
char preferred;
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
union {
struct c2_inquiry c2;
struct c4_inquiry c4;
@@ -262,40 +261,12 @@ do { \
sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0);
-static struct request *get_rdac_req(struct scsi_device *sdev,
- void *buffer, unsigned buflen, int rw)
+static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
+ struct list_head *list,
+ unsigned char *cdb)
{
- struct request *rq;
- struct request_queue *q = sdev->request_queue;
-
- rq = blk_get_request(q, rw, GFP_NOIO);
-
- if (IS_ERR(rq)) {
- sdev_printk(KERN_INFO, sdev,
- "get_rdac_req: blk_get_request failed.\n");
- return NULL;
- }
- blk_rq_set_block_pc(rq);
-
- if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
- blk_put_request(rq);
- sdev_printk(KERN_INFO, sdev,
- "get_rdac_req: blk_rq_map_kern failed.\n");
- return NULL;
- }
-
- rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- rq->retries = RDAC_RETRIES;
- rq->timeout = RDAC_TIMEOUT;
-
- return rq;
-}
-
-static struct request *rdac_failover_get(struct scsi_device *sdev,
- struct rdac_dh_data *h, struct list_head *list)
-{
- struct request *rq;
+ struct scsi_device *sdev = ctlr->ms_sdev;
+ struct rdac_dh_data *h = sdev->handler_data;
struct rdac_mode_common *common;
unsigned data_size;
struct rdac_queue_data *qdata;
@@ -332,27 +303,17 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
lun_table[qdata->h->lun] = 0x81;
}
- /* get request for block layer packet command */
- rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
- if (!rq)
- return NULL;
-
/* Prepare the command. */
if (h->ctlr->use_ms10) {
- rq->cmd[0] = MODE_SELECT_10;
- rq->cmd[7] = data_size >> 8;
- rq->cmd[8] = data_size & 0xff;
+ cdb[0] = MODE_SELECT_10;
+ cdb[7] = data_size >> 8;
+ cdb[8] = data_size & 0xff;
} else {
- rq->cmd[0] = MODE_SELECT;
- rq->cmd[4] = data_size;
+ cdb[0] = MODE_SELECT;
+ cdb[4] = data_size;
}
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
- return rq;
+ return data_size;
}
static void release_controller(struct kref *kref)
@@ -400,46 +361,14 @@ static struct rdac_controller *get_controller(int index, char *array_name,
return ctlr;
}
-static int submit_inquiry(struct scsi_device *sdev, int page_code,
- unsigned int len, struct rdac_dh_data *h)
-{
- struct request *rq;
- struct request_queue *q = sdev->request_queue;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq = get_rdac_req(sdev, &h->inq, len, READ);
- if (!rq)
- goto done;
-
- /* Prepare the command. */
- rq->cmd[0] = INQUIRY;
- rq->cmd[1] = 1;
- rq->cmd[2] = page_code;
- rq->cmd[4] = len;
- rq->cmd_len = COMMAND_SIZE(INQUIRY);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
-
- err = blk_execute_rq(q, NULL, rq, 1);
- if (err == -EIO)
- err = SCSI_DH_IO;
-
- blk_put_request(rq);
-done:
- return err;
-}
-
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
char *array_name, u8 *array_id)
{
- int err, i;
- struct c8_inquiry *inqp;
+ int err = SCSI_DH_IO, i;
+ struct c8_inquiry *inqp = &h->inq.c8;
- err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c8;
+ if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
+ sizeof(struct c8_inquiry))) {
if (inqp->page_code != 0xc8)
return SCSI_DH_NOSYS;
if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
@@ -453,20 +382,20 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
*(array_name+ARRAY_LABEL_LEN-1) = '\0';
memset(array_id, 0, UNIQUE_ID_LEN);
memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
+ err = SCSI_DH_OK;
}
return err;
}
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
- int err, access_state;
+ int err = SCSI_DH_IO, access_state;
struct rdac_dh_data *tmp;
- struct c9_inquiry *inqp;
+ struct c9_inquiry *inqp = &h->inq.c9;
h->state = RDAC_STATE_ACTIVE;
- err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c9;
+ if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp,
+ sizeof(struct c9_inquiry))) {
/* detect the operating mode */
if ((inqp->avte_cvp >> 5) & 0x1)
h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
@@ -501,6 +430,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
tmp->sdev->access_state = access_state;
}
rcu_read_unlock();
+ err = SCSI_DH_OK;
}
return err;
@@ -509,12 +439,11 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
static int initialize_controller(struct scsi_device *sdev,
struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
- int err, index;
- struct c4_inquiry *inqp;
+ int err = SCSI_DH_IO, index;
+ struct c4_inquiry *inqp = &h->inq.c4;
- err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c4;
+ if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp,
+ sizeof(struct c4_inquiry))) {
/* get the controller index */
if (inqp->slot_id[1] == 0x31)
index = 0;
@@ -530,18 +459,18 @@ static int initialize_controller(struct scsi_device *sdev,
h->sdev = sdev;
}
spin_unlock(&list_lock);
+ err = SCSI_DH_OK;
}
return err;
}
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
- int err;
- struct c2_inquiry *inqp;
+ int err = SCSI_DH_IO;
+ struct c2_inquiry *inqp = &h->inq.c2;
- err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c2;
+ if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp,
+ sizeof(struct c2_inquiry))) {
/*
* If more than MODE6_MAX_LUN luns are supported, use
* mode select 10
@@ -550,36 +479,35 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
h->ctlr->use_ms10 = 1;
else
h->ctlr->use_ms10 = 0;
+ err = SCSI_DH_OK;
}
return err;
}
static int mode_select_handle_sense(struct scsi_device *sdev,
- unsigned char *sensebuf)
+ struct scsi_sense_hdr *sense_hdr)
{
- struct scsi_sense_hdr sense_hdr;
- int err = SCSI_DH_IO, ret;
+ int err = SCSI_DH_IO;
struct rdac_dh_data *h = sdev->handler_data;
- ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
- if (!ret)
+ if (!scsi_sense_valid(sense_hdr))
goto done;
- switch (sense_hdr.sense_key) {
+ switch (sense_hdr->sense_key) {
case NO_SENSE:
case ABORTED_COMMAND:
case UNIT_ATTENTION:
err = SCSI_DH_RETRY;
break;
case NOT_READY:
- if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
/* LUN Not Ready and is in the Process of Becoming
* Ready
*/
err = SCSI_DH_RETRY;
break;
case ILLEGAL_REQUEST:
- if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
+ if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
/*
* Command Lock contention
*/
@@ -592,7 +520,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"MODE_SELECT returned with sense %02x/%02x/%02x",
(char *) h->ctlr->array_name, h->ctlr->index,
- sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+ sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
done:
return err;
@@ -602,13 +530,16 @@ static void send_mode_select(struct work_struct *work)
{
struct rdac_controller *ctlr =
container_of(work, struct rdac_controller, ms_work);
- struct request *rq;
struct scsi_device *sdev = ctlr->ms_sdev;
struct rdac_dh_data *h = sdev->handler_data;
- struct request_queue *q = sdev->request_queue;
- int err, retry_cnt = RDAC_RETRY_COUNT;
+ int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
+ unsigned char cdb[COMMAND_SIZE(MODE_SELECT_10)];
+ struct scsi_sense_hdr sshdr;
+ unsigned int data_size;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -616,21 +547,19 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
-retry:
- err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev, h, &list);
- if (!rq)
- goto done;
+ retry:
+ data_size = rdac_failover_get(ctlr, &list, cdb);
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
- err = blk_execute_rq(q, NULL, rq, 1);
- blk_put_request(rq);
- if (err != SCSI_DH_OK) {
- err = mode_select_handle_sense(sdev, h->sense);
+ if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
+ &h->ctlr->mode_select, data_size, &sshdr,
+ RDAC_TIMEOUT * HZ,
+ RDAC_RETRIES, NULL, req_flags, 0)) {
+ err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
if (err == SCSI_DH_IMM_RETRY)
@@ -643,7 +572,6 @@ retry:
(char *) h->ctlr->array_name, h->ctlr->index);
}
-done:
list_for_each_entry_safe(qdata, tmp, &list, entry) {
list_del(&qdata->entry);
if (err == SCSI_DH_OK)
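[In rdac, rdac_failover_get() no longer builds a request at all: it fills ctlr->mode_select plus the CDB and returns the transfer size, leaving issue and sense decoding to send_mode_select(). One nuance worth noting: err now starts as SCSI_DH_OK and is only overwritten on failure, so the queued qdata completions fire with success status when scsi_execute_req_flags() returns 0. The retry loop, reduced to its skeleton:

    retry:
        data_size = rdac_failover_get(ctlr, &list, cdb);
        if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
                                   &h->ctlr->mode_select, data_size,
                                   &sshdr, RDAC_TIMEOUT * HZ,
                                   RDAC_RETRIES, NULL, req_flags, 0)) {
            err = mode_select_handle_sense(sdev, &sshdr);
            if (err == SCSI_DH_RETRY && retry_cnt--)
                goto retry;
        }
]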
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 258a3f9a2519..831a1c8b9f89 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -213,6 +213,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ error = scsi_init_sense_cache(shost);
+ if (error)
+ goto fail;
+
if (shost_use_blk_mq(shost)) {
error = scsi_mq_setup_tags(shost);
if (error)
@@ -226,19 +230,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
}
}
- /*
- * Note that we allocate the freelist even for the MQ case for now,
- * as we need a command set aside for scsi_reset_provider. Having
- * the full host freelist and one command available for that is a
- * little heavy-handed, but avoids introducing a special allocator
- * just for this. Eventually the structure of scsi_reset_provider
- * will need a major overhaul.
- */
- error = scsi_setup_command_freelist(shost);
- if (error)
- goto out_destroy_tags;
-
-
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
if (!dma_dev)
@@ -258,7 +249,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
error = device_add(&shost->shost_gendev);
if (error)
- goto out_destroy_freelist;
+ goto out_disable_runtime_pm;
scsi_host_set_state(shost, SHOST_RUNNING);
get_device(shost->shost_gendev.parent);
@@ -308,13 +299,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_del(&shost->shost_dev);
out_del_gendev:
device_del(&shost->shost_gendev);
- out_destroy_freelist:
+ out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_destroy_command_freelist(shost);
- out_destroy_tags:
if (shost_use_blk_mq(shost))
scsi_mq_destroy_tags(shost);
fail:
@@ -355,7 +344,6 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- scsi_destroy_command_freelist(shost);
if (shost_use_blk_mq(shost)) {
if (shost->tag_set.tags)
scsi_mq_destroy_tags(shost);
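[hosts.c swaps the per-host command freelist for a shared sense-buffer cache initialized via scsi_init_sense_cache(), and the error-unwind label is renamed to match (out_destroy_freelist becomes out_disable_runtime_pm). Conceptually, commands now live in the blk-mq request PDU sized at tagset setup; roughly, as an illustrative expression rather than the exact upstream one:

    /* per-request allocation replaces the host freelist reserve */
    shost->tag_set.cmd_size = sizeof(struct scsi_cmnd) +
                              shost->hostt->cmd_size;
]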
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cbc0c5fe5a60..c611412a8de9 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5539,8 +5539,8 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
* Retries always go down the normal I/O path.
*/
if (likely(cmd->retries == 0 &&
- cmd->request->cmd_type == REQ_TYPE_FS &&
- h->acciopath_status)) {
+ !blk_rq_is_passthrough(cmd->request) &&
+ h->acciopath_status)) {
rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
if (rc == 0)
return 0;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 919736a74ffa..aa76f36abe03 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -2095,7 +2095,7 @@ int fc_lport_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
if (rsp)
- rsp->resid_len = job->reply_payload.payload_len;
+ scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..570b2cb2da43 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2174,12 +2174,12 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
bio_data(rsp->bio), blk_rq_bytes(rsp));
if (ret > 0) {
/* positive number is the untransferred residual */
- rsp->resid_len = ret;
- req->resid_len = 0;
+ scsi_req(rsp)->resid_len = ret;
+ scsi_req(req)->resid_len = 0;
ret = 0;
} else if (ret == 0) {
- rsp->resid_len = 0;
- req->resid_len = 0;
+ scsi_req(rsp)->resid_len = 0;
+ scsi_req(req)->resid_len = 0;
}
return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d24792575169..45cbbc44f4d7 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -274,15 +274,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
switch (req_data[1]) {
case SMP_REPORT_GENERAL:
- req->resid_len -= 8;
- rsp->resid_len -= 32;
+ scsi_req(req)->resid_len -= 8;
+ scsi_req(rsp)->resid_len -= 32;
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = sas_ha->num_phys;
break;
case SMP_REPORT_MANUF_INFO:
- req->resid_len -= 8;
- rsp->resid_len -= 64;
+ scsi_req(req)->resid_len -= 8;
+ scsi_req(rsp)->resid_len -= 64;
resp_data[2] = SMP_RESP_FUNC_ACC;
memcpy(resp_data + 12, shost->hostt->name,
SAS_EXPANDER_VENDOR_ID_LEN);
@@ -295,13 +295,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_DISCOVER:
- req->resid_len -= 16;
- if ((int)req->resid_len < 0) {
- req->resid_len = 0;
+ scsi_req(req)->resid_len -= 16;
+ if ((int)scsi_req(req)->resid_len < 0) {
+ scsi_req(req)->resid_len = 0;
error = -EINVAL;
goto out;
}
- rsp->resid_len -= 56;
+ scsi_req(rsp)->resid_len -= 56;
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
break;
@@ -311,13 +311,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_REPORT_PHY_SATA:
- req->resid_len -= 16;
- if ((int)req->resid_len < 0) {
- req->resid_len = 0;
+ scsi_req(req)->resid_len -= 16;
+ if ((int)scsi_req(req)->resid_len < 0) {
+ scsi_req(req)->resid_len = 0;
error = -EINVAL;
goto out;
}
- rsp->resid_len -= 60;
+ scsi_req(rsp)->resid_len -= 60;
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
break;
@@ -331,15 +331,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
int to_write = req_data[4];
if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
- req->resid_len < base_frame_size + to_write * 4) {
+ scsi_req(req)->resid_len < base_frame_size + to_write * 4) {
resp_data[2] = SMP_RESP_INV_FRM_LEN;
break;
}
to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
req_data[3], to_write, &req_data[8]);
- req->resid_len -= base_frame_size + to_write * 4;
- rsp->resid_len -= 8;
+ scsi_req(req)->resid_len -= base_frame_size + to_write * 4;
+ scsi_req(rsp)->resid_len -= 8;
break;
}
@@ -348,13 +348,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_PHY_CONTROL:
- req->resid_len -= 44;
- if ((int)req->resid_len < 0) {
- req->resid_len = 0;
+ scsi_req(req)->resid_len -= 44;
+ if ((int)scsi_req(req)->resid_len < 0) {
+ scsi_req(req)->resid_len = 0;
error = -EINVAL;
goto out;
}
- rsp->resid_len -= 8;
+ scsi_req(rsp)->resid_len -= 8;
sas_phy_control(sas_ha, req_data[9], req_data[10],
req_data[32] >> 4, req_data[33] >> 4,
resp_data);
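[Throughout sas_host_smp, the residual bookkeeping moves into scsi_req(); note the deliberate cast when shrinking resid_len, which is unsigned, to detect a request shorter than the fixed frame just consumed:

    scsi_req(req)->resid_len -= 16;
    if ((int)scsi_req(req)->resid_len < 0) {
        scsi_req(req)->resid_len = 0;   /* clamp after underflow */
        return -EINVAL;                 /* frame too short */
    }
]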
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 0b5b423b1db0..c6d550551504 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4723,7 +4723,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* then scsi-ml does not need to handle this misbehavior.
*/
sector_sz = scmd->device->sector_size;
- if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+ if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
xfer_cnt % sector_sz)) {
sdev_printk(KERN_INFO, scmd->device,
"unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 7f1d5785bc30..e7a7a704a315 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -2057,10 +2057,10 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->name, __func__,
le16_to_cpu(mpi_reply->ResponseDataLength)));
- memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
- req->sense_len = sizeof(*mpi_reply);
- req->resid_len = 0;
- rsp->resid_len -=
+ memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply));
+ scsi_req(req)->sense_len = sizeof(*mpi_reply);
+ scsi_req(req)->resid_len = 0;
+ scsi_req(rsp)->resid_len -=
le16_to_cpu(mpi_reply->ResponseDataLength);
/* check if the resp needs to be copied from the allocated
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index ef99f62831fb..30b905080c61 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -48,6 +48,7 @@
#include <scsi/osd_sense.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_request.h>
#include "osd_debug.h"
@@ -477,11 +478,13 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
{
or->async_error = error;
or->req_errors = req->errors ? : error;
- or->sense_len = req->sense_len;
+ or->sense_len = scsi_req(req)->sense_len;
+ if (or->sense_len)
+ memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
if (or->out.req)
- or->out.residual = or->out.req->resid_len;
+ or->out.residual = scsi_req(or->out.req)->resid_len;
if (or->in.req)
- or->in.residual = or->in.req->resid_len;
+ or->in.residual = scsi_req(or->in.req)->resid_len;
}
int osd_execute_request(struct osd_request *or)
@@ -1562,10 +1565,11 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
struct bio *bio = oii->bio;
int ret;
- req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ flags);
if (IS_ERR(req))
return req;
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
for_each_bio(bio) {
struct bio *bounce_bio = bio;
@@ -1599,8 +1603,6 @@ static int _init_blk_request(struct osd_request *or,
req->timeout = or->timeout;
req->retries = or->retries;
- req->sense = or->sense;
- req->sense_len = 0;
if (has_out) {
or->out.req = req;
@@ -1612,7 +1614,7 @@ static int _init_blk_request(struct osd_request *or,
ret = PTR_ERR(req);
goto out;
}
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
or->in.req = or->request->next_rq = req;
}
} else if (has_in)
@@ -1699,8 +1701,8 @@ int osd_finalize_request(struct osd_request *or,
osd_sec_sign_cdb(&or->cdb, cap_key);
- or->request->cmd = or->cdb.buff;
- or->request->cmd_len = _osd_req_cdb_len(or);
+ scsi_req(or->request)->cmd = or->cdb.buff;
+ scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or);
return 0;
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index e8196c55b633..451de6c5e3c9 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -322,6 +322,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
/* Wakeup from interrupt */
static void osst_end_async(struct request *req, int update)
{
+ struct scsi_request *rq = scsi_req(req);
struct osst_request *SRpnt = req->end_io_data;
struct osst_tape *STp = SRpnt->stp;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
@@ -330,6 +331,8 @@ static void osst_end_async(struct request *req, int update)
#if DEBUG
STp->write_pending = 0;
#endif
+ if (rq->sense_len)
+ memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -357,17 +360,20 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
int use_sg, int timeout, int retries)
{
struct request *req;
+ struct scsi_request *rq;
struct page **pages = NULL;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
int write = (data_direction == DMA_TO_DEVICE);
- req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
SRpnt->bio = NULL;
@@ -404,11 +410,9 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
goto free_req;
}
- req->cmd_len = cmd_len;
- memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = SRpnt->sense;
- req->sense_len = 0;
+ rq->cmd_len = cmd_len;
+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
req->retries = retries;
req->end_io_data = SRpnt;
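[osst shows the canonical driver conversion: allocate with an explicit REQ_OP_SCSI_IN/OUT, call scsi_req_init(), and stage the CDB in scsi_req(req)->cmd. Because the request no longer points at a driver-owned sense buffer, the completion handler must copy sense data back itself, as osst_end_async() now does. A distilled sketch, where priv stands in for the driver's own bookkeeping:

    static void example_end_io(struct request *req, int error)
    {
        struct scsi_request *rq = scsi_req(req);

        if (rq->sense_len)
            memcpy(priv->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
        /* ... complete the waiter, put the request ... */
    }
]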
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 1bf8061ff803..40ca75bbcb9d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -921,7 +921,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, response, sizeof(response));
fw_sts_ptr += sizeof(response);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a94b0b6bd030..9281bf47cbed 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1468,7 +1468,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
+ sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
else {
@@ -1482,7 +1483,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
+ sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 02f1de18bc2b..96c33e292eba 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2244,7 +2244,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy(fstatus.reserved_3,
pkt->reserved_2, 20 * sizeof(uint8_t));
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 75455d4dab68..7bfbcfa7af40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -98,176 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain);
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
-struct scsi_host_cmd_pool {
- struct kmem_cache *cmd_slab;
- struct kmem_cache *sense_slab;
- unsigned int users;
- char *cmd_name;
- char *sense_name;
- unsigned int slab_flags;
- gfp_t gfp_mask;
-};
-
-static struct scsi_host_cmd_pool scsi_cmd_pool = {
- .cmd_name = "scsi_cmd_cache",
- .sense_name = "scsi_sense_cache",
- .slab_flags = SLAB_HWCACHE_ALIGN,
-};
-
-static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
- .cmd_name = "scsi_cmd_cache(DMA)",
- .sense_name = "scsi_sense_cache(DMA)",
- .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
- .gfp_mask = __GFP_DMA,
-};
-
-static DEFINE_MUTEX(host_cmd_pool_mutex);
-
-/**
- * scsi_host_free_command - internal function to release a command
- * @shost: host to free the command for
- * @cmd: command to release
- *
- * the command must previously have been allocated by
- * scsi_host_alloc_command.
- */
-static void
-scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- struct scsi_host_cmd_pool *pool = shost->cmd_pool;
-
- if (cmd->prot_sdb)
- kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
- kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
- kmem_cache_free(pool->cmd_slab, cmd);
-}
-
-/**
- * scsi_host_alloc_command - internal function to allocate command
- * @shost: SCSI host whose pool to allocate from
- * @gfp_mask: mask for the allocation
- *
- * Returns a fully allocated command with sense buffer and protection
- * data buffer (where applicable) or NULL on failure
- */
-static struct scsi_cmnd *
-scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
-{
- struct scsi_host_cmd_pool *pool = shost->cmd_pool;
- struct scsi_cmnd *cmd;
-
- cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
- if (!cmd)
- goto fail;
-
- cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
- gfp_mask | pool->gfp_mask);
- if (!cmd->sense_buffer)
- goto fail_free_cmd;
-
- if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
- cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
- if (!cmd->prot_sdb)
- goto fail_free_sense;
- }
-
- return cmd;
-
-fail_free_sense:
- kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
-fail_free_cmd:
- kmem_cache_free(pool->cmd_slab, cmd);
-fail:
- return NULL;
-}
-
-/**
- * __scsi_get_command - Allocate a struct scsi_cmnd
- * @shost: host to transmit command
- * @gfp_mask: allocation mask
- *
- * Description: allocate a struct scsi_cmd from host's slab, recycling from the
- * host's free_list if necessary.
- */
-static struct scsi_cmnd *
-__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
-
- if (unlikely(!cmd)) {
- unsigned long flags;
-
- spin_lock_irqsave(&shost->free_list_lock, flags);
- if (likely(!list_empty(&shost->free_list))) {
- cmd = list_entry(shost->free_list.next,
- struct scsi_cmnd, list);
- list_del_init(&cmd->list);
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
-
- if (cmd) {
- void *buf, *prot;
-
- buf = cmd->sense_buffer;
- prot = cmd->prot_sdb;
-
- memset(cmd, 0, sizeof(*cmd));
-
- cmd->sense_buffer = buf;
- cmd->prot_sdb = prot;
- }
- }
-
- return cmd;
-}
-
-/**
- * scsi_get_command - Allocate and setup a scsi command block
- * @dev: parent scsi device
- * @gfp_mask: allocator flags
- *
- * Returns: The allocated scsi command structure.
- */
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
- unsigned long flags;
-
- if (unlikely(cmd == NULL))
- return NULL;
-
- cmd->device = dev;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
- cmd->jiffies_at_alloc = jiffies;
- return cmd;
-}
-
-/**
- * __scsi_put_command - Free a struct scsi_cmnd
- * @shost: dev->host
- * @cmd: Command to free
- */
-static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- unsigned long flags;
-
- if (unlikely(list_empty(&shost->free_list))) {
- spin_lock_irqsave(&shost->free_list_lock, flags);
- if (list_empty(&shost->free_list)) {
- list_add(&cmd->list, &shost->free_list);
- cmd = NULL;
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
- }
-
- if (likely(cmd != NULL))
- scsi_host_free_command(shost, cmd);
-}
-
/**
* scsi_put_command - Free a scsi command block
* @cmd: command block to free
@@ -287,188 +117,6 @@ void scsi_put_command(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&cmd->device->list_lock, flags);
BUG_ON(delayed_work_pending(&cmd->abort_work));
-
- __scsi_put_command(cmd->device->host, cmd);
-}
-
-static struct scsi_host_cmd_pool *
-scsi_find_host_cmd_pool(struct Scsi_Host *shost)
-{
- if (shost->hostt->cmd_size)
- return shost->hostt->cmd_pool;
- if (shost->unchecked_isa_dma)
- return &scsi_cmd_dma_pool;
- return &scsi_cmd_pool;
-}
-
-static void
-scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
-{
- kfree(pool->sense_name);
- kfree(pool->cmd_name);
- kfree(pool);
-}
-
-static struct scsi_host_cmd_pool *
-scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *pool;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return NULL;
-
- pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
- pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
- if (!pool->cmd_name || !pool->sense_name) {
- scsi_free_host_cmd_pool(pool);
- return NULL;
- }
-
- pool->slab_flags = SLAB_HWCACHE_ALIGN;
- if (shost->unchecked_isa_dma) {
- pool->slab_flags |= SLAB_CACHE_DMA;
- pool->gfp_mask = __GFP_DMA;
- }
-
- if (hostt->cmd_size)
- hostt->cmd_pool = pool;
-
- return pool;
-}
-
-static struct scsi_host_cmd_pool *
-scsi_get_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *retval = NULL, *pool;
- size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
-
- /*
- * Select a command slab for this host and create it if not
- * yet existent.
- */
- mutex_lock(&host_cmd_pool_mutex);
- pool = scsi_find_host_cmd_pool(shost);
- if (!pool) {
- pool = scsi_alloc_host_cmd_pool(shost);
- if (!pool)
- goto out;
- }
-
- if (!pool->users) {
- pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
- pool->slab_flags, NULL);
- if (!pool->cmd_slab)
- goto out_free_pool;
-
- pool->sense_slab = kmem_cache_create(pool->sense_name,
- SCSI_SENSE_BUFFERSIZE, 0,
- pool->slab_flags, NULL);
- if (!pool->sense_slab)
- goto out_free_slab;
- }
-
- pool->users++;
- retval = pool;
-out:
- mutex_unlock(&host_cmd_pool_mutex);
- return retval;
-
-out_free_slab:
- kmem_cache_destroy(pool->cmd_slab);
-out_free_pool:
- if (hostt->cmd_size) {
- scsi_free_host_cmd_pool(pool);
- hostt->cmd_pool = NULL;
- }
- goto out;
-}
-
-static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *pool;
-
- mutex_lock(&host_cmd_pool_mutex);
- pool = scsi_find_host_cmd_pool(shost);
-
- /*
- * This may happen if a driver has a mismatched get and put
- * of the command pool; the driver should be implicated in
- * the stack trace
- */
- BUG_ON(pool->users == 0);
-
- if (!--pool->users) {
- kmem_cache_destroy(pool->cmd_slab);
- kmem_cache_destroy(pool->sense_slab);
- if (hostt->cmd_size) {
- scsi_free_host_cmd_pool(pool);
- hostt->cmd_pool = NULL;
- }
- }
- mutex_unlock(&host_cmd_pool_mutex);
-}
-
-/**
- * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
- * @shost: host to allocate the freelist for.
- *
- * Description: The command freelist protects against system-wide out of memory
- * deadlock by preallocating one SCSI command structure for each host, so the
- * system can always write to a swap file on a device associated with that host.
- *
- * Returns: Nothing.
- */
-int scsi_setup_command_freelist(struct Scsi_Host *shost)
-{
- const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
- struct scsi_cmnd *cmd;
-
- spin_lock_init(&shost->free_list_lock);
- INIT_LIST_HEAD(&shost->free_list);
-
- shost->cmd_pool = scsi_get_host_cmd_pool(shost);
- if (!shost->cmd_pool)
- return -ENOMEM;
-
- /*
- * Get one backup command for this host.
- */
- cmd = scsi_host_alloc_command(shost, gfp_mask);
- if (!cmd) {
- scsi_put_host_cmd_pool(shost);
- shost->cmd_pool = NULL;
- return -ENOMEM;
- }
- list_add(&cmd->list, &shost->free_list);
- return 0;
-}
-
-/**
- * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
- * @shost: host whose freelist is going to be destroyed
- */
-void scsi_destroy_command_freelist(struct Scsi_Host *shost)
-{
- /*
- * If cmd_pool is NULL the free list was not initialized, so
- * do not attempt to release resources.
- */
- if (!shost->cmd_pool)
- return;
-
- while (!list_empty(&shost->free_list)) {
- struct scsi_cmnd *cmd;
-
- cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
- list_del_init(&cmd->list);
- scsi_host_free_command(shost, cmd);
- }
- shost->cmd_pool = NULL;
- scsi_put_host_cmd_pool(shost);
}
#ifdef CONFIG_SCSI_LOGGING
@@ -590,7 +238,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd);
- if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(cmd->request)) {
int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
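
A note on the conversion pattern above: the tri-state cmd_type field is gone, so driver-side checks collapse into the new predicate helpers from <linux/blkdev.h>. A minimal sketch of the mapping, with hypothetical handler names (not from this series):

#include <linux/blkdev.h>

static void handle_scsi_passthrough(struct request *rq);
static void handle_driver_private(struct request *rq);
static void handle_fs_request(struct request *rq);

/* Dispatch on the request class using the new helpers. */
static void my_rq_dispatch(struct request *rq)
{
	if (blk_rq_is_scsi(rq))			/* was cmd_type == REQ_TYPE_BLOCK_PC */
		handle_scsi_passthrough(rq);
	else if (blk_rq_is_private(rq))		/* was cmd_type == REQ_TYPE_DRV_PRIV */
		handle_driver_private(rq);
	else					/* was cmd_type == REQ_TYPE_FS */
		handle_fs_request(rq);
}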
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 996e134d79fa..9e82fa5715bc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1106,7 +1106,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
{
- if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(scmd->request)) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_action)
rtn = sdrv->eh_action(scmd, rtn);
@@ -1746,7 +1746,7 @@ check_type:
* the check condition was retryable.
*/
if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
+ blk_rq_is_passthrough(scmd->request))
return 1;
else
return 0;
@@ -1968,25 +1968,25 @@ static void eh_lock_door_done(struct request *req, int uptodate)
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
struct request *req;
+ struct scsi_request *rq;
/*
* blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
* request becomes available
*/
- req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return;
+ rq = scsi_req(req);
+ scsi_req_init(req);
- blk_rq_set_block_pc(req);
-
- req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
- req->cmd[1] = 0;
- req->cmd[2] = 0;
- req->cmd[3] = 0;
- req->cmd[4] = SCSI_REMOVAL_PREVENT;
- req->cmd[5] = 0;
-
- req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+ rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ rq->cmd[1] = 0;
+ rq->cmd[2] = 0;
+ rq->cmd[3] = 0;
+ rq->cmd[4] = SCSI_REMOVAL_PREVENT;
+ rq->cmd[5] = 0;
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
@@ -2331,7 +2331,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
{
struct scsi_cmnd *scmd;
struct Scsi_Host *shost = dev->host;
- struct request req;
+ struct request *rq;
unsigned long flags;
int error = 0, rtn, val;
@@ -2346,14 +2346,16 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
return -EIO;
error = -EIO;
- scmd = scsi_get_command(dev, GFP_KERNEL);
- if (!scmd)
+ rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
+ shost->hostt->cmd_size, GFP_KERNEL);
+ if (!rq)
goto out_put_autopm_host;
+ blk_rq_init(NULL, rq);
- blk_rq_init(NULL, &req);
- scmd->request = &req;
-
- scmd->cmnd = req.cmd;
+ scmd = (struct scsi_cmnd *)(rq + 1);
+ scsi_init_command(dev, scmd);
+ scmd->request = rq;
+ scmd->cmnd = scsi_req(rq)->cmd;
scmd->scsi_done = scsi_reset_provider_done_command;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -2413,6 +2415,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scsi_run_host_queues(shost);
scsi_put_command(scmd);
+ kfree(rq);
out_put_autopm_host:
scsi_autopm_put_host(shost);
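
The eh_lock_door rewrite above shows the new passthrough idiom: the CDB, sense and residual now live in a struct scsi_request wrapped around the request. A minimal sketch of issuing a CDB under the 4.11-era API; send_test_unit_ready() is a hypothetical helper, not part of this series:

#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_request.h>

/* Issue TEST UNIT READY through the passthrough path; sketch only. */
static int send_test_unit_ready(struct request_queue *q)
{
	struct request *req;
	struct scsi_request *rq;
	int err;

	req = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);
	rq = scsi_req(req);
	scsi_req_init(req);

	rq->cmd[0] = TEST_UNIT_READY;		/* remaining CDB bytes stay zero */
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
	req->timeout = 10 * HZ;

	err = blk_execute_rq(q, NULL, req, 1);	/* at_head, no gendisk */
	blk_put_request(req);
	return err ? -EIO : 0;
}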
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9e1e141af9c..90f65c8f487a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -37,8 +37,59 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+static struct kmem_cache *scsi_sdb_cache;
+static struct kmem_cache *scsi_sense_cache;
+static struct kmem_cache *scsi_sense_isadma_cache;
+static DEFINE_MUTEX(scsi_sense_cache_mutex);
-struct kmem_cache *scsi_sdb_cache;
+static inline struct kmem_cache *
+scsi_select_sense_cache(struct Scsi_Host *shost)
+{
+ return shost->unchecked_isa_dma ?
+ scsi_sense_isadma_cache : scsi_sense_cache;
+}
+
+static void scsi_free_sense_buffer(struct Scsi_Host *shost,
+ unsigned char *sense_buffer)
+{
+ kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+}
+
+static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+ gfp_t gfp_mask, int numa_node)
+{
+ return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
+ numa_node);
+}
+
+int scsi_init_sense_cache(struct Scsi_Host *shost)
+{
+ struct kmem_cache *cache;
+ int ret = 0;
+
+ cache = scsi_select_sense_cache(shost);
+ if (cache)
+ return 0;
+
+ mutex_lock(&scsi_sense_cache_mutex);
+ if (shost->unchecked_isa_dma) {
+ scsi_sense_isadma_cache =
+ kmem_cache_create("scsi_sense_cache(DMA)",
+ SCSI_SENSE_BUFFERSIZE, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+ if (!scsi_sense_isadma_cache)
+ ret = -ENOMEM;
+ } else {
+ scsi_sense_cache =
+ kmem_cache_create("scsi_sense_cache",
+ SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!scsi_sense_cache)
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&scsi_sense_cache_mutex);
+ return ret;
+}
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
@@ -168,22 +219,23 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req_flags_t rq_flags, int *resid)
{
struct request *req;
- int write = (data_direction == DMA_TO_DEVICE);
+ struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
+ req = blk_get_request(sdev->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(req))
return ret;
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM))
goto out;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = sense;
- req->sense_len = 0;
+ rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
@@ -200,11 +252,13 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
- memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
+ if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
+ memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
if (resid)
- *resid = req->resid_len;
+ *resid = rq->resid_len;
+ if (sense && rq->sense_len)
+ memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
ret = req->errors;
out:
blk_put_request(req);
@@ -529,7 +583,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
- if (cmd->request->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(cmd->request)) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
@@ -645,14 +699,13 @@ static bool scsi_end_request(struct request *req, int error,
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
+ scsi_release_buffers(cmd);
+ scsi_put_command(cmd);
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_release_buffers(cmd);
-
- scsi_put_command(cmd);
scsi_run_queue(q);
}
@@ -754,18 +807,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
- if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
+ if (blk_rq_is_passthrough(req)) {
if (result) {
- if (sense_valid && req->sense) {
+ if (sense_valid) {
/*
* SG_IO wants current and deferred errors
*/
- int len = 8 + cmd->sense_buffer[7];
-
- if (len > SCSI_SENSE_BUFFERSIZE)
- len = SCSI_SENSE_BUFFERSIZE;
- memcpy(req->sense, cmd->sense_buffer, len);
- req->sense_len = len;
+ scsi_req(req)->sense_len =
+ min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
}
if (!sense_deferred)
error = __scsi_error_from_host_byte(cmd, result);
@@ -775,14 +825,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
req->errors = cmd->result;
- req->resid_len = scsi_get_resid(cmd);
+ scsi_req(req)->resid_len = scsi_get_resid(cmd);
if (scsi_bidi_cmnd(cmd)) {
/*
* Bidi commands Must be complete as a whole,
* both sides at once.
*/
- req->next_rq->resid_len = scsi_in(cmd)->resid;
+ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
if (scsi_end_request(req, 0, blk_rq_bytes(req),
blk_rq_bytes(req->next_rq)))
BUG();
@@ -790,15 +840,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
/*
- * Certain non BLOCK_PC requests are commands that don't
- * actually transfer anything (FLUSH), so cannot use
+ * Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
error = __scsi_error_from_host_byte(cmd, result);
}
- /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+ /* no bidi support for !blk_rq_is_passthrough yet */
BUG_ON(blk_bidi_rq(req));
/*
@@ -810,8 +859,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
blk_rq_sectors(req), good_bytes));
/*
- * Recovered errors need reporting, but they're always treated
- * as success, so fiddle the result code here. For BLOCK_PC
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
* we already took a copy of the original into rq->errors which
* is what gets returned to the user
*/
@@ -825,7 +874,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
- /* BLOCK_PC may have set error */
+ /* for passthrough requests, error may already be set */
error = 0;
}
@@ -1109,42 +1158,33 @@ err_exit:
}
EXPORT_SYMBOL(scsi_init_io);
-static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
- struct request *req)
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd;
-
- if (!req->special) {
- /* Bail if we can't get a reference to the device */
- if (!get_device(&sdev->sdev_gendev))
- return NULL;
-
- cmd = scsi_get_command(sdev, GFP_ATOMIC);
- if (unlikely(!cmd)) {
- put_device(&sdev->sdev_gendev);
- return NULL;
- }
- req->special = cmd;
- } else {
- cmd = req->special;
- }
+ void *buf = cmd->sense_buffer;
+ void *prot = cmd->prot_sdb;
+ unsigned long flags;
- /* pull a tag out of the request if we have one */
- cmd->tag = req->tag;
- cmd->request = req;
+ /* zero out the cmd, except for the embedded scsi_request */
+ memset((char *)cmd + sizeof(cmd->req), 0,
+ sizeof(*cmd) - sizeof(cmd->req));
- cmd->cmnd = req->cmd;
- cmd->prot_op = SCSI_PROT_NORMAL;
+ cmd->device = dev;
+ cmd->sense_buffer = buf;
+ cmd->prot_sdb = prot;
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ cmd->jiffies_at_alloc = jiffies;
- return cmd;
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_add_tail(&cmd->list, &dev->cmd_list);
+ spin_unlock_irqrestore(&dev->list_lock, flags);
}
-static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
/*
- * BLOCK_PC requests may transfer data, in which case they must
+ * Passthrough requests may transfer data, in which case they must
* have a bio attached to them. Or they might contain a SCSI command
* that does not transfer data, in which case they may optionally
* submit a request without an attached bio.
@@ -1159,14 +1199,15 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
- cmd->cmd_len = req->cmd_len;
+ cmd->cmd_len = scsi_req(req)->cmd_len;
+ cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
/*
- * Setup a REQ_TYPE_FS command. These are simple request from filesystems
+ * Set up a normal block command. These are simple requests from filesystems
* that still need to be translated to SCSI CDBs from the ULD.
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1179,6 +1220,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
return ret;
}
+ cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
memset(cmd->cmnd, 0, BLK_MAX_CDB);
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1194,14 +1236,10 @@ static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
+ if (blk_rq_is_scsi(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+ else
return scsi_setup_fs_cmnd(sdev, req);
- case REQ_TYPE_BLOCK_PC:
- return scsi_setup_blk_pc_cmnd(sdev, req);
- default:
- return BLKPREP_KILL;
- }
}
static int
@@ -1297,19 +1335,28 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
int ret;
ret = scsi_prep_state_check(sdev, req);
if (ret != BLKPREP_OK)
goto out;
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd)) {
- ret = BLKPREP_DEFER;
- goto out;
+ if (!req->special) {
+ /* Bail if we can't get a reference to the device */
+ if (unlikely(!get_device(&sdev->sdev_gendev))) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ scsi_init_command(sdev, cmd);
+ req->special = cmd;
}
+ cmd->tag = req->tag;
+ cmd->request = req;
+ cmd->prot_op = SCSI_PROT_NORMAL;
+
ret = scsi_setup_cmnd(sdev, req);
out:
return scsi_prep_return(q, req, ret);
@@ -1826,7 +1873,9 @@ static int scsi_mq_prep_fn(struct request *req)
unsigned char *sense_buf = cmd->sense_buffer;
struct scatterlist *sg;
- memset(cmd, 0, sizeof(struct scsi_cmnd));
+ /* zero out the cmd, except for the embedded scsi_request */
+ memset((char *)cmd + sizeof(cmd->req), 0,
+ sizeof(*cmd) - sizeof(cmd->req));
req->special = cmd;
@@ -1836,7 +1885,6 @@ static int scsi_mq_prep_fn(struct request *req)
cmd->tag = req->tag;
- cmd->cmnd = req->cmd;
cmd->prot_op = SCSI_PROT_NORMAL;
INIT_LIST_HEAD(&cmd->list);
@@ -1911,7 +1959,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
-
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
if (ret != BLK_MQ_RQ_QUEUE_OK)
@@ -1981,21 +2028,24 @@ static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
- numa_node);
+ cmd->sense_buffer =
+ scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
+ cmd->req.sense = cmd->sense_buffer;
return 0;
}
static void scsi_exit_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- kfree(cmd->sense_buffer);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2028,7 +2078,7 @@ static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return bounce_limit;
}
-static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
struct device *dev = shost->dma_dev;
@@ -2063,28 +2113,64 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
*/
blk_queue_dma_alignment(q, 0x03);
}
+EXPORT_SYMBOL_GPL(__scsi_init_queue);
-struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
- request_fn_proc *request_fn)
+static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
- struct request_queue *q;
+ struct Scsi_Host *shost = q->rq_alloc_data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- q = blk_init_queue(request_fn, NULL);
- if (!q)
- return NULL;
- __scsi_init_queue(shost, q);
- return q;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+ if (!cmd->sense_buffer)
+ goto fail;
+ cmd->req.sense = cmd->sense_buffer;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
+ if (!cmd->prot_sdb)
+ goto fail_free_sense;
+ }
+
+ return 0;
+
+fail_free_sense:
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
+fail:
+ return -ENOMEM;
+}
+
+static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+{
+ struct Scsi_Host *shost = q->rq_alloc_data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
-EXPORT_SYMBOL(__scsi_alloc_queue);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
+ struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
- q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+ q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
if (!q)
return NULL;
+ q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
+ q->rq_alloc_data = shost;
+ q->request_fn = scsi_request_fn;
+ q->init_rq_fn = scsi_init_rq;
+ q->exit_rq_fn = scsi_exit_rq;
+
+ if (blk_init_allocated_queue(q) < 0) {
+ blk_cleanup_queue(q);
+ return NULL;
+ }
+ __scsi_init_queue(shost, q);
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_unprep_rq(q, scsi_unprep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
@@ -2208,6 +2294,8 @@ int __init scsi_init_queue(void)
void scsi_exit_queue(void)
{
+ kmem_cache_destroy(scsi_sense_cache);
+ kmem_cache_destroy(scsi_sense_isadma_cache);
kmem_cache_destroy(scsi_sdb_cache);
}
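
scsi_alloc_queue() above is the first user of the new init_rq_fn/exit_rq_fn hooks, which let legacy (non-mq) queues carry per-request driver payload sized by q->cmd_size. A minimal sketch of the same wiring for a hypothetical payload struct my_cmd:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>	/* blk_mq_rq_to_pdu() */
#include <linux/slab.h>

struct my_cmd {			/* hypothetical per-request payload */
	void *bounce;
};

static int my_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->bounce = kmalloc(64, gfp);
	return cmd->bounce ? 0 : -ENOMEM;
}

static void my_exit_rq(struct request_queue *q, struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->bounce);
}

static struct request_queue *my_alloc_queue(request_fn_proc *fn)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;
	q->cmd_size = sizeof(struct my_cmd);	/* payload follows struct request */
	q->request_fn = fn;
	q->init_rq_fn = my_init_rq;
	q->exit_rq_fn = my_exit_rq;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}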
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 193636a59adf..99bfc985e190 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -30,8 +30,8 @@ extern void scsi_exit_hosts(void);
/* scsi.c */
extern bool scsi_use_blk_mq;
-extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
-extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
+int scsi_init_sense_cache(struct Scsi_Host *shost);
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -96,7 +96,6 @@ extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
struct request_queue;
struct request;
-extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 03577bde6ac5..13dcb9ba823c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3765,7 +3765,6 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
struct device *dev = &shost->shost_gendev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
- int err;
char bsg_name[20];
fc_host->rqst_q = NULL;
@@ -3776,23 +3775,14 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q) {
- dev_err(dev,
- "fc_host%d: bsg interface failed to initialize - no request queue\n",
- shost->host_no);
- return -ENOMEM;
- }
-
- err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
- i->f->dd_bsg_size);
- if (err) {
+ q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+ if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
shost->host_no);
- blk_cleanup_queue(q);
- return err;
+ return PTR_ERR(q);
}
+ __scsi_init_queue(shost, q);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
@@ -3824,26 +3814,18 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
struct device *dev = &rport->dev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
- int err;
rport->rqst_q = NULL;
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q) {
- dev_err(dev, "bsg interface failed to initialize - no request queue\n");
- return -ENOMEM;
- }
-
- err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
- if (err) {
+ q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
+ if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
- blk_cleanup_queue(q);
- return err;
+ return PTR_ERR(q);
}
-
+ __scsi_init_queue(shost, q);
blk_queue_prep_rq(q, fc_bsg_rport_prep);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
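
bsg_setup_queue() now allocates and returns the queue itself instead of taking one, so the transport classes apply host limits afterwards via the newly exported __scsi_init_queue(). A sketch of the new calling convention; my_bsg_dispatch and the timeout are hypothetical, and the __scsi_init_queue() declaration is assumed to come from the driver-local scsi_priv.h:

#include <linux/bsg-lib.h>
#include <scsi/scsi_host.h>
#include "scsi_priv.h"		/* assumed home of __scsi_init_queue() */

static int my_bsg_dispatch(struct bsg_job *job);	/* hypothetical */

static int my_attach_bsg(struct Scsi_Host *shost, struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, "my_bsg0", my_bsg_dispatch, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	__scsi_init_queue(shost, q);		/* apply host DMA/segment limits */
	blk_queue_rq_timeout(q, 30 * HZ);
	return 0;
}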
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 42bca619f854..568c9f26a561 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1537,24 +1537,18 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
struct request_queue *q;
char bsg_name[20];
- int ret;
if (!i->iscsi_transport->bsg_request)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
-
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q)
- return -ENOMEM;
-
- ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
- if (ret) {
+ q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+ if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
- blk_cleanup_queue(q);
- return ret;
+ return PTR_ERR(q);
}
+ __scsi_init_queue(shost, q);
ihost->bsg_q = q;
return 0;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 60b651bfaa01..126a5ee00987 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -33,6 +33,7 @@
#include <linux/bsg.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -177,6 +178,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
while ((req = blk_fetch_request(q)) != NULL) {
spin_unlock_irq(q->queue_lock);
+ scsi_req(req)->resid_len = blk_rq_bytes(req);
+ if (req->next_rq)
+ scsi_req(req->next_rq)->resid_len =
+ blk_rq_bytes(req->next_rq);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
ret = handler(shost, rphy, req);
req->errors = ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1f5d92a25a49..40b4038c019e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -781,7 +781,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
rq->special_vec.bv_len = len;
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- rq->resid_len = len;
+ scsi_req(rq)->resid_len = len;
ret = scsi_init_io(cmd);
out:
@@ -1179,7 +1179,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
__free_page(rq->special_vec.bv_page);
- if (SCpnt->cmnd != rq->cmd) {
+ if (SCpnt->cmnd != scsi_req(rq)->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
@@ -1750,9 +1750,6 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
unsigned int good_bytes;
- if (scmd->request->cmd_type != REQ_TYPE_FS)
- return 0;
-
info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
&bad_lba);
@@ -3082,6 +3079,23 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
put_device(&sdkp->dev);
}
+struct sd_devt {
+ int idx;
+ struct disk_devt disk_devt;
+};
+
+void sd_devt_release(struct disk_devt *disk_devt)
+{
+ struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
+ disk_devt);
+
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sd_devt->idx);
+ spin_unlock(&sd_index_lock);
+
+ kfree(sd_devt);
+}
+
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3103,6 +3117,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
static int sd_probe(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+ struct sd_devt *sd_devt;
struct scsi_disk *sdkp;
struct gendisk *gd;
int index;
@@ -3128,9 +3143,13 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
+ sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
+ if (!sd_devt)
+ goto out_free;
+
gd = alloc_disk(SD_MINORS);
if (!gd)
- goto out_free;
+ goto out_free_devt;
do {
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3146,6 +3165,11 @@ static int sd_probe(struct device *dev)
goto out_put;
}
+ atomic_set(&sd_devt->disk_devt.count, 1);
+ sd_devt->disk_devt.release = sd_devt_release;
+ sd_devt->idx = index;
+ gd->disk_devt = &sd_devt->disk_devt;
+
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3185,13 +3209,14 @@ static int sd_probe(struct device *dev)
return 0;
out_free_index:
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, index);
- spin_unlock(&sd_index_lock);
+ put_disk_devt(&sd_devt->disk_devt);
+ sd_devt = NULL;
out_put:
put_disk(gd);
out_free:
kfree(sdkp);
+ out_free_devt:
+ kfree(sd_devt);
out:
scsi_autopm_put_device(sdp);
return error;
@@ -3250,10 +3275,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, sdkp->index);
- spin_unlock(&sd_index_lock);
-
+ put_disk_devt(disk->disk_devt);
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
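
The sd_devt changes above decouple the device-number lifetime from scsi_disk teardown through the refcounted disk_devt object. A sketch of the same pattern for a hypothetical wrapper (the struct disk_devt fields are inferred from the usage in this diff):

#include <linux/genhd.h>
#include <linux/slab.h>

struct my_devt {
	int idx;
	struct disk_devt disk_devt;
};

static void my_devt_release(struct disk_devt *disk_devt)
{
	struct my_devt *d = container_of(disk_devt, struct my_devt, disk_devt);

	/* return d->idx to its ida here, then free the wrapper */
	kfree(d);
}

static int my_bind_devt(struct gendisk *gd, int idx)
{
	struct my_devt *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	atomic_set(&d->disk_devt.count, 1);
	d->disk_devt.release = my_devt_release;
	d->idx = idx;
	gd->disk_devt = &d->disk_devt;	/* dropped later via put_disk_devt() */
	return 0;
}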
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index dbe5b4b95df0..e0e308b7e01a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -781,9 +781,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
- if (srp->rq->cmd != srp->rq->__cmd)
- kfree(srp->rq->cmd);
-
+ scsi_req_free_cmd(scsi_req(srp->rq));
blk_end_request_all(srp->rq, -EIO);
srp->rq = NULL;
}
@@ -1279,6 +1277,7 @@ static void
sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
+ struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
@@ -1297,9 +1296,9 @@ sg_rq_end_io(struct request *rq, int uptodate)
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
- sense = rq->sense;
+ sense = req->sense;
result = rq->errors;
- resid = rq->resid_len;
+ resid = req->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
@@ -1333,6 +1332,10 @@ sg_rq_end_io(struct request *rq, int uptodate)
sdp->device->changed = 1;
}
}
+
+ if (req->sense_len)
+ memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
+
/* Rely on write phase to clean out srp status values, so no "else" */
/*
@@ -1342,8 +1345,7 @@ sg_rq_end_io(struct request *rq, int uptodate)
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
- if (rq->cmd != rq->__cmd)
- kfree(rq->cmd);
+ scsi_req_free_cmd(scsi_req(rq));
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1658,6 +1660,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
+ struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
@@ -1695,22 +1698,23 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
+ req = scsi_req(rq);
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
if (hp->cmd_len > BLK_MAX_CDB)
- rq->cmd = long_cmdp;
- memcpy(rq->cmd, cmd, hp->cmd_len);
- rq->cmd_len = hp->cmd_len;
+ req->cmd = long_cmdp;
+ memcpy(req->cmd, cmd, hp->cmd_len);
+ req->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
- rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
@@ -1786,8 +1790,7 @@ sg_finish_rem_req(Sg_request *srp)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
- if (srp->rq->cmd != srp->rq->__cmd)
- kfree(srp->rq->cmd);
+ scsi_req_free_cmd(scsi_req(srp->rq));
blk_put_request(srp->rq);
}
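
On the completion side, as sg_rq_end_io() above shows, drivers no longer point req->sense at their own buffer; they copy it out of the scsi_request after the fact. A minimal end_io sketch, with my_sense as a hypothetical destination buffer:

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>	/* SCSI_SENSE_BUFFERSIZE */
#include <scsi/scsi_request.h>

static unsigned char my_sense[SCSI_SENSE_BUFFERSIZE];	/* hypothetical */

static void my_end_io(struct request *rq, int error)
{
	struct scsi_request *req = scsi_req(rq);

	int result = rq->errors;		/* result word still lives on the request */
	unsigned int resid = req->resid_len;	/* residual moved into scsi_request */

	if (req->sense_len)			/* sense must now be copied out */
		memcpy(my_sense, req->sense, SCSI_SENSE_BUFFERSIZE);

	(void)result;
	(void)resid;
	__blk_put_request(rq->q, rq);
}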
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8702d9cf8040..11c0dfb3dfa3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4499,7 +4499,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->offload_enabled &&
- scmd->request->cmd_type == REQ_TYPE_FS) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 ||
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 013bfe049a48..0b29b9329b1c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -437,14 +437,17 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
goto out;
}
- if (rq_data_dir(rq) == WRITE) {
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE:
if (!cd->writeable)
goto out;
SCpnt->cmnd[0] = WRITE_10;
cd->cdi.media_written = 1;
- } else if (rq_data_dir(rq) == READ) {
+ break;
+ case REQ_OP_READ:
SCpnt->cmnd[0] = READ_10;
- } else {
+ break;
+ default:
blk_dump_rq_flags(rq, "Unknown sr command");
goto out;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5f35b863e1a7..81212d4bd9bf 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -475,7 +475,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
ktime_t now;
now = ktime_get();
- if (req->cmd[0] == WRITE_6) {
+ if (scsi_req(req)->cmd[0] == WRITE_6) {
now = ktime_sub(now, STp->stats->write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -489,7 +489,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
} else
atomic64_add(atomic_read(&STp->stats->last_write_size),
&STp->stats->write_byte_cnt);
- } else if (req->cmd[0] == READ_6) {
+ } else if (scsi_req(req)->cmd[0] == READ_6) {
now = ktime_sub(now, STp->stats->read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -514,15 +514,18 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
static void st_scsi_execute_end(struct request *req, int uptodate)
{
struct st_request *SRpnt = req->end_io_data;
+ struct scsi_request *rq = scsi_req(req);
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
- STp->buffer->cmdstat.residual = req->resid_len;
+ STp->buffer->cmdstat.residual = rq->resid_len;
st_do_stats(STp, req);
tmp = SRpnt->bio;
+ if (rq->sense_len)
+ memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -535,17 +538,18 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int timeout, int retries)
{
struct request *req;
+ struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
- int write = (data_direction == DMA_TO_DEVICE);
struct scsi_tape *STp = SRpnt->stp;
- req = blk_get_request(SRpnt->stp->device->request_queue, write,
- GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
-
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
@@ -571,11 +575,9 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
}
SRpnt->bio = req->bio;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
- memset(req->cmd, 0, BLK_MAX_CDB);
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = SRpnt->sense;
- req->sense_len = 0;
+ rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ memset(rq->cmd, 0, BLK_MAX_CDB);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
req->retries = retries;
req->end_io_data = SRpnt;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 88db6992420e..bcf7d05d1aab 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -260,7 +260,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
{
int wanted_len = cmd->SCp.this_residual;
- if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
+ if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
return 0;
return wanted_len;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 257361280510..e2bc99980f75 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -4,6 +4,7 @@ menuconfig TARGET_CORE
depends on SCSI && BLOCK
select CONFIGFS_FS
select CRC_T10DIF
+ select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 04d7aa7390d0..a8f8e53f2f57 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1005,7 +1005,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
scsi_command_size(cmd->t_task_cdb));
req = blk_get_request(pdv->pdv_sd->request_queue,
- (cmd->data_direction == DMA_TO_DEVICE),
+ cmd->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed\n");
@@ -1013,7 +1014,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
goto fail;
}
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
if (sgl) {
ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
@@ -1023,10 +1024,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
- req->cmd_len = scsi_command_size(pt->pscsi_cdb);
- req->cmd = &pt->pscsi_cdb[0];
- req->sense = &pt->pscsi_sense[0];
- req->sense_len = 0;
+ scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ scsi_req(req)->cmd = &pt->pscsi_cdb[0];
if (pdv->pdv_sd->type == TYPE_DISK)
req->timeout = PS_TIMEOUT_DISK;
else
@@ -1075,7 +1074,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
struct pscsi_plugin_task *pt = cmd->priv;
pt->pscsi_result = req->errors;
- pt->pscsi_resid = req->resid_len;
+ pt->pscsi_resid = scsi_req(req)->resid_len;
cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
if (cmd->scsi_status) {
@@ -1096,6 +1095,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
break;
}
+ memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
__blk_put_request(req->q, req);
kfree(pt);
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3c47614a4b32..73031ec54a7b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -884,6 +884,8 @@ static void bdev_evict_inode(struct inode *inode)
spin_lock(&bdev_lock);
list_del_init(&bdev->bd_list);
spin_unlock(&bdev_lock);
+ if (bdev->bd_bdi != &noop_backing_dev_info)
+ bdi_put(bdev->bd_bdi);
}
static const struct super_operations bdev_sops = {
@@ -954,6 +956,21 @@ static int bdev_set(struct inode *inode, void *data)
static LIST_HEAD(all_bdevs);
+/*
+ * If there is a bdev inode for this device, unhash it so that it gets evicted
+ * as soon as last inode reference is dropped.
+ */
+void bdev_unhash_inode(dev_t dev)
+{
+ struct inode *inode;
+
+ inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
+ if (inode) {
+ remove_inode_hash(inode);
+ iput(inode);
+ }
+}
+
struct block_device *bdget(dev_t dev)
{
struct block_device *bdev;
@@ -971,6 +988,7 @@ struct block_device *bdget(dev_t dev)
bdev->bd_contains = NULL;
bdev->bd_super = NULL;
bdev->bd_inode = inode;
+ bdev->bd_bdi = &noop_backing_dev_info;
bdev->bd_block_size = (1 << inode->i_blkbits);
bdev->bd_part_count = 0;
bdev->bd_invalidated = 0;
@@ -1527,6 +1545,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
+ if (bdev->bd_bdi == &noop_backing_dev_info)
+ bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
if (!partno) {
ret = -ENXIO;
@@ -1622,6 +1642,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
bdev->bd_queue = NULL;
+ bdi_put(bdev->bd_bdi);
+ bdev->bd_bdi = &noop_backing_dev_info;
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
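
With bd_bdi the backing_dev_info is now reference-counted rather than embedded in the queue, so callers that may outlive the device can pin it. A small hypothetical helper illustrating the bdi_get()/bdi_put() pairing:

#include <linux/backing-dev.h>
#include <linux/fs.h>

static bool my_bdev_read_congested(struct block_device *bdev)
{
	struct backing_dev_info *bdi = bdi_get(bdev->bd_bdi);
	bool congested = bdi_read_congested(bdi);

	bdi_put(bdi);	/* reference pins the bdi across device removal */
	return congested;
}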
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 18004169552c..37a31b12bb0c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1800,7 +1800,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
if (!device->bdev)
continue;
- bdi = blk_get_backing_dev_info(device->bdev);
+ bdi = device->bdev->bd_bdi;
if (bdi_congested(bdi, bdi_bits)) {
ret = 1;
break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3c3c69c0eee4..b2e70073a10d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -366,7 +366,7 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
*/
blk_start_plug(&plug);
- bdi = blk_get_backing_dev_info(device->bdev);
+ bdi = device->bdev->bd_bdi;
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index f17fcf89e18e..7fb1732a3630 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -248,6 +248,42 @@ static struct file_system_type debug_fs_type = {
};
MODULE_ALIAS_FS("debugfs");
+/**
+ * debugfs_lookup() - look up an existing debugfs file
+ * @name: a pointer to a string containing the name of the file to look up.
+ * @parent: a pointer to the parent dentry of the file.
+ *
+ * This function will return a pointer to a dentry if it succeeds. If the file
+ * doesn't exist or an error occurs, %NULL will be returned. The returned
+ * dentry must be passed to dput() when it is no longer needed.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+
+ if (IS_ERR(parent))
+ return NULL;
+
+ if (!parent)
+ parent = debugfs_mount->mnt_root;
+
+ inode_lock(d_inode(parent));
+ dentry = lookup_one_len(name, parent, strlen(name));
+ inode_unlock(d_inode(parent));
+
+ if (IS_ERR(dentry))
+ return NULL;
+ if (!d_really_is_positive(dentry)) {
+ dput(dentry);
+ return NULL;
+ }
+ return dentry;
+}
+EXPORT_SYMBOL_GPL(debugfs_lookup);
+
static struct dentry *start_creating(const char *name, struct dentry *parent)
{
struct dentry *dentry;
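
The new debugfs_lookup() returns NULL on a miss but ERR_PTR(-ENODEV) from the !CONFIG_DEBUG_FS stub, so portable callers should treat both as absent. A hypothetical existence check:

#include <linux/debugfs.h>

static bool my_debugfs_exists(const char *name, struct dentry *parent)
{
	struct dentry *d = debugfs_lookup(name, parent);

	if (IS_ERR_OR_NULL(d))
		return false;
	dput(d);	/* debugfs_lookup() returns a counted dentry */
	return true;
}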
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a34308df927f..c0e5b9a8bf5f 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1226,7 +1226,7 @@ static int set_gfs2_super(struct super_block *s, void *data)
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
- s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 47febcf99185..20b1c17320d5 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -104,6 +104,7 @@ config NFSD_SCSILAYOUT
depends on NFSD_V4 && BLOCK
select NFSD_PNFS
select EXPORTFS_BLOCK_OPS
+ select BLK_SCSI_REQUEST
help
This option enables support for exporting pNFS SCSI layouts
in the kernel's NFS server. The pNFS SCSI layout enables NFS
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 0780ff864539..a06115e31612 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -10,6 +10,7 @@
#include <linux/nfsd/debug.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
+#include <scsi/scsi_request.h>
#include "blocklayoutxdr.h"
#include "pnfs.h"
@@ -213,6 +214,7 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
{
struct request_queue *q = bdev->bd_disk->queue;
struct request *rq;
+ struct scsi_request *req;
size_t bufflen = 252, len, id_len;
u8 *buf, *d, type, assoc;
int error;
@@ -221,23 +223,24 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
if (!buf)
return -ENOMEM;
- rq = blk_get_request(q, READ, GFP_KERNEL);
+ rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
error = -ENOMEM;
goto out_free_buf;
}
- blk_rq_set_block_pc(rq);
+ req = scsi_req(rq);
+ scsi_req_init(rq);
error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
if (error)
goto out_put_request;
- rq->cmd[0] = INQUIRY;
- rq->cmd[1] = 1;
- rq->cmd[2] = 0x83;
- rq->cmd[3] = bufflen >> 8;
- rq->cmd[4] = bufflen & 0xff;
- rq->cmd_len = COMMAND_SIZE(INQUIRY);
+ req->cmd[0] = INQUIRY;
+ req->cmd[1] = 1;
+ req->cmd[2] = 0x83;
+ req->cmd[3] = bufflen >> 8;
+ req->cmd[4] = bufflen & 0xff;
+ req->cmd_len = COMMAND_SIZE(INQUIRY);
error = blk_execute_rq(rq->q, NULL, rq, 1);
if (error) {
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 12eeae62a2b1..e1872f36147f 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1068,7 +1068,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sb->s_max_links = NILFS_LINK_MAX;
- sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+ sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
err = load_nilfs(nilfs, sb);
if (err)
diff --git a/fs/super.c b/fs/super.c
index 1709ed029a2c..ea662b0e5e78 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1047,7 +1047,7 @@ static int set_bdev_super(struct super_block *s, void *data)
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
- s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ac3b4db519df..8c7d01b75922 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -758,7 +758,7 @@ xfs_buf_readahead_map(
int nmaps,
const struct xfs_buf_ops *ops)
{
- if (bdi_read_congested(target->bt_bdi))
+ if (bdi_read_congested(target->bt_bdev->bd_bdi))
return;
xfs_buf_read_map(target, map, nmaps,
@@ -1791,7 +1791,6 @@ xfs_alloc_buftarg(
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
- btp->bt_bdi = blk_get_backing_dev_info(bdev);
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8a9d3a9599f0..3c867e5a63e1 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -109,7 +109,6 @@ typedef unsigned int xfs_buf_flags_t;
typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
- struct backing_dev_info *bt_bdi;
struct xfs_mount *bt_mount;
unsigned int bt_meta_sectorsize;
size_t bt_meta_sectormask;
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index e850e76acaaf..ad955817916d 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -10,6 +10,7 @@
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
struct page;
struct device;
@@ -144,6 +145,7 @@ struct backing_dev_info {
char *name;
+ struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 43b93a947e61..c52a48cb9a66 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,7 +18,14 @@
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_exit(struct backing_dev_info *bdi);
+
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+ kref_get(&bdi->refcnt);
+ return bdi;
+}
+
+void bdi_put(struct backing_dev_info *bdi);
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -29,6 +36,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
@@ -183,7 +191,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
sb = inode->i_sb;
#ifdef CONFIG_BLOCK
if (sb_is_blkdev_sb(sb))
- return blk_get_backing_dev_info(I_BDEV(inode));
+ return I_BDEV(inode)->bd_bdi;
#endif
return sb->s_bdi;
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 37c9a43c5e78..d703acb55d0f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -162,6 +162,13 @@ enum req_opf {
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 8,
+ /* SCSI passthrough using struct scsi_request */
+ REQ_OP_SCSI_IN = 32,
+ REQ_OP_SCSI_OUT = 33,
+ /* Driver private requests */
+ REQ_OP_DRV_IN = 34,
+ REQ_OP_DRV_OUT = 35,
+
REQ_OP_LAST,
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 05675b1dfd20..aecca0e7d9ca 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -71,15 +71,6 @@ struct request_list {
};
/*
- * request command types
- */
-enum rq_cmd_type_bits {
- REQ_TYPE_FS = 1, /* fs request */
- REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_DRV_PRIV, /* driver defined types from here */
-};
-
-/*
* request flags
*/
typedef __u32 __bitwise req_flags_t;
@@ -128,8 +119,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
-#define BLK_MAX_CDB 16
-
/*
* Try to put the fields that are referenced together in the same cacheline.
*
@@ -147,9 +136,11 @@ struct request {
struct blk_mq_ctx *mq_ctx;
int cpu;
- unsigned cmd_type;
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
+
+ int internal_tag;
+
unsigned long atomic_flags;
/* the following two fields are internal, NEVER access directly */
@@ -221,23 +212,11 @@ struct request {
unsigned short ioprio;
- int internal_tag;
-
void *special; /* opaque pointer available for LLD use */
int errors;
- /*
- * when request is used as a packet command carrier
- */
- unsigned char __cmd[BLK_MAX_CDB];
- unsigned char *cmd;
- unsigned short cmd_len;
-
unsigned int extra_len; /* length of alignment and padding */
- unsigned int sense_len;
- unsigned int resid_len; /* residual count */
- void *sense;
unsigned long deadline;
struct list_head timeout_list;
@@ -254,6 +233,21 @@ struct request {
struct request *next_rq;
};
+static inline bool blk_rq_is_scsi(struct request *rq)
+{
+ return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_rq_is_private(struct request *rq)
+{
+ return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
+}
+
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -273,6 +267,8 @@ typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
+typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
+typedef void (exit_rq_fn)(struct request_queue *, struct request *);
enum blk_eh_timer_return {
BLK_EH_NOT_HANDLED,
@@ -335,6 +331,7 @@ struct queue_limits {
unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
+ unsigned short max_discard_segments;
unsigned char misaligned;
unsigned char discard_misaligned;
@@ -408,6 +405,8 @@ struct request_queue {
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
+ init_rq_fn *init_rq_fn;
+ exit_rq_fn *exit_rq_fn;
const struct blk_mq_ops *mq_ops;
@@ -434,7 +433,8 @@ struct request_queue {
*/
struct delayed_work delay_work;
- struct backing_dev_info backing_dev_info;
+ struct backing_dev_info *backing_dev_info;
+ struct disk_devt *disk_devt;
/*
* The queue owner gets to use this for whatever they like.
@@ -571,12 +571,15 @@ struct request_queue {
struct list_head tag_set_list;
struct bio_set *bio_split;
-#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *mq_debugfs_dir;
#endif
bool mq_sysfs_init_done;
+
+ size_t cmd_size;
+ void *rq_alloc_data;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -703,9 +706,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
-#define blk_account_rq(rq) \
- (((rq)->rq_flags & RQF_STARTED) && \
- ((rq)->cmd_type == REQ_TYPE_FS))
+static inline bool blk_account_rq(struct request *rq)
+{
+ return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
+}
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
@@ -780,7 +784,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
static inline bool rq_mergeable(struct request *rq)
{
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
return false;
if (req_op(rq) == REQ_OP_FLUSH)
@@ -918,7 +922,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -1055,7 +1058,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type != REQ_TYPE_FS))
+ if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors ||
@@ -1137,14 +1140,15 @@ extern void blk_unprep_request(struct request *);
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
- request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_discard_segments(struct request_queue *,
+ unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
@@ -1187,8 +1191,16 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
@@ -1196,6 +1208,15 @@ static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
return rq->nr_phys_segments;
}
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -1384,6 +1405,11 @@ static inline unsigned short queue_max_segments(struct request_queue *q)
return q->limits.max_segments;
}
+static inline unsigned short queue_max_discard_segments(struct request_queue *q)
+{
+ return q->limits.max_discard_segments;
+}
+
static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
return q->limits.max_segment_size;
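
The max_discard_segments limit above lets drivers accept multi-range discards, and blk_rq_nr_discard_segments() reports at least 1 even when nr_phys_segments is 0 for a data-less payload. A sketch with hypothetical helper names:

#include <linux/blkdev.h>

static void my_set_limits(struct request_queue *q)
{
	blk_queue_max_discard_segments(q, 64);	/* accept up to 64 discard ranges */
}

static unsigned short my_segments_to_map(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return blk_rq_nr_discard_segments(rq);	/* >= 1 even with no data */
	return blk_rq_nr_phys_segments(rq);
}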
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index e417f080219a..d2e908586e3d 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -30,9 +30,6 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern int do_blk_trace_setup(struct request_queue *q, char *name,
- dev_t dev, struct block_device *bdev,
- struct blk_user_trace_setup *buts);
extern __printf(2, 3)
void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -80,7 +77,6 @@ extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
-# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
# define blk_add_driver_data(q, rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
@@ -110,16 +106,16 @@ struct compat_blk_user_trace_setup {
#endif
-#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
-static inline int blk_cmd_buf_len(struct request *rq)
+static inline sector_t blk_rq_trace_sector(struct request *rq)
{
- return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1;
+ return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
}
-extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
-
-#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
+{
+ return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
+}
#endif
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 657a718c27d2..e34dde2da0ef 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -66,9 +66,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
-int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
- bsg_job_fn *job_fn, int dd_job_size);
-void bsg_request_fn(struct request_queue *q);
+struct request_queue *bsg_setup_queue(struct device *dev, char *name,
+ bsg_job_fn *job_fn, int dd_job_size);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
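
bsg_setup_queue() now allocates and returns the request_queue itself rather
than operating on a caller-supplied one. A usage sketch for a transport
driver; the queue name, job function and the ERR_PTR return convention are
assumptions based on how callers elsewhere in this series are converted:

	struct request_queue *q;

	q = bsg_setup_queue(dev, "my_bsg", my_job_fn, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);
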
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 014cc564d1c4..c0befcf41b58 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -80,6 +80,8 @@ static const struct file_operations __fops = { \
#if defined(CONFIG_DEBUG_FS)
+struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
+
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -181,6 +183,12 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
* want to duplicate the design decision mistakes of procfs and devfs again.
*/
+static inline struct dentry *debugfs_lookup(const char *name,
+ struct dentry *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
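
debugfs_lookup() lets a caller reuse an already-existing dentry instead of
having a duplicate debugfs_create_dir() fail; the blktrace rework later in
this diff relies on exactly this look-up-then-create pattern. A sketch with
a hypothetical directory name:

	struct dentry *dir;

	dir = debugfs_lookup("mydev", parent);	/* reuse if present */
	if (!dir)
		dir = debugfs_create_dir("mydev", parent);
	if (!dir)
		return -ENOENT;
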
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ef7962e84444..a7e6903866fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -55,8 +55,6 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
* = 2: The target wants to push back the io
*/
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
-typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
- union map_info *map_context);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
struct request *rq,
union map_info *map_context,
@@ -163,7 +161,6 @@ struct target_type {
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
- dm_map_request_fn map_rq;
dm_clone_and_map_request_fn clone_and_map_rq;
dm_release_clone_request_fn release_clone_rq;
dm_endio_fn end_io;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index b5825c4f06f7..aebecc4ed088 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -9,12 +9,22 @@
struct io_cq;
struct elevator_type;
-typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
+/*
+ * Return values from elevator merger
+ */
+enum elv_merge {
+ ELEVATOR_NO_MERGE = 0,
+ ELEVATOR_FRONT_MERGE = 1,
+ ELEVATOR_BACK_MERGE = 2,
+ ELEVATOR_DISCARD_MERGE = 3,
+};
+
+typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
struct bio *);
typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
struct request *, struct bio *);
@@ -87,7 +97,7 @@ struct elevator_mq_ops {
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
- void (*request_merged)(struct request_queue *, struct request *, int);
+ void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
void (*put_request)(struct request *);
@@ -99,7 +109,7 @@ struct elevator_mq_ops {
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
struct request *(*next_request)(struct request_queue *, struct request *);
- int (*get_rq_priv)(struct request_queue *, struct request *);
+ int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
void (*put_rq_priv)(struct request_queue *, struct request *);
void (*init_icq)(struct io_cq *);
void (*exit_icq)(struct io_cq *);
@@ -166,10 +176,12 @@ extern void elv_dispatch_sort(struct request_queue *, struct request *);
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
extern void elv_add_request(struct request_queue *, struct request *, int);
extern void __elv_add_request(struct request_queue *, struct request *, int);
-extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern enum elv_merge elv_merge(struct request_queue *, struct request **,
+ struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_merged_request(struct request_queue *, struct request *,
+ enum elv_merge);
extern void elv_bio_merged(struct request_queue *q, struct request *,
struct bio *);
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
@@ -219,13 +231,6 @@ extern void elv_rb_del(struct rb_root *, struct request *);
extern struct request *elv_rb_find(struct rb_root *, sector_t);
/*
- * Return values from elevator merger
- */
-#define ELEVATOR_NO_MERGE 0
-#define ELEVATOR_FRONT_MERGE 1
-#define ELEVATOR_BACK_MERGE 2
-
-/*
* Insertion selection
*/
#define ELEVATOR_INSERT_FRONT 1
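
With the merge return codes turned into enum elv_merge, legacy elevator
hooks get compiler type checking. A sketch of a merge callback under the
new elevator_merge_fn typedef; my_find_back_merge() is a hypothetical
lookup:

	static enum elv_merge my_merge(struct request_queue *q,
				       struct request **req, struct bio *bio)
	{
		struct request *rq = my_find_back_merge(q, bio);

		if (rq) {
			*req = rq;
			return ELEVATOR_BACK_MERGE;
		}
		return ELEVATOR_NO_MERGE;
	}
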
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2ba074328894..c930cbc19342 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -423,6 +423,7 @@ struct block_device {
int bd_invalidated;
struct gendisk * bd_disk;
struct request_queue * bd_queue;
+ struct backing_dev_info *bd_bdi;
struct list_head bd_list;
/*
* Private data. You must have bd_claim'ed the block_device
@@ -2342,6 +2343,7 @@ extern struct kmem_cache *names_cachep;
#ifdef CONFIG_BLOCK
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
+extern void bdev_unhash_inode(dev_t dev);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
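
The new bd_bdi field gives a block_device a backing_dev_info pointer that
stays valid without an open queue. A rough lifetime sketch, assuming the
noop_backing_dev_info placeholder and a bdi_get() helper pairing with the
bdi_put() added in mm/backing-dev.c below:

	/* at bdget() time, before the device is first opened */
	bdev->bd_bdi = &noop_backing_dev_info;

	/* on open, take a reference on the queue's bdi */
	bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
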
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 76f39754e7b0..a999d281a2f1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -167,6 +167,13 @@ struct blk_integrity {
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+struct disk_devt {
+ atomic_t count;
+ void (*release)(struct disk_devt *disk_devt);
+};
+
+void put_disk_devt(struct disk_devt *disk_devt);
+void get_disk_devt(struct disk_devt *disk_devt);
struct gendisk {
/* major, first_minor and minors are input parameters only,
@@ -176,6 +183,7 @@ struct gendisk {
int first_minor;
int minors; /* maximum number of minors, =1 for
* disks that can't be partitioned. */
+ struct disk_devt *disk_devt;
char disk_name[DISK_NAME_LEN]; /* name of major driver */
char *(*devnode)(struct gendisk *gd, umode_t *mode);
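
struct disk_devt refcounts the dev_t lifetime separately from the gendisk,
with a driver-supplied release callback. A sketch of a driver embedding it;
struct my_devt and my_devt_release() are hypothetical:

	struct my_devt {
		struct disk_devt disk_devt;
		/* driver state that must outlive the gendisk */
	};

	static void my_devt_release(struct disk_devt *dd)
	{
		kfree(container_of(dd, struct my_devt, disk_devt));
	}

	/* at probe time */
	atomic_set(&d->disk_devt.count, 1);
	d->disk_devt.release = my_devt_release;
	gd->disk_devt = &d->disk_devt;
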
diff --git a/include/linux/ide.h b/include/linux/ide.h
index a633898f36ac..2f51c1724b5a 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
/* for request_sense */
#include <linux/cdrom.h>
+#include <scsi/scsi_cmnd.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -39,18 +40,53 @@
struct device;
-/* IDE-specific values for req->cmd_type */
-enum ata_cmd_type_bits {
- REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
- REQ_TYPE_ATA_PC,
- REQ_TYPE_ATA_SENSE, /* sense request */
- REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
- REQ_TYPE_ATA_PM_RESUME, /* resume request */
+/* values for ide_request.type */
+enum ata_priv_type {
+ ATA_PRIV_MISC,
+ ATA_PRIV_TASKFILE,
+ ATA_PRIV_PC,
+ ATA_PRIV_SENSE, /* sense request */
+ ATA_PRIV_PM_SUSPEND, /* suspend request */
+ ATA_PRIV_PM_RESUME, /* resume request */
};
-#define ata_pm_request(rq) \
- ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
- (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
+struct ide_request {
+ struct scsi_request sreq;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u8 type;
+};
+
+static inline struct ide_request *ide_req(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+static inline bool ata_misc_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
+}
+
+static inline bool ata_taskfile_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
+}
+
+static inline bool ata_pc_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
+}
+
+static inline bool ata_sense_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
+}
+
+static inline bool ata_pm_request(struct request *rq)
+{
+ return blk_rq_is_private(rq) &&
+ (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
+ ide_req(rq)->type == ATA_PRIV_PM_RESUME);
+}
/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
@@ -579,7 +615,7 @@ struct ide_drive_s {
/* current sense rq and buffer */
bool sense_rq_armed;
- struct request sense_rq;
+ struct request *sense_rq;
struct request_sense sense_data;
};
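
IDE private commands are now typed through a per-request PDU instead of
rq->cmd_type, and the drive's sense request becomes a normally allocated
request. A minimal allocation sketch; the REQ_OP_DRV_IN and GFP flag
choices are assumptions, not part of this hunk:

	struct request *rq;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, GFP_NOIO);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);
	ide_req(rq)->type = ATA_PRIV_SENSE;
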
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 00eac863a9c7..0b676a02cf3e 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -554,6 +554,8 @@ enum {
NVME_DSMGMT_AD = 1 << 2,
};
+#define NVME_DSM_MAX_RANGES 256
+
struct nvme_dsm_range {
__le32 cattr;
__le32 nlb;
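
NVME_DSM_MAX_RANGES is the Dataset Management command's per-command range
limit. Elsewhere in this series the driver advertises it as the
discard-segment cap, so the block layer never merges more discard bios
than one DSM command can carry, and sizes its range array to match;
roughly:

	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);

	/* per request: one nvme_dsm_range per merged discard bio */
	range = kmalloc_array(blk_rq_nr_discard_segments(req),
			      sizeof(*range), GFP_ATOMIC);
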
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 9fc1aecfc813..b379f93a2c48 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -8,6 +8,7 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_request.h>
struct Scsi_Host;
struct scsi_driver;
@@ -57,6 +58,7 @@ struct scsi_pointer {
#define SCMD_TAGGED (1 << 0)
struct scsi_cmnd {
+ struct scsi_request req;
struct scsi_device *device;
struct list_head list; /* scsi_cmnd participates in queue lists */
struct list_head eh_entry; /* entry for the host eh_cmd_q */
@@ -149,7 +151,7 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
return cmd + 1;
}
-/* make sure not to use it with REQ_TYPE_BLOCK_PC commands */
+/* make sure not to use it with passthrough commands */
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 36680f13270d..3cd8c3bec638 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -551,9 +551,6 @@ struct Scsi_Host {
struct list_head __devices;
struct list_head __targets;
- struct scsi_host_cmd_pool *cmd_pool;
- spinlock_t free_list_lock;
- struct list_head free_list; /* backup store of cmd structs */
struct list_head starved_list;
spinlock_t default_lock;
@@ -826,8 +823,6 @@ extern void scsi_block_requests(struct Scsi_Host *);
struct class_container;
-extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
- void (*) (struct request_queue *));
/*
* These two functions are used to allocate and free a pseudo device
* which will connect to the host adapter itself rather than any
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
new file mode 100644
index 000000000000..ba0aeb980f7e
--- /dev/null
+++ b/include/scsi/scsi_request.h
@@ -0,0 +1,30 @@
+#ifndef _SCSI_SCSI_REQUEST_H
+#define _SCSI_SCSI_REQUEST_H
+
+#include <linux/blk-mq.h>
+
+#define BLK_MAX_CDB 16
+
+struct scsi_request {
+ unsigned char __cmd[BLK_MAX_CDB];
+ unsigned char *cmd;
+ unsigned short cmd_len;
+ unsigned int sense_len;
+ unsigned int resid_len; /* residual count */
+ void *sense;
+};
+
+static inline struct scsi_request *scsi_req(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+static inline void scsi_req_free_cmd(struct scsi_request *req)
+{
+ if (req->cmd != req->__cmd)
+ kfree(req->cmd);
+}
+
+void scsi_req_init(struct request *);
+
+#endif /* _SCSI_SCSI_REQUEST_H */
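
The CDB and sense bookkeeping move from struct request into this PDU. A
sketch of issuing a passthrough command the new way, modeled on how
block/scsi_ioctl.c is converted in this series (error handling trimmed):

	struct request *rq;
	struct scsi_request *req;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);		/* points req->cmd at __cmd[] */
	req = scsi_req(rq);
	req->cmd_len = 6;
	req->cmd[0] = INQUIRY;
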
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index 81292392adbc..b6e07b56d013 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -119,4 +119,6 @@ scsi_transport_device_data(struct scsi_device *sdev)
+ shost->transportt->device_private_offset;
}
+void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q);
+
#endif /* SCSI_TRANSPORT_H */
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 3e02e3a25413..a88ed13446ff 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -73,19 +73,17 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__field( unsigned int, nr_sector )
__field( int, errors )
__array( char, rwbs, RWBS_LEN )
- __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ __dynamic_array( char, cmd, 1 )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
- 0 : blk_rq_pos(rq);
- __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
- 0 : blk_rq_sectors(rq);
+ __entry->sector = blk_rq_trace_sector(rq);
+ __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->errors = rq->errors;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
- blk_dump_cmd(__get_str(cmd), rq);
+ __get_str(cmd)[0] = '\0';
),
TP_printk("%d,%d %s (%s) %llu + %u [%d]",
@@ -153,7 +151,7 @@ TRACE_EVENT(block_rq_complete,
__field( unsigned int, nr_sector )
__field( int, errors )
__array( char, rwbs, RWBS_LEN )
- __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ __dynamic_array( char, cmd, 1 )
),
TP_fast_assign(
@@ -163,7 +161,7 @@ TRACE_EVENT(block_rq_complete,
__entry->errors = rq->errors;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
- blk_dump_cmd(__get_str(cmd), rq);
+ __get_str(cmd)[0] = '\0';
),
TP_printk("%d,%d %s (%s) %llu + %u [%d]",
@@ -186,20 +184,17 @@ DECLARE_EVENT_CLASS(block_rq,
__field( unsigned int, bytes )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
- __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ __dynamic_array( char, cmd, 1 )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
- __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
- 0 : blk_rq_pos(rq);
- __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
- 0 : blk_rq_sectors(rq);
- __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
- blk_rq_bytes(rq) : 0;
+ __entry->sector = blk_rq_trace_sector(rq);
+ __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
+ __entry->bytes = blk_rq_bytes(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
- blk_dump_cmd(__get_str(cmd), rq);
+ __get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 95cecbf67f5c..b2058a7f94bd 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -28,6 +28,8 @@
#include <linux/uaccess.h>
#include <linux/list.h>
+#include "../../block/blk.h"
+
#include <trace/events/block.h>
#include "trace_output.h"
@@ -292,9 +294,6 @@ record_it:
local_irq_restore(flags);
}
-static struct dentry *blk_tree_root;
-static DEFINE_MUTEX(blk_tree_mutex);
-
static void blk_trace_free(struct blk_trace *bt)
{
debugfs_remove(bt->msg_file);
@@ -433,9 +432,9 @@ static void blk_trace_setup_lba(struct blk_trace *bt,
/*
* Setup everything required to start tracing
*/
-int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
- struct block_device *bdev,
- struct blk_user_trace_setup *buts)
+static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev,
+ struct blk_user_trace_setup *buts)
{
struct blk_trace *bt = NULL;
struct dentry *dir = NULL;
@@ -468,22 +467,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
ret = -ENOENT;
- mutex_lock(&blk_tree_mutex);
- if (!blk_tree_root) {
- blk_tree_root = debugfs_create_dir("block", NULL);
- if (!blk_tree_root) {
- mutex_unlock(&blk_tree_mutex);
- goto err;
- }
- }
- mutex_unlock(&blk_tree_mutex);
-
- dir = debugfs_create_dir(buts->name, blk_tree_root);
+ if (!blk_debugfs_root)
+ goto err;
+ dir = debugfs_lookup(buts->name, blk_debugfs_root);
+ if (!dir)
+ bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
if (!dir)
goto err;
- bt->dir = dir;
bt->dev = dev;
atomic_set(&bt->dropped, 0);
INIT_LIST_HEAD(&bt->running_list);
@@ -525,9 +517,12 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (atomic_inc_return(&blk_probes_ref) == 1)
blk_register_tracepoints();
- return 0;
+ ret = 0;
err:
- blk_trace_free(bt);
+ if (dir && !bt->dir)
+ dput(dir);
+ if (ret)
+ blk_trace_free(bt);
return ret;
}
@@ -712,15 +707,13 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (likely(!bt))
return;
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ if (blk_rq_is_passthrough(rq))
what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, nr_bytes, req_op(rq), rq->cmd_flags,
- what, rq->errors, rq->cmd_len, rq->cmd);
- } else {
+ else
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, req_op(rq),
- rq->cmd_flags, what, rq->errors, 0, NULL);
- }
+
+ __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
+ rq->cmd_flags, what, rq->errors, 0, NULL);
}
static void blk_add_trace_rq_abort(void *ignore,
@@ -972,11 +965,7 @@ void blk_add_driver_data(struct request_queue *q,
if (likely(!bt))
return;
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
- __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 0,
- BLK_TA_DRV_DATA, rq->errors, len, data);
- else
- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, 0,
+ __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1752,31 +1741,6 @@ void blk_trace_remove_sysfs(struct device *dev)
#ifdef CONFIG_EVENT_TRACING
-void blk_dump_cmd(char *buf, struct request *rq)
-{
- int i, end;
- int len = rq->cmd_len;
- unsigned char *cmd = rq->cmd;
-
- if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
- buf[0] = '\0';
- return;
- }
-
- for (end = len - 1; end >= 0; end--)
- if (cmd[end])
- break;
- end++;
-
- for (i = 0; i < len; i++) {
- buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
- if (i == end && end != len - 1) {
- sprintf(buf, " ..");
- break;
- }
- }
-}
-
void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
int i = 0;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3bfed5ab2475..39ce616a9d71 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -237,6 +237,7 @@ static __init int bdi_class_init(void)
bdi_class->dev_groups = bdi_dev_groups;
bdi_debug_init();
+
return 0;
}
postcore_initcall(bdi_class_init);
@@ -758,15 +759,20 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
if (!bdi->wb_congested)
return -ENOMEM;
+ atomic_set(&bdi->wb_congested->refcnt, 1);
+
err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (err) {
- kfree(bdi->wb_congested);
+ wb_congested_put(bdi->wb_congested);
return err;
}
return 0;
}
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+{
+ wb_congested_put(bdi->wb_congested);
+}
#endif /* CONFIG_CGROUP_WRITEBACK */
@@ -776,6 +782,7 @@ int bdi_init(struct backing_dev_info *bdi)
bdi->dev = NULL;
+ kref_init(&bdi->refcnt);
bdi->min_ratio = 0;
bdi->max_ratio = 100;
bdi->max_prop_frac = FPROP_FRAC_BASE;
@@ -791,6 +798,22 @@ int bdi_init(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL(bdi_init);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+{
+ struct backing_dev_info *bdi;
+
+ bdi = kmalloc_node(sizeof(struct backing_dev_info),
+ gfp_mask | __GFP_ZERO, node_id);
+ if (!bdi)
+ return NULL;
+
+ if (bdi_init(bdi)) {
+ kfree(bdi);
+ return NULL;
+ }
+ return bdi;
+}
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
@@ -871,12 +894,26 @@ void bdi_unregister(struct backing_dev_info *bdi)
}
}
-void bdi_exit(struct backing_dev_info *bdi)
+static void bdi_exit(struct backing_dev_info *bdi)
{
WARN_ON_ONCE(bdi->dev);
wb_exit(&bdi->wb);
}
+static void release_bdi(struct kref *ref)
+{
+ struct backing_dev_info *bdi =
+ container_of(ref, struct backing_dev_info, refcnt);
+
+ bdi_exit(bdi);
+ kfree(bdi);
+}
+
+void bdi_put(struct backing_dev_info *bdi)
+{
+ kref_put(&bdi->refcnt, release_bdi);
+}
+
void bdi_destroy(struct backing_dev_info *bdi)
{
bdi_unregister(bdi);
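
Taken together, backing_dev_info becomes dynamically allocated and
reference counted. The expected lifecycle, following how the block layer
uses these helpers in this series:

	struct backing_dev_info *bdi;

	bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	/* additional users take their own references while active */
	bdi_put(bdi);	/* last reference frees it via release_bdi() */
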
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 290e8b7d3181..216449825859 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1988,11 +1988,11 @@ void laptop_mode_timer_fn(unsigned long data)
* We want to write everything out, not just down to the dirty
* threshold
*/
- if (!bdi_has_dirty_io(&q->backing_dev_info))
+ if (!bdi_has_dirty_io(q->backing_dev_info))
return;
rcu_read_lock();
- list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+ list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
if (wb_has_dirty_io(wb))
wb_start_writeback(wb, nr_pages, true,
WB_REASON_LAPTOP_TIMER);