Diffstat (limited to 'drivers/block/loop.c')
 drivers/block/loop.c | 193 ++++++++++++++++++++++----------------
 1 file changed, 104 insertions(+), 89 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 407cb172d6e3..85de67334695 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -213,10 +213,13 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
*/
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
- if (use_dio)
+ if (use_dio) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ } else {
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ }
blk_mq_unfreeze_queue(lo->lo_queue);
}
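
The NOMERGES toggling above is keyed off the device's direct-I/O state, which userspace flips through the LOOP_SET_DIRECT_IO ioctl. A minimal userspace sketch of exercising this path; /dev/loop0 is an assumed, already-bound device:

    #include <fcntl.h>
    #include <linux/loop.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/loop0", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/loop0");
                    return 1;
            }
            /* 1 enables direct I/O against the backing file; with this
             * patch the kernel also clears NOMERGES on the loop queue. */
            if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
                    perror("LOOP_SET_DIRECT_IO");
            close(fd);
            return 0;
    }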
@@ -460,12 +463,21 @@ static void lo_complete_rq(struct request *rq)
blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}
+static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
+{
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ blk_mq_complete_request(cmd->rq);
+}
+
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
cmd->ret = ret;
- blk_mq_complete_request(cmd->rq);
+ lo_rw_aio_do_completion(cmd);
}
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
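
lo_rw_aio_do_completion() above frees cmd->bvec and completes the request only when the last of two references is dropped: lo_rw_aio() below takes both (atomic_set(&cmd->ref, 2)), one released by the submission path and one by the AIO completion. A hedged userspace analogue of this last-reference-wins pattern using C11 atomics; all names here are illustrative, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmd {
            atomic_int ref;
            void *bvec;
    };

    /* Whoever drops the last reference performs the real completion. */
    static void do_completion(struct cmd *c)
    {
            if (atomic_fetch_sub(&c->ref, 1) != 1)
                    return;
            free(c->bvec);
            c->bvec = NULL;
            puts("request completed");
    }

    int main(void)
    {
            struct cmd c = { .bvec = malloc(64) };

            atomic_init(&c.ref, 2);  /* submitter + async completion */
            do_completion(&c);       /* submission path returning */
            do_completion(&c);       /* ->ki_complete() firing */
            return 0;
    }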
@@ -473,22 +485,51 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
{
struct iov_iter iter;
struct bio_vec *bvec;
- struct bio *bio = cmd->rq->bio;
+ struct request *rq = cmd->rq;
+ struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
+ unsigned int offset;
+ int segments = 0;
int ret;
- /* nomerge for loop request queue */
- WARN_ON(cmd->rq->bio != cmd->rq->biotail);
+ if (rq->bio != rq->biotail) {
+ struct req_iterator iter;
+ struct bio_vec tmp;
+
+ __rq_for_each_bio(bio, rq)
+ segments += bio_segments(bio);
+ bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO);
+ if (!bvec)
+ return -EIO;
+ cmd->bvec = bvec;
+
+ /*
+ * The bios of the request may be started from the middle of
+ * the 'bvec' because of bio splitting, so we can't directly
+ * copy bio->bi_io_vec to the new bvec. The rq_for_each_segment
+ * API will take care of all details for us.
+ */
+ rq_for_each_segment(tmp, rq, iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ bvec = cmd->bvec;
+ offset = 0;
+ } else {
+ /*
+ * Same here: this bio may start from the middle of the 'bvec'
+ * because of bio splitting, so the offset into the bvec must
+ * be passed to the iov iterator.
+ */
+ offset = bio->bi_iter.bi_bvec_done;
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ segments = bio_segments(bio);
+ }
+ atomic_set(&cmd->ref, 2);
- bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
- bio_segments(bio), blk_rq_bytes(cmd->rq));
- /*
- * This bio may be started from the middle of the 'bvec'
- * because of bio splitting, so offset from the bvec must
- * be passed to iov iterator
- */
- iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ segments, blk_rq_bytes(rq));
+ iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
@@ -500,6 +541,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
else
ret = call_read_iter(file, &cmd->iocb, &iter);
+ lo_rw_aio_do_completion(cmd);
+
if (ret != -EIOCBQUEUED)
cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
return 0;
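
The iov_iter built in this hunk is, in effect, a vectored I/O over the request's bio_vec array that may begin partway into its first segment (iter.iov_offset). A userspace analogue with preadv(2), adjusting the first iovec by hand the way iov_offset does for the kernel; the file name and split point are illustrative:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
            char a[512], b[512];
            struct iovec iov[2] = {
                    { .iov_base = a, .iov_len = sizeof(a) },
                    { .iov_base = b, .iov_len = sizeof(b) },
            };
            int fd = open("/tmp/backing.img", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* A bio split mid-bvec is like starting this read partway
             * into iov[0]; the kernel expresses that with iov_offset. */
            iov[0].iov_base = a + 256;
            iov[0].iov_len = sizeof(a) - 256;
            if (preadv(fd, iov, 2, 0) < 0)
                    perror("preadv");
            close(fd);
            return 0;
    }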
@@ -546,74 +589,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-struct switch_request {
- struct file *file;
- struct completion wait;
-};
-
static inline void loop_update_dio(struct loop_device *lo)
{
__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
lo->use_dio);
}
-/*
- * Do the actual switch; called from the BIO completion routine
- */
-static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
-{
- struct file *file = p->file;
- struct file *old_file = lo->lo_backing_file;
- struct address_space *mapping;
-
- /* if no new file, only flush of queued bios requested */
- if (!file)
- return;
-
- mapping = file->f_mapping;
- mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
- mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- loop_update_dio(lo);
-}
-
-/*
- * loop_switch performs the hard work of switching a backing store.
- * First it needs to flush existing IO, it does this by sending a magic
- * BIO down the pipe. The completion of this BIO does the actual switch.
- */
-static int loop_switch(struct loop_device *lo, struct file *file)
-{
- struct switch_request w;
-
- w.file = file;
-
- /* freeze queue and wait for completion of scheduled requests */
- blk_mq_freeze_queue(lo->lo_queue);
-
- /* do the switch action */
- do_loop_switch(lo, &w);
-
- /* unfreeze */
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- return 0;
-}
-
-/*
- * Helper to flush the IOs in loop, but keeping loop thread running
- */
-static int loop_flush(struct loop_device *lo)
-{
- /* loop not yet configured, no running thread, nothing to flush */
- if (lo->lo_state != Lo_bound)
- return 0;
- return loop_switch(lo, NULL);
-}
-
static void loop_reread_partitions(struct loop_device *lo,
struct block_device *bdev)
{
@@ -678,9 +659,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_putf;
/* and ... switch */
- error = loop_switch(lo, file);
- if (error)
- goto out_putf;
+ blk_mq_freeze_queue(lo->lo_queue);
+ mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_update_dio(lo);
+ blk_mq_unfreeze_queue(lo->lo_queue);
fput(old_file);
if (lo->lo_flags & LO_FLAGS_PARTSCAN)
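
The open-coded switch above runs with the queue frozen; it is reached from the LOOP_CHANGE_FD ioctl. A hedged sketch of driving it from userspace; paths are illustrative, and LOOP_CHANGE_FD only succeeds on a read-only loop device whose new backing file matches the old size:

    #include <fcntl.h>
    #include <linux/loop.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int loopfd = open("/dev/loop0", O_RDWR);
            int newfd = open("/tmp/new-backing.img", O_RDWR);

            if (loopfd < 0 || newfd < 0) {
                    perror("open");
                    return 1;
            }
            /* The argument is the descriptor of the new backing file. */
            if (ioctl(loopfd, LOOP_CHANGE_FD, (unsigned long)newfd) < 0)
                    perror("LOOP_CHANGE_FD");
            close(newfd);
            close(loopfd);
            return 0;
    }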
@@ -867,7 +853,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct file *file, *f;
struct inode *inode;
struct address_space *mapping;
- unsigned lo_blocksize;
int lo_flags = 0;
int error;
loff_t size;
@@ -911,9 +896,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
!file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
- lo_blocksize = S_ISBLK(inode->i_mode) ?
- inode->i_bdev->bd_block_size : PAGE_SIZE;
-
error = -EFBIG;
size = get_loop_size(lo, file);
if ((loff_t)(sector_t)size != size)
@@ -927,7 +909,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = false;
- lo->lo_blocksize = lo_blocksize;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -947,7 +928,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
/* let user-space know about the new size */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- set_blocksize(bdev, lo_blocksize);
+ set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
+ block_size(inode->i_bdev) : PAGE_SIZE);
lo->lo_state = Lo_bound;
if (part_shift)
@@ -1053,6 +1035,9 @@ static int loop_clr_fd(struct loop_device *lo)
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+ blk_queue_logical_block_size(lo->lo_queue, 512);
+ blk_queue_physical_block_size(lo->lo_queue, 512);
+ blk_queue_io_min(lo->lo_queue, 512);
if (bdev) {
bdput(bdev);
invalidate_bdev(bdev);
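
The three blk_queue_* calls added above restore the 512-byte defaults when the device is detached. Userspace can observe the effective logical block size with the standard BLKSSZGET ioctl; a small sketch, with the device path assumed:

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/loop0", O_RDONLY);
            int ssz = 0;

            if (fd < 0) {
                    perror("open /dev/loop0");
                    return 1;
            }
            /* Reports the logical sector size; back to 512 after detach. */
            if (ioctl(fd, BLKSSZGET, &ssz) < 0)
                    perror("BLKSSZGET");
            else
                    printf("logical block size: %d\n", ssz);
            close(fd);
            return 0;
    }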
@@ -1336,6 +1321,26 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
return error;
}
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+{
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+ return -EINVAL;
+
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ blk_queue_logical_block_size(lo->lo_queue, arg);
+ blk_queue_physical_block_size(lo->lo_queue, arg);
+ blk_queue_io_min(lo->lo_queue, arg);
+ loop_update_dio(lo);
+
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+ return 0;
+}
+
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -1384,6 +1389,11 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_dio(lo, arg);
break;
+ case LOOP_SET_BLOCK_SIZE:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_block_size(lo, arg);
+ break;
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
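
With the LOOP_SET_BLOCK_SIZE case wired into lo_ioctl() above, userspace can set the queue's block size directly. A minimal sketch, assuming an already-bound device; if the uapi headers predate this series, the fallback request number below is an assumption taken from it:

    #include <fcntl.h>
    #include <linux/loop.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #ifndef LOOP_SET_BLOCK_SIZE
    #define LOOP_SET_BLOCK_SIZE 0x4C09
    #endif

    int main(void)
    {
            int fd = open("/dev/loop0", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/loop0");
                    return 1;
            }
            /* Must be a power of two in [512, PAGE_SIZE], else -EINVAL. */
            if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
                    perror("LOOP_SET_BLOCK_SIZE");
            close(fd);
            return 0;
    }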
@@ -1583,12 +1593,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
err = loop_clr_fd(lo);
if (!err)
return;
- } else {
+ } else if (lo->lo_state == Lo_bound) {
/*
* Otherwise keep thread (if running) and config,
* but flush possible ongoing bios in thread.
*/
- loop_flush(lo);
+ blk_mq_freeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue);
}
mutex_unlock(&lo->lo_ctl_mutex);
@@ -1770,9 +1781,13 @@ static int loop_add(struct loop_device **l, int i)
}
lo->lo_queue->queuedata = lo;
+ blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+
/*
- * It doesn't make sense to enable merge because the I/O
- * submitted to backing file is handled page by page.
+ * By default we do buffered IO, so it doesn't make sense to enable
+ * merging, because the I/O submitted to the backing file is handled
+ * page by page. For direct IO, merging does help dispatch bigger
+ * requests to the underlying disk, so we enable merging once direct
+ * IO is turned on.
*/
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);