author      Mike Christie <mchristi@redhat.com>              2016-06-02 20:12:37 -0500
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-08-20 18:09:26 +0200
commit      51d841908029ff6b892a93e4df8175162ca8dcc8 (patch)
tree        8241671f523aaf22d985f2fca471bebcbb4c70ff
parent      f318588b758514c35f0a9227195178a3b2b4b733 (diff)
target: Fix max_unmap_lba_count calc overflow
commit ea263c7fada4af8ec7fe5fcfd6e7d7705a89351b upstream.
max_discard_sectors is only 32 bits, and some non-SCSI backend
devices will set it to the maximum of 0xffffffff, so we can end up
overflowing during the max_unmap_lba_count calculation.
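For example, with a 4096-byte logical block size the old formula wraps
around in 32 bits while the new one cannot. A minimal userspace sketch of
the arithmetic (not kernel code; the 4096-byte block size and printed
values are only illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 32-bit queue limit set to its maximum by some non-SCSI backends */
	uint32_t max_discard_sectors = 0xffffffff;
	uint32_t block_size = 4096;	/* assumed logical block size */

	/* Old formula: the left shift by 9 wraps in 32 bits (0xfffffe00) */
	uint32_t old_count = (max_discard_sectors << 9) / block_size;

	/* New formula: divide by (block_size / 512) with a right shift;
	 * ilog2(4096) - 9 == 3, so no intermediate value can overflow. */
	uint32_t new_count = max_discard_sectors >> (12 - 9);

	printf("old max_unmap_lba_count: %u\n", old_count);	/* 1048575 */
	printf("new max_unmap_lba_count: %u\n", new_count);	/* 536870911 */
	return 0;
}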
This fixes a regression caused by my patch:
commit 8a9ebe717a133ba7bc90b06047f43cc6b8bcb8b3
Author: Mike Christie <mchristi@redhat.com>
Date: Mon Jan 18 14:09:27 2016 -0600
target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors
which can result in extra discards being sent, because the overflow
causes max_unmap_lba_count to be smaller than what the backing
device can actually support.
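To put a rough number on the extra discards (a userspace illustration,
assuming a hypothetical 1 TiB range of 4096-byte LBAs and the limits from
the sketch above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t range_lbas = 1ULL << 28;	/* 1 TiB of 4096-byte LBAs */
	uint64_t overflowed_limit = 1048575;	/* limit after the 32-bit wrap */
	uint64_t correct_limit = 536870911;	/* limit without the overflow */

	/* Each discard can cover at most max_unmap_lba_count LBAs, so a
	 * smaller limit splits the same range into more commands. */
	uint64_t cmds_overflowed =
		(range_lbas + overflowed_limit - 1) / overflowed_limit;
	uint64_t cmds_correct =
		(range_lbas + correct_limit - 1) / correct_limit;

	printf("discards with overflowed limit: %llu\n",
	       (unsigned long long)cmds_overflowed);	/* 257 */
	printf("discards with correct limit:    %llu\n",
	       (unsigned long long)cmds_correct);	/* 1 */
	return 0;
}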
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/target/target_core_device.c   8
-rw-r--r--  drivers/target/target_core_file.c     3
-rw-r--r--  drivers/target/target_core_iblock.c   3
-rw-r--r--  include/target/target_core_backend.h  2
4 files changed, 8 insertions, 8 deletions
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 3436a83568ea..dcd5ed26eb18 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -832,13 +832,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
  * in ATA and we need to set TPE=1
  */
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q, int block_size)
+				       struct request_queue *q)
 {
+	int block_size = queue_logical_block_size(q);
+
 	if (!blk_queue_discard(q))
 		return false;
 
-	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
-								block_size;
+	attrib->max_unmap_lba_count =
+		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
 	/*
 	 * Currently hardcoded to 1 in Linux/SCSI code..
 	 */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 75f0f08b2a34..79291869bce6 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
 		dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 		fd_dev->fd_block_size);
 
-	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-					      fd_dev->fd_block_size))
+	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
 		pr_debug("IFILE: BLOCK Discard support available,"
 			" disabled by default\n");
 	/*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2c53dcefff3e..4620c1dcdbc7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-					      dev->dev_attrib.hw_block_size))
+	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
 		pr_debug("IBLOCK: BLOCK Discard support available,"
 			" disabled by default\n");
 
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 28ee5c2e6bcd..711322a8ee35 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -96,6 +96,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
 bool target_sense_desc_format(struct se_device *dev);
 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q, int block_size);
+				       struct request_queue *q);
 
 #endif /* TARGET_CORE_BACKEND_H */