author     Martin K. Petersen <martin.petersen@oracle.com>  2009-05-22 17:17:53 -0400
committer  Jens Axboe <jens.axboe@oracle.com>               2009-05-22 23:22:55 +0200
commit     c72758f33784e5e2a1a4bb9421ef3e6de8f9fcf3 (patch)
tree       a83f7540cc894caafe74db911cba3998d6a9a164 /include/linux/blkdev.h
parent     cd43e26f071524647e660706b784ebcbefbd2e44 (diff)
block: Export I/O topology for block devices and partitions
To support devices with physical block sizes bigger than 512 bytes we need to ensure proper alignment. This patch adds support for exposing I/O topology characteristics as devices are stacked.

logical_block_size is the smallest unit the device can address.

physical_block_size indicates the smallest I/O the device can write without incurring a read-modify-write penalty.

The io_min parameter is the smallest preferred I/O size reported by the device. In many cases this is the same as the physical block size. However, the io_min parameter can be scaled up when stacking (RAID5 chunk size > physical block size).

The io_opt characteristic indicates the optimal I/O size reported by the device. This is usually the stripe width for arrays.

The alignment_offset parameter indicates the number of bytes the start of the device/partition is offset from the device's natural alignment. Partition tools and MD/DM utilities can use this to pad their offsets so filesystems start on proper boundaries.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
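As a rough illustration of the driver-facing side of this interface, the sketch below shows how a low-level driver might advertise a drive with 4096-byte physical blocks behind 512-byte logical addressing. The setters are the ones declared by this patch; the function name my_driver_set_topology and the specific values are hypothetical.

#include <linux/blkdev.h>

/* Illustrative sketch; my_driver_set_topology and the values are hypothetical. */
static void my_driver_set_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);    /* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);  /* smallest write without read-modify-write */
	blk_queue_io_min(q, 4096);               /* smallest preferred I/O size */
	blk_queue_io_opt(q, 0);                  /* no optimal I/O size reported */
	blk_queue_alignment_offset(q, 0);        /* device is naturally aligned */
}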
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b7bb6fdba12c..5e740a135e73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -314,11 +314,16 @@ struct queue_limits {
unsigned int max_hw_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;

unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;

+ unsigned char misaligned;
unsigned char no_cluster;
};
@@ -911,6 +916,15 @@ extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
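blk_stack_limits() and disk_stack_limits() are intended for stacking drivers such as MD and DM, which must merge each member device's limits into the top-level queue. A minimal sketch follows, assuming a hypothetical target descriptor (struct my_target, its disk, members[] and data_start[] are not part of this patch) and assuming the offset argument is a byte offset into the member device.

#include <linux/blkdev.h>
#include <linux/genhd.h>

struct my_target {                          /* hypothetical stacking-driver state */
	struct gendisk      *disk;          /* top-level (exported) disk */
	struct block_device *members[4];    /* component devices */
	sector_t             data_start[4]; /* data start within each member, in 512-byte sectors */
	int                  nr_members;
};

/* Sketch: fold every member's queue_limits into the top-level disk. */
static void my_target_stack_limits(struct my_target *t)
{
	int i;

	for (i = 0; i < t->nr_members; i++)
		/* disk_stack_limits() merges the member's limits into t->disk's
		 * queue and tracks alignment across the stack; a byte offset of
		 * the data area is assumed here (data_start is in sectors). */
		disk_stack_limits(t->disk, t->members[i], t->data_start[i] << 9);
}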
@@ -1047,6 +1061,39 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
return queue_logical_block_size(bdev_get_queue(bdev));
}

+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+ return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+ return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+ return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+ if (q && q->limits.misaligned)
+ return -1;
+
+ if (q && q->limits.alignment_offset)
+ return q->limits.alignment_offset;
+
+ return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+ sector_t sector)
+{
+ return ((sector << 9) - q->limits.alignment_offset)
+ & (q->limits.io_min - 1);
+}
+
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
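On the consumer side, the new queue_* helpers let in-kernel code (and, through the sysfs attributes added elsewhere in this patch, user-space tooling) discover these limits. A hedged sketch of reading them back for an open block device; report_topology and the example numbers are illustrative only.

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Illustrative only: dump a block device's I/O topology to the kernel log. */
static void report_topology(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	printk(KERN_INFO "%s: logical %u physical %u io_min %u io_opt %u align %d\n",
	       bdev->bd_disk->disk_name,
	       bdev_logical_block_size(bdev),
	       queue_physical_block_size(q),
	       queue_io_min(q),
	       queue_io_opt(q),
	       queue_alignment_offset(q));

	/*
	 * Worked example for queue_sector_alignment_offset(): a partition
	 * starting at sector 63 on a queue with io_min = 4096 and
	 * alignment_offset = 0 yields
	 *   ((63 << 9) - 0) & (4096 - 1) = 32256 & 4095 = 3584,
	 * i.e. the partition begins 3584 bytes past an aligned boundary.
	 */
	if (queue_sector_alignment_offset(q, get_start_sect(bdev)))
		printk(KERN_WARNING "%s: start is not aligned to io_min\n",
		       bdev->bd_disk->disk_name);
}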