Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/brd.c                |   66
-rw-r--r--  drivers/block/loop.c               |   13
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  |   14
-rw-r--r--  drivers/block/null_blk/main.c      |    9
-rw-r--r--  drivers/block/null_blk/zoned.c     |    2
-rw-r--r--  drivers/block/rbd.c                |    1
-rw-r--r--  drivers/block/ublk_drv.c           |  208
-rw-r--r--  drivers/block/virtio_blk.c         |   55
-rw-r--r--  drivers/md/dm-cache-target.c       |    4
-rw-r--r--  drivers/md/dm-clone-target.c       |    4
-rw-r--r--  drivers/md/dm-thin.c               |    2
-rw-r--r--  drivers/md/dm-zone.c               |    4
-rw-r--r--  drivers/md/md-bitmap.c             |    1
-rw-r--r--  drivers/md/md.c                    |   15
-rw-r--r--  drivers/md/md.h                    |   24
-rw-r--r--  drivers/md/raid0.c                 |   12
-rw-r--r--  drivers/md/raid1.c                 |  108
-rw-r--r--  drivers/md/raid10.c                |   87
-rw-r--r--  drivers/md/raid5-ppl.c             |    2
-rw-r--r--  drivers/md/raid5.c                 |   17
-rw-r--r--  drivers/md/raid5.h                 |    2
-rw-r--r--  drivers/mmc/core/block.c           |   55
-rw-r--r--  drivers/nvme/host/apple.c          |    2
-rw-r--r--  drivers/nvme/host/core.c           |   38
-rw-r--r--  drivers/nvme/host/ioctl.c          |   21
-rw-r--r--  drivers/nvme/host/multipath.c      |    2
-rw-r--r--  drivers/nvme/host/nvme.h           |    1
-rw-r--r--  drivers/nvme/host/pci.c            |  120
-rw-r--r--  drivers/nvme/host/trace.c          |   58
-rw-r--r--  drivers/nvme/host/zns.c            |    2
-rw-r--r--  drivers/nvme/target/Makefile       |    2
-rw-r--r--  drivers/nvme/target/admin-cmd.c    |  288
-rw-r--r--  drivers/nvme/target/configfs.c     |   27
-rw-r--r--  drivers/nvme/target/core.c         |   64
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c  |    7
-rw-r--r--  drivers/nvme/target/nvmet.h        |   67
-rw-r--r--  drivers/nvme/target/pr.c           | 1156
-rw-r--r--  drivers/nvme/target/trace.c        |  108
-rw-r--r--  drivers/nvme/target/zns.c          |   21
-rw-r--r--  drivers/s390/block/dasd.c          |    2
-rw-r--r--  drivers/s390/block/dasd_devmap.c   |    2
-rw-r--r--  drivers/s390/block/dasd_eckd.c     |    2
-rw-r--r--  drivers/s390/block/dasd_proc.c     |    5
-rw-r--r--  drivers/scsi/sd.c                  |    6
-rw-r--r--  drivers/scsi/sd_zbc.c              |    2
45 files changed, 2382 insertions, 326 deletions
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2fd1ed101748..5a95671d8151 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -316,8 +316,40 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
+static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
+static struct brd_device *brd_find_or_alloc_device(int i)
+{
+ struct brd_device *brd;
+
+ mutex_lock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ if (brd->brd_number == i) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
+ if (!brd) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+ brd->brd_number = i;
+ list_add_tail(&brd->brd_list, &brd_devices);
+ mutex_unlock(&brd_devices_mutex);
+ return brd;
+}
+
+static void brd_free_device(struct brd_device *brd)
+{
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
+ kfree(brd);
+}
+
static int brd_alloc(int i)
{
struct brd_device *brd;
@@ -340,14 +372,9 @@ static int brd_alloc(int i)
BLK_FEAT_NOWAIT,
};
- list_for_each_entry(brd, &brd_devices, brd_list)
- if (brd->brd_number == i)
- return -EEXIST;
- brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd)
- return -ENOMEM;
- brd->brd_number = i;
- list_add_tail(&brd->brd_list, &brd_devices);
+ brd = brd_find_or_alloc_device(i);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
xa_init(&brd->brd_pages);
@@ -378,8 +405,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
put_disk(disk);
out_free_dev:
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
return err;
}
@@ -398,8 +424,7 @@ static void brd_cleanup(void)
del_gendisk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
}
}
@@ -426,16 +451,6 @@ static int __init brd_init(void)
{
int err, i;
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
- }
-
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -451,11 +466,18 @@ static int __init brd_init(void)
* dynamically.
*/
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
err = -EIO;
goto out_free;
}
+ for (i = 0; i < rd_nr; i++)
+ brd_alloc(i);
+
pr_info("brd: module loaded\n");
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 78a7bb28defe..fe9bb4fb5f1b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -173,7 +173,7 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
static bool lo_bdev_can_use_dio(struct loop_device *lo,
struct block_device *backing_bdev)
{
- unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
+ unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
return false;
@@ -786,11 +786,10 @@ static void loop_config_discard(struct loop_device *lo,
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
- max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
- granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
- queue_physical_block_size(backingq);
+ max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+ granularity = bdev_discard_granularity(bdev);
/*
* We use punch hole to reclaim the free space used by the
@@ -977,7 +976,7 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static unsigned short loop_default_blocksize(struct loop_device *lo,
+static unsigned int loop_default_blocksize(struct loop_device *lo,
struct block_device *backing_bdev)
{
/* In case of direct I/O, match underlying block size */
@@ -986,7 +985,7 @@ static unsigned short loop_default_blocksize(struct loop_device *lo,
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
+static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 223faa9d5ffd..43701b7b10a7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2701,7 +2701,12 @@ static int mtip_hw_init(struct driver_data *dd)
int rv;
unsigned long timeout, timetaken;
- dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+ dd->mmio = pcim_iomap_region(dd->pdev, MTIP_ABAR, MTIP_DRV_NAME);
+ if (IS_ERR(dd->mmio)) {
+ dev_err(&dd->pdev->dev, "Unable to request / ioremap PCI region\n");
+ return PTR_ERR(dd->mmio);
+ }
+
mtip_detect_product(dd);
if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
@@ -3710,13 +3715,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- /* Map BAR5 to memory. */
- rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
- if (rv < 0) {
- dev_err(&pdev->dev, "Unable to map regions\n");
- goto iomap_err;
- }
-
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 2f0431e42c49..3c3d8d200abb 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
{
- struct request *requeue_list = NULL;
- struct request **requeue_lastp = &requeue_list;
+ struct rq_list requeue_list = {};
struct blk_mq_queue_data bd = { };
blk_status_t ret;
@@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct request **rqlist)
bd.rq = rq;
ret = null_queue_rq(rq->mq_hctx, &bd);
if (ret != BLK_STS_OK)
- rq_list_add_tail(&requeue_lastp, rq);
- } while (!rq_list_empty(*rqlist));
+ rq_list_add_tail(&requeue_list, rq);
+ } while (!rq_list_empty(rqlist));
*rqlist = requeue_list;
}
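
The null_blk change above is the simplest instance of the new ->queue_rqs() calling convention in this series: the driver now receives a struct rq_list instead of a struct request **. A minimal sketch of the resulting shape, using only the rq_list helpers visible in this diff; example_issue() is a placeholder for a driver's real per-request submission:

#include <linux/blk-mq.h>

/*
 * Sketch only: a hypothetical ->queue_rqs() after the rq_list conversion.
 * example_issue() stands in for the driver's per-request submission.
 */
static void example_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list requeue_list = {};
	struct request *rq;

	while ((rq = rq_list_pop(rqlist))) {
		/* requests that cannot be issued are collected for requeue */
		if (example_issue(rq) != BLK_STS_OK)
			rq_list_add_tail(&requeue_list, rq);
	}

	/* whatever is left in *rqlist on return is requeued by the core */
	*rqlist = requeue_list;
}
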
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 9bc768b2ca56..0d5f9bf95229 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -166,7 +166,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
lim->features |= BLK_FEAT_ZONED;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_append_max_sectors;
+ lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c8b19a22c2a..ac421dbeeb11 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7284,6 +7284,7 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
blk_mark_disk_dead(rbd_dev->disk);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6ba2c1dd1d87..c6d18cd8af44 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -60,7 +60,12 @@
| UBLK_F_UNPRIVILEGED_DEV \
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
- | UBLK_F_ZONED)
+ | UBLK_F_ZONED \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
+
+#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
@@ -143,6 +148,7 @@ struct ublk_queue {
bool force_abort;
bool timeout;
bool canceling;
+ bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
@@ -179,8 +185,7 @@ struct ublk_device {
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
- struct work_struct quiesce_work;
- struct work_struct stop_work;
+ struct work_struct nosrv_work;
};
/* header of ublk_params */
@@ -664,30 +669,69 @@ static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
+static inline int __ublk_queue_cmd_buf_size(int depth)
+{
+ return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
+}
+
static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
- PAGE_SIZE);
+ return __ublk_queue_cmd_buf_size(ubq->q_depth);
+}
+
+static int ublk_max_cmd_buf_size(void)
+{
+ return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
+}
+
+/*
+ * Should I/O outstanding to the ublk server when it exits be reissued?
+ * If not, outstanding I/O will get errors.
+ */
+static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
+}
+
+/*
+ * Should I/O issued while there is no ublk server be queued? If not,
+ * I/O issued while there is no ublk server will get errors.
+ */
+static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery_reissue(
- struct ublk_queue *ubq)
+/*
+ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
+ * of the device flags for smaller cache footprint - better for fast
+ * paths.
+ */
+static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
{
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
+ !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery(
- struct ublk_queue *ubq)
+/*
+ * Should ublk devices be stopped (i.e. no recovery possible) when the
+ * ublk server exits? If not, devices can be used again by a future
+ * incarnation of a ublk server via the start_recovery/end_recovery
+ * commands.
+ */
+static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
{
- return ubq->flags & UBLK_F_USER_RECOVERY;
+ return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
}
-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
{
- return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+ return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
+ ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
}
static void ublk_free_disk(struct gendisk *disk)
@@ -1063,7 +1107,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_queue_can_use_recovery_reissue(ubq))
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
blk_mq_requeue_request(req, false);
else
ublk_put_req_ref(ubq, req);
@@ -1091,7 +1135,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
struct request *rq)
{
/* We cannot process this rq so just requeue it. */
- if (ublk_queue_can_use_recovery(ubq))
+ if (ublk_nosrv_dev_should_queue_io(ubq->dev))
blk_mq_requeue_request(rq, false);
else
blk_mq_end_request(rq, BLK_STS_IOERR);
@@ -1236,10 +1280,7 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
struct ublk_device *ub = ubq->dev;
if (ublk_abort_requests(ub, ubq)) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
+ schedule_work(&ub->nosrv_work);
}
return BLK_EH_DONE;
}
@@ -1254,6 +1295,10 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
blk_status_t res;
+ if (unlikely(ubq->fail_io)) {
+ return BLK_STS_TARGET;
+ }
+
/* fill iod to slot in io cmd buffer */
res = ublk_setup_iod(ubq, rq);
if (unlikely(res != BLK_STS_OK))
@@ -1268,7 +1313,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unquiesced.
*/
- if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
if (unlikely(ubq->canceling)) {
@@ -1322,7 +1367,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ublk_device *ub = filp->private_data;
size_t sz = vma->vm_end - vma->vm_start;
- unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned max_sz = ublk_max_cmd_buf_size();
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
int q_id, ret = 0;
@@ -1489,10 +1534,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
ublk_cancel_cmd(ubq, io, issue_flags);
if (need_schedule) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
+ schedule_work(&ub->nosrv_work);
}
}
@@ -1555,20 +1597,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
}
-static void ublk_quiesce_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, quiesce_work);
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
- __ublk_quiesce_dev(ub);
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
-}
-
static void ublk_unquiesce_dev(struct ublk_device *ub)
{
int i;
@@ -1597,7 +1625,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
goto unlock;
- if (ublk_can_use_recovery(ub)) {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
if (ub->dev_info.state == UBLK_S_DEV_LIVE)
__ublk_quiesce_dev(ub);
ublk_unquiesce_dev(ub);
@@ -1617,6 +1645,37 @@ static void ublk_stop_dev(struct ublk_device *ub)
ublk_cancel_dev(ub);
}
+static void ublk_nosrv_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, nosrv_work);
+ int i;
+
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ ublk_stop_dev(ub);
+ return;
+ }
+
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto unlock;
+
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ __ublk_quiesce_dev(ub);
+ } else {
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ublk_get_queue(ub, i)->fail_io = true;
+ }
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ }
+
+ unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_cancel_dev(ub);
+}
+
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -2130,14 +2189,6 @@ static int ublk_add_chdev(struct ublk_device *ub)
return ret;
}
-static void ublk_stop_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, stop_work);
-
- ublk_stop_dev(ub);
-}
-
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
@@ -2162,8 +2213,7 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
+ cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
ublk_put_device(ub);
ublks_added--;
@@ -2229,7 +2279,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
lim.features |= BLK_FEAT_ZONED;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
- lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
}
if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
@@ -2372,6 +2422,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
+ /* forbid nonsense combinations of recovery flags */
+ switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
+ case 0:
+ case UBLK_F_USER_RECOVERY:
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
+ break;
+ default:
+ pr_warn("%s: invalid recovery flags %llx\n", __func__,
+ info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2424,8 +2487,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
- INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2560,9 +2622,7 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
-
+ cancel_work_sync(&ub->nosrv_work);
return 0;
}
@@ -2699,7 +2759,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
int i;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
if (!ub->nr_queues_ready)
goto out_unlock;
@@ -2710,14 +2770,18 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
* and related io_uring ctx is freed so file struct of /dev/ublkcX is
* released.
*
+ * and one of the following holds
+ *
* (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
* (a)has quiesced request queue
* (b)has requeued every inflight rqs whose io_flags is ACTIVE
* (c)has requeued/aborted every inflight rqs whose io_flags is NOT ACTIVE
* (d)has completed/canceled all ioucmds owned by the dying process
+ *
+ * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
+ * quiesced, but all I/O is being immediately errored
*/
- if (test_bit(UB_STATE_OPEN, &ub->state) ||
- ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
@@ -2741,6 +2805,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
+ int i;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -2752,21 +2817,32 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (!ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
+
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ pr_devel("%s: queue unquiesced, dev id %d.\n",
+ __func__, header->dev_id);
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ } else {
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ublk_get_queue(ub, i)->fail_io = false;
+ }
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ }
+
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
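
The new UBLK_F_ALL_RECOVERY_FLAGS check above only admits three flag combinations besides zero. A small illustrative helper a ublk server might use to pick one of them; the flag names come from this patch, while the helper itself and its parameters are hypothetical:

#include <linux/ublk_cmd.h>

/*
 * Illustrative only: build dev_info.flags for the desired behaviour when
 * the ublk server goes away. Only these combinations pass the new
 * UBLK_F_ALL_RECOVERY_FLAGS validation in ublk_ctrl_add_dev().
 */
static __u64 example_recovery_flags(bool recoverable, bool reissue, bool fail_io)
{
	__u64 flags = 0;

	if (!recoverable)
		return 0;			/* no server -> stop the device */

	flags |= UBLK_F_USER_RECOVERY;		/* no server -> allow recovery */
	if (reissue)
		flags |= UBLK_F_USER_RECOVERY_REISSUE;	/* reissue outstanding I/O */
	else if (fail_io)
		flags |= UBLK_F_USER_RECOVERY_FAIL_IO;	/* error I/O while no server */
	return flags;
}
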
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 194417abc105..c0cdba71f436 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -471,18 +471,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist)
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct rq_list *rqlist)
{
+ struct request *req;
unsigned long flags;
- int err;
bool kick;
spin_lock_irqsave(&vq->lock, flags);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ int err;
err = virtblk_add_req(vq->vq, vbr);
if (err) {
@@ -495,37 +495,32 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
- return kick;
+ if (kick)
+ virtqueue_notify(vq->vq);
}
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
- bool kick;
-
- if (!virtblk_prep_rq_batch(req)) {
- rq_list_move(rqlist, &requeue_list, req, prev);
- req = prev;
- if (!req)
- continue;
- }
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct virtio_blk_vq *vq = NULL;
+ struct request *req;
- if (!next || req->mq_hctx != next->mq_hctx) {
- req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist);
- if (kick)
- virtqueue_notify(vq->vq);
+ while ((req = rq_list_pop(rqlist))) {
+ struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (vq && vq != this_vq)
+ virtblk_add_req_batch(vq, &submit_list);
+ vq = this_vq;
+
+ if (virtblk_prep_rq_batch(req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (vq)
+ virtblk_add_req_batch(vq, &submit_list);
*rqlist = requeue_list;
}
@@ -784,7 +779,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
wg, v);
return -ENODEV;
}
- lim->max_zone_append_sectors = v;
+ lim->max_hw_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
return 0;
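
virtio_blk above and nvme-pci later in this diff converge on the same batching shape after the rq_list conversion: pop requests off the incoming list, flush the accumulated batch whenever the target queue changes, and return the requests that failed preparation as the requeue list. A condensed sketch extending the earlier one; the example_* names are placeholders:

/*
 * Sketch of the common per-queue batching shape (compare virtio_queue_rqs()
 * and nvme_queue_rqs() in this diff). example_* names are placeholders.
 */
static void example_queue_rqs_batched(struct rq_list *rqlist)
{
	struct rq_list submit_list = { };
	struct rq_list requeue_list = { };
	struct example_queue *q = NULL;
	struct request *req;

	while ((req = rq_list_pop(rqlist))) {
		struct example_queue *this_q = example_queue_of(req->mq_hctx);

		/* the hardware queue changed: flush what was batched so far */
		if (q && q != this_q)
			example_submit_batch(q, &submit_list);
		q = this_q;

		if (example_prep(req))
			rq_list_add_tail(&submit_list, req);
		else
			rq_list_add_tail(&requeue_list, req);
	}
	if (q)
		example_submit_batch(q, &submit_list);

	/* failed preparations go back to the block layer for requeue */
	*rqlist = requeue_list;
}
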
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 849eb6333e98..9cb797a561d6 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3362,7 +3362,7 @@ static int cache_iterate_devices(struct dm_target *ti,
static void disable_passdown_if_not_supported(struct cache *cache)
{
struct block_device *origin_bdev = cache->origin_dev->bdev;
- struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+ struct queue_limits *origin_limits = bdev_limits(origin_bdev);
const char *reason = NULL;
if (!cache->features.discard_passdown)
@@ -3384,7 +3384,7 @@ static void disable_passdown_if_not_supported(struct cache *cache)
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
struct block_device *origin_bdev = cache->origin_dev->bdev;
- struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+ struct queue_limits *origin_limits = bdev_limits(origin_bdev);
if (!cache->features.discard_passdown) {
/* No passdown is done so setting own virtual limits */
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 12bbe487a4c8..e956d980672c 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2020,7 +2020,7 @@ static void clone_resume(struct dm_target *ti)
static void disable_passdown_if_not_supported(struct clone *clone)
{
struct block_device *dest_dev = clone->dest_dev->bdev;
- struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
+ struct queue_limits *dest_limits = bdev_limits(dest_dev);
const char *reason = NULL;
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
@@ -2041,7 +2041,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
struct block_device *dest_bdev = clone->dest_dev->bdev;
- struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;
+ struct queue_limits *dest_limits = bdev_limits(dest_bdev);
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
/* No passdown is done so we set our own virtual limits */
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89632ce97760..9095f19a84f3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2842,7 +2842,7 @@ static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
{
struct pool *pool = pt->pool;
struct block_device *data_bdev = pt->data_dev->bdev;
- struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+ struct queue_limits *data_limits = bdev_limits(data_bdev);
const char *reason = NULL;
if (!pt->adjusted_pf.discard_passdown)
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index c0d41c36e06e..20edd3fabbab 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -344,7 +344,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
} else {
set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- lim->max_zone_append_sectors = 0;
+ lim->max_hw_zone_append_sectors = 0;
}
/*
@@ -379,7 +379,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
if (!zlim.mapped_nr_seq_zones) {
lim->max_open_zones = 0;
lim->max_active_zones = 0;
- lim->max_zone_append_sectors = 0;
+ lim->max_hw_zone_append_sectors = 0;
lim->zone_write_granularity = 0;
lim->chunk_sectors = 0;
lim->features &= ~BLK_FEAT_ZONED;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 29da10e6f703..c3a42dd66ce5 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1285,6 +1285,7 @@ static void bitmap_unplug_async(struct bitmap *bitmap)
queue_work(md_bitmap_wq, &unplug_work.work);
wait_for_completion(&done);
+ destroy_work_on_stack(&unplug_work.work);
}
static void bitmap_unplug(struct mddev *mddev, bool sync)
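
The one-line md-bitmap fix above adds the destroy_work_on_stack() call that pairs with an on-stack work item. A generic sketch of that pattern for reference; everything except the workqueue/completion APIs is illustrative, and it assumes the work item was set up with INIT_WORK_ONSTACK():

#include <linux/workqueue.h>
#include <linux/completion.h>

struct example_onstack_work {
	struct work_struct work;
	struct completion done;
};

static void example_onstack_fn(struct work_struct *work)
{
	struct example_onstack_work *w =
		container_of(work, struct example_onstack_work, work);

	/* ... do the actual work here ... */
	complete(&w->done);
}

static void example_run_onstack(struct workqueue_struct *wq)
{
	struct example_onstack_work w;

	INIT_WORK_ONSTACK(&w.work, example_onstack_fn);
	init_completion(&w.done);
	queue_work(wq, &w.work);
	wait_for_completion(&w.done);
	destroy_work_on_stack(&w.work);	/* required pair for INIT_WORK_ONSTACK */
}
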
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 67108c397c5a..aebe12b0ee27 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9784,9 +9784,7 @@ EXPORT_SYMBOL(md_reap_sync_thread);
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
- wait_event_timeout(rdev->blocked_wait,
- !test_bit(Blocked, &rdev->flags) &&
- !test_bit(BlockedBadBlocks, &rdev->flags),
+ wait_event_timeout(rdev->blocked_wait, !rdev_blocked(rdev),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
@@ -9815,6 +9813,17 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
{
struct mddev *mddev = rdev->mddev;
int rv;
+
+ /*
+ * Recording new badblocks for a faulty rdev will force unnecessary
+ * superblock updates. This is fragile for external management because
+ * the userspace daemon may be trying to remove this device and a
+ * deadlock may occur. This will probably be solved in mdadm, but it is
+ * safer to avoid it here.
+ */
+ if (test_bit(Faulty, &rdev->flags))
+ return 1;
+
if (is_new)
s += rdev->new_data_offset;
else
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5d2e6bd58e4d..4ba93af36126 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -1002,6 +1002,30 @@ static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
}
+static inline bool rdev_blocked(struct md_rdev *rdev)
+{
+ /*
+ * Blocked will be set by the error handler and cleared by the daemon
+ * after updating the superblock; meanwhile write IO should be blocked
+ * to prevent reading old data after a power failure.
+ */
+ if (test_bit(Blocked, &rdev->flags))
+ return true;
+
+ /*
+ * Faulty device should not be accessed anymore, there is no need to
+ * wait for bad block to be acknowledged.
+ */
+ if (test_bit(Faulty, &rdev->flags))
+ return false;
+
+ /* rdev is blocked by badblocks. */
+ if (test_bit(BlockedBadBlocks, &rdev->flags))
+ return true;
+
+ return false;
+}
+
#define mddev_add_trace_msg(mddev, fmt, args...) \
do { \
if (!mddev_is_dm(mddev)) \
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 32d587524778..baaf5f8b80ae 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -466,6 +466,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio,
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
&mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return;
+ }
bio_chain(split, bio);
submit_bio_noacct(bio);
bio = split;
@@ -608,6 +614,12 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
if (sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, sectors, GFP_NOIO,
&mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return true;
+ }
bio_chain(split, bio);
raid0_map_submit_bio(mddev, bio);
bio = split;
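
bio_split() can fail and return an ERR_PTR, so every md split site in this diff gains the same error leg. A condensed sketch of the pattern used in raid0, raid1 and raid10 above and below; example_handle_mapped() is a placeholder for the per-personality submission path:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch of the bio_split() error handling added across md in this diff. */
static void example_split_and_submit(struct bio *bio, sector_t sectors,
				     struct bio_set *bs)
{
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

		if (IS_ERR(split)) {
			/* fail the whole bio with the split error */
			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
			bio_endio(bio);
			return;
		}
		bio_chain(split, bio);		/* parent completes after both parts */
		submit_bio_noacct(bio);		/* resubmit the remainder */
		bio = split;			/* continue with the front part */
	}

	example_handle_mapped(bio);		/* placeholder for the real submission */
}
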
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6c9d24203f39..a5adf08ee174 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
const enum req_op op = bio_op(bio);
const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
- int rdisk;
+ int rdisk, error;
bool r1bio_existed = !!r1_bio;
/*
@@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
+
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
submit_bio_noacct(bio);
bio = split;
@@ -1410,6 +1415,47 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio;
mddev_trace_remap(mddev, read_bio, r1_bio->sector);
submit_bio_noacct(read_bio);
+ return;
+
+err_handle:
+ atomic_dec(&mirror->rdev->nr_pending);
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ raid_end_bio_io(r1_bio);
+}
+
+static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
+{
+ struct r1conf *conf = mddev->private;
+ int disks = conf->raid_disks * 2;
+ int i;
+
+retry:
+ for (i = 0; i < disks; i++) {
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+ if (!rdev)
+ continue;
+
+ /* don't write here until the bad block is acknowledged */
+ if (test_bit(WriteErrorSeen, &rdev->flags) &&
+ rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
+ bio_sectors(bio)) < 0)
+ set_bit(BlockedBadBlocks, &rdev->flags);
+
+ if (rdev_blocked(rdev)) {
+ if (bio->bi_opf & REQ_NOWAIT)
+ return false;
+
+ mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+ rdev->raid_disk);
+ atomic_inc(&rdev->nr_pending);
+ md_wait_for_blocked_rdev(rdev, rdev->mddev);
+ goto retry;
+ }
+ }
+
+ return true;
}
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1417,9 +1463,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- int i, disks;
+ int i, disks, k, error;
unsigned long flags;
- struct md_rdev *blocked_rdev;
int first_clone;
int max_sectors;
bool write_behind = false;
@@ -1457,7 +1502,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
return;
}
- retry_write:
+ if (!wait_blocked_rdev(mddev, bio)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
+
r1_bio = alloc_r1bio(mddev, bio);
r1_bio->sectors = max_write_sectors;
@@ -1473,7 +1522,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
disks = conf->raid_disks * 2;
- blocked_rdev = NULL;
max_sectors = r1_bio->sectors;
for (i = 0; i < disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
@@ -1486,11 +1534,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
write_behind = true;
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- atomic_inc(&rdev->nr_pending);
- blocked_rdev = rdev;
- break;
- }
r1_bio->bios[i] = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
@@ -1506,13 +1549,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
- if (is_bad < 0) {
- /* mustn't write here until the bad block is
- * acknowledged*/
- set_bit(BlockedBadBlocks, &rdev->flags);
- blocked_rdev = rdev;
- break;
- }
if (is_bad && first_bad <= r1_bio->sector) {
/* Cannot write here at all */
bad_sectors -= (r1_bio->sector - first_bad);
@@ -1543,27 +1579,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->bios[i] = bio;
}
- if (unlikely(blocked_rdev)) {
- /* Wait for this device to become unblocked */
- int j;
-
- for (j = 0; j < i; j++)
- if (r1_bio->bios[j])
- rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- mempool_free(r1_bio, &conf->r1bio_pool);
- allow_barrier(conf, bio->bi_iter.bi_sector);
-
- if (bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
- blocked_rdev->raid_disk);
- md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf, bio->bi_iter.bi_sector, false);
- goto retry_write;
- }
-
/*
* When using a bitmap, we may call alloc_behind_master_bio below.
* alloc_behind_master_bio allocates a copy of the data payload a page
@@ -1576,6 +1591,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
GFP_NOIO, &conf->bio_split);
+
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
submit_bio_noacct(bio);
bio = split;
@@ -1660,6 +1680,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
/* In case raid1d snuck in to freeze_array */
wake_up_barrier(conf);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ if (r1_bio->bios[k]) {
+ rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+ r1_bio->bios[k] = NULL;
+ }
+ }
+
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ raid_end_bio_io(r1_bio);
}
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 862b1fb71d86..18989231791a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1159,6 +1159,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
+ int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1206,6 +1207,10 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (max_sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
@@ -1236,6 +1241,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
mddev_trace_remap(mddev, read_bio, r10_bio->sector);
submit_bio_noacct(read_bio);
return;
+err_handle:
+ atomic_dec(&rdev->nr_pending);
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ raid_end_bio_io(r10_bio);
}
static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
@@ -1285,9 +1295,9 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
{
- int i;
struct r10conf *conf = mddev->private;
struct md_rdev *blocked_rdev;
+ int i;
retry_wait:
blocked_rdev = NULL;
@@ -1295,40 +1305,36 @@ retry_wait:
struct md_rdev *rdev, *rrdev;
rdev = conf->mirrors[i].rdev;
- rrdev = conf->mirrors[i].replacement;
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- atomic_inc(&rdev->nr_pending);
- blocked_rdev = rdev;
- break;
- }
- if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
- atomic_inc(&rrdev->nr_pending);
- blocked_rdev = rrdev;
- break;
- }
-
- if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (rdev) {
sector_t dev_sector = r10_bio->devs[i].addr;
/*
* Discard request doesn't care about the write result
* so it doesn't need to wait for a blocked disk here.
*/
- if (!r10_bio->sectors)
- continue;
-
- if (rdev_has_badblock(rdev, dev_sector,
- r10_bio->sectors) < 0) {
+ if (test_bit(WriteErrorSeen, &rdev->flags) &&
+ r10_bio->sectors &&
+ rdev_has_badblock(rdev, dev_sector,
+ r10_bio->sectors) < 0)
/*
- * Mustn't write here until the bad block
- * is acknowledged
+ * Mustn't write here until the bad
+ * block is acknowledged
*/
- atomic_inc(&rdev->nr_pending);
set_bit(BlockedBadBlocks, &rdev->flags);
+
+ if (rdev_blocked(rdev)) {
blocked_rdev = rdev;
+ atomic_inc(&rdev->nr_pending);
break;
}
}
+
+ rrdev = conf->mirrors[i].replacement;
+ if (rrdev && rdev_blocked(rrdev)) {
+ atomic_inc(&rrdev->nr_pending);
+ blocked_rdev = rrdev;
+ break;
+ }
}
if (unlikely(blocked_rdev)) {
@@ -1347,9 +1353,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
- int i;
+ int i, k;
sector_t sectors;
int max_sectors;
+ int error;
if ((mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1482,6 +1489,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
if (r10_bio->sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, r10_bio->sectors,
GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ error = PTR_ERR(split);
+ goto err_handle;
+ }
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
@@ -1503,6 +1514,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
raid10_write_one_disk(mddev, r10_bio, bio, true, i);
}
one_write_done(r10_bio);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ int d = r10_bio->devs[k].devnum;
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ struct md_rdev *rrdev = conf->mirrors[d].replacement;
+
+ if (r10_bio->devs[k].bio) {
+ rdev_dec_pending(rdev, mddev);
+ r10_bio->devs[k].bio = NULL;
+ }
+ if (r10_bio->devs[k].repl_bio) {
+ rdev_dec_pending(rrdev, mddev);
+ r10_bio->devs[k].repl_bio = NULL;
+ }
+ }
+
+ bio->bi_status = errno_to_blk_status(error);
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ raid_end_bio_io(r10_bio);
}
static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
@@ -1644,6 +1675,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (remainder) {
split_size = stripe_size - remainder;
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return 0;
+ }
bio_chain(split, bio);
allow_barrier(conf);
/* Resend the first split part */
@@ -1654,6 +1690,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (remainder) {
split_size = bio_sectors(bio) - remainder;
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return 0;
+ }
bio_chain(split, bio);
allow_barrier(conf);
/* Resend the second split part */
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index a70cbec12ed0..37c4da5311ca 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -258,7 +258,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
pplhdr->signature = cpu_to_le32(ppl_conf->signature);
- io->seq = atomic64_add_return(1, &ppl_conf->seq);
+ io->seq = atomic64_inc_return(&ppl_conf->seq);
pplhdr->generation = cpu_to_le64(io->seq);
return io;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc2ea636d173..f09e7677ee9f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4724,14 +4724,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev) {
is_bad = rdev_has_badblock(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf));
- if (s->blocked_rdev == NULL
- && (test_bit(Blocked, &rdev->flags)
- || is_bad < 0)) {
+ if (s->blocked_rdev == NULL) {
if (is_bad < 0)
- set_bit(BlockedBadBlocks,
- &rdev->flags);
- s->blocked_rdev = rdev;
- atomic_inc(&rdev->nr_pending);
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ if (rdev_blocked(rdev)) {
+ s->blocked_rdev = rdev;
+ atomic_inc(&rdev->nr_pending);
+ }
}
}
clear_bit(R5_Insync, &dev->flags);
@@ -7177,6 +7176,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
err = mddev_suspend_and_lock(mddev);
if (err)
return err;
+ raid5_quiesce(mddev, true);
+
conf = mddev->private;
if (!conf)
err = -ENODEV;
@@ -7198,6 +7199,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
kfree(old_groups);
}
}
+
+ raid5_quiesce(mddev, false);
mddev_unlock_and_resume(mddev);
return err ?: len;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 896ecfc4afa6..d174e586698f 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -633,7 +633,7 @@ struct r5conf {
* two caches.
*/
int active_name;
- char cache_name[2][32];
+ char cache_name[2][48];
struct kmem_cache *slab_cache; /* for allocating stripes */
struct mutex cache_size_mutex; /* Protect changes to cache size */
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ef06a4d5d65b..79f6fad97a80 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2501,6 +2501,56 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
+/*
+ * Search for a declared partitions node for the disk in the mmc-card
+ * related node.
+ *
+ * This permits supporting a partition table defined in DT in the special
+ * case where a partition table is not written to the disk and is expected
+ * to be passed from the running system.
+ *
+ * For the user disk, the "partitions" node is searched.
+ * For the special HW disks, a "partitions-" node with the appended name
+ * is used, following this conversion table (to adhere to JEDEC naming):
+ * - boot0 -> partitions-boot1
+ * - boot1 -> partitions-boot2
+ * - gp0 -> partitions-gp1
+ * - gp1 -> partitions-gp2
+ * - gp2 -> partitions-gp3
+ * - gp3 -> partitions-gp4
+ */
+static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
+ const char *subname)
+{
+ const char *node_name = "partitions";
+
+ if (subname) {
+ mmc_dev = mmc_dev->parent;
+
+ /*
+ * Check if we are allocating a boot0/1 BOOT disk.
+ * In DT we use the JEDEC naming boot1/2.
+ */
+ if (!strcmp(subname, "boot0"))
+ node_name = "partitions-boot1";
+ if (!strcmp(subname, "boot1"))
+ node_name = "partitions-boot2";
+ /*
+ * Check if we are allocating a gp0/1/2/3 GP disk.
+ * In DT we use the JEDEC naming gp1/2/3/4.
+ */
+ if (!strcmp(subname, "gp0"))
+ node_name = "partitions-gp1";
+ if (!strcmp(subname, "gp1"))
+ node_name = "partitions-gp2";
+ if (!strcmp(subname, "gp2"))
+ node_name = "partitions-gp3";
+ if (!strcmp(subname, "gp3"))
+ node_name = "partitions-gp4";
+ }
+
+ return device_get_named_child_node(mmc_dev, node_name);
+}
+
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
@@ -2509,6 +2559,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
int area_type,
unsigned int part_type)
{
+ struct fwnode_handle *disk_fwnode;
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
@@ -2610,7 +2661,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
+ ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
+ disk_fwnode);
if (ret)
goto err_put_disk;
return md;
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index b1387dc459a3..7cd1102a8d2c 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -649,7 +649,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
found = apple_nvme_poll_cq(q, &iob);
- if (!rq_list_empty(iob.req_list))
+ if (!rq_list_empty(&iob.req_list))
apple_nvme_complete_batch(&iob);
return found;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 855b42c92284..1a8d32a4a5c3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -42,6 +42,8 @@ struct nvme_ns_info {
bool is_readonly;
bool is_ready;
bool is_removed;
+ bool is_rotational;
+ bool no_vwc;
};
unsigned int admin_timeout = 60;
@@ -1639,6 +1641,8 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
info->is_ready = id->nstat & NVME_NSTAT_NRDY;
+ info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
+ info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
}
kfree(id);
return ret;
@@ -2185,11 +2189,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
- if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
else
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ if (info->is_rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
/*
* Register a metadata profile for PI, or the plain non-integrity NVMe
* metadata masquerading as Type 0 if supported, otherwise reject block
@@ -3636,6 +3643,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ns_id = info->nsid;
head->ids = info->ids;
head->shared = info->is_shared;
+ head->rotational = info->is_rotational;
ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
kref_init(&head->ref);
@@ -4017,7 +4025,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- int ret;
+ int ret = 1;
if (nvme_identify_ns_descs(ctrl, &info))
return;
@@ -4034,9 +4042,10 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
* set up a namespace. If not fall back to the legacy version.
*/
if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
- (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) ||
+ ctrl->vs >= NVME_VS(2, 0, 0))
ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
- else
+ if (ret > 0)
ret = nvme_ns_info_from_identify(ctrl, &info);
if (info.is_removed)
@@ -4895,7 +4904,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
- blk_mq_unfreeze_queue(ns->queue);
+ blk_mq_unfreeze_queue_non_owner(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
@@ -4940,7 +4949,12 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
srcu_idx = srcu_read_lock(&ctrl->srcu);
list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
srcu_read_lock_held(&ctrl->srcu))
- blk_freeze_queue_start(ns->queue);
+ /*
+ * The typical non_owner use case is the pci driver, in which
+ * start_freeze is called from the timeout work function, but
+ * unfreeze is done in the reset work context.
+ */
+ blk_freeze_queue_start_non_owner(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -5036,6 +5050,8 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
+ BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512);
+ BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512);
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
@@ -5044,22 +5060,20 @@ static inline void _nvme_check_size(void)
static int __init nvme_core_init(void)
{
+ unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS;
int result = -ENOMEM;
_nvme_check_size();
- nvme_wq = alloc_workqueue("nvme-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0);
if (!nvme_wq)
goto out;
- nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0);
if (!nvme_reset_wq)
goto destroy_wq;
- nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0);
if (!nvme_delete_wq)
goto destroy_reset_wq;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a96976b22fa7..6522ae16531c 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -114,7 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
+ struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
@@ -152,8 +152,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
bio_set_dev(bio, bdev);
if (has_metadata) {
- ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
- meta_seed);
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
if (ret)
goto out_unmap;
}
@@ -170,7 +169,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
- void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+ void __user *meta_buffer, unsigned meta_len,
u64 *result, unsigned timeout, unsigned int flags)
{
struct nvme_ns *ns = q->queuedata;
@@ -187,7 +186,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, NULL, flags);
+ meta_len, NULL, flags);
if (ret)
return ret;
}
@@ -268,7 +267,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.lbatm = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
- meta_len, lower_32_bits(io.slba), NULL, 0, 0);
+ meta_len, NULL, 0, 0);
}
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -323,7 +322,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
- cmd.metadata_len, 0, &result, timeout, 0);
+ cmd.metadata_len, &result, timeout, 0);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -370,7 +369,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
- cmd.metadata_len, 0, &cmd.result, timeout, flags);
+ cmd.metadata_len, &cmd.result, timeout, flags);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -402,7 +401,7 @@ struct nvme_uring_cmd_pdu {
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
- return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+ return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu);
}
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
@@ -507,7 +506,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, ioucmd, vec);
+ d.metadata_len, ioucmd, vec);
if (ret)
return ret;
}
@@ -635,8 +634,6 @@ static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
- BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
-
ret = nvme_uring_cmd_checks(issue_flags);
if (ret)
return ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6a15873055b9..f04cfe3fb936 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -635,8 +635,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
if (head->ids.csi == NVME_CSI_ZNS)
lim.features |= BLK_FEAT_ZONED;
- else
- lim.max_zone_append_sectors = 0;
head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
if (IS_ERR(head->disk))
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 093cb423f536..900719c4c70c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -474,6 +474,7 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
bool shared;
+ bool rotational;
bool passthru_err_log_enabled;
struct nvme_effects_log *effects;
u64 nuse;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4b9fda0b1d9a..5f2e3ad2cc52 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -141,6 +141,7 @@ struct nvme_dev {
struct nvme_ctrl ctrl;
u32 last_ps;
bool hmb;
+ struct sg_table *hmb_sgt;
mempool_t *iod_mempool;
@@ -153,6 +154,7 @@ struct nvme_dev {
/* host memory buffer support: */
u64 host_mem_size;
u32 nr_host_mem_descs;
+ u32 host_mem_descs_size;
dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
@@ -902,11 +904,12 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
{
+ struct request *req;
+
spin_lock(&nvmeq->sq_lock);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
nvme_sq_copy_cmd(nvmeq, &iod->cmd);
@@ -929,33 +932,26 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}
-static void nvme_queue_rqs(struct request **rqlist)
+static void nvme_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-
- if (!nvme_prep_rq_batch(nvmeq, req)) {
- /* detach 'req' and add to remainder list */
- rq_list_move(rqlist, &requeue_list, req, prev);
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct nvme_queue *nvmeq = NULL;
+ struct request *req;
- req = prev;
- if (!req)
- continue;
- }
+ while ((req = rq_list_pop(rqlist))) {
+ if (nvmeq && nvmeq != req->mq_hctx->driver_data)
+ nvme_submit_cmds(nvmeq, &submit_list);
+ nvmeq = req->mq_hctx->driver_data;
- if (!next || req->mq_hctx != next->mq_hctx) {
- /* detach rest of list, and submit */
- req->rq_next = NULL;
- nvme_submit_cmds(nvmeq, rqlist);
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (nvme_prep_rq_batch(nvmeq, req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (nvmeq)
+ nvme_submit_cmds(nvmeq, &submit_list);
*rqlist = requeue_list;
}
@@ -1083,7 +1079,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
DEFINE_IO_COMP_BATCH(iob);
if (nvme_poll_cq(nvmeq, &iob)) {
- if (!rq_list_empty(iob.req_list))
+ if (!rq_list_empty(&iob.req_list))
nvme_pci_complete_batch(&iob);
return IRQ_HANDLED;
}
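
A compact sketch of the struct rq_list API that the reworked nvme_queue_rqs() and nvme_irq() rely on, using only the helpers seen above (the function itself is illustrative only):

    #include <linux/blk-mq.h>

    /* Move every request from one list to another, preserving FIFO order. */
    static void example_requeue_all(struct rq_list *src, struct rq_list *dst)
    {
        struct request *req;

        while ((req = rq_list_pop(src)))
            rq_list_add_tail(dst, req);

        WARN_ON_ONCE(!rq_list_empty(src));
    }

An empty list is simply initialized with "struct rq_list list = { };", which is what the new nvme_queue_rqs() does for its submit and requeue lists.
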
@@ -1951,7 +1947,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
return ret;
}
-static void nvme_free_host_mem(struct nvme_dev *dev)
+static void nvme_free_host_mem_multi(struct nvme_dev *dev)
{
int i;
@@ -1966,18 +1962,54 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
kfree(dev->host_mem_desc_bufs);
dev->host_mem_desc_bufs = NULL;
- dma_free_coherent(dev->dev,
- dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+ if (dev->hmb_sgt)
+ dma_free_noncontiguous(dev->dev, dev->host_mem_size,
+ dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ else
+ nvme_free_host_mem_multi(dev);
+
+ dma_free_coherent(dev->dev, dev->host_mem_descs_size,
dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
+ dev->host_mem_descs_size = 0;
dev->nr_host_mem_descs = 0;
}
-static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
+{
+ dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size,
+ DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
+ if (!dev->hmb_sgt)
+ return -ENOMEM;
+
+ dev->host_mem_descs = dma_alloc_coherent(dev->dev,
+ sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
+ GFP_KERNEL);
+ if (!dev->host_mem_descs) {
+ dma_free_noncontiguous(dev->dev, dev->host_mem_size,
+ dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ dev->hmb_sgt = NULL;
+ return -ENOMEM;
+ }
+ dev->host_mem_size = size;
+ dev->host_mem_descs_size = sizeof(*dev->host_mem_descs);
+ dev->nr_host_mem_descs = 1;
+
+ dev->host_mem_descs[0].addr =
+ cpu_to_le64(dev->hmb_sgt->sgl->dma_address);
+ dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE);
+ return 0;
+}
+
+static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
u32 chunk_size)
{
struct nvme_host_mem_buf_desc *descs;
- u32 max_entries, len;
+ u32 max_entries, len, descs_size;
dma_addr_t descs_dma;
int i = 0;
void **bufs;
@@ -1990,8 +2022,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
max_entries = dev->ctrl.hmmaxd;
- descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
- &descs_dma, GFP_KERNEL);
+ descs_size = max_entries * sizeof(*descs);
+ descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
+ GFP_KERNEL);
if (!descs)
goto out;
@@ -2020,6 +2053,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dev->host_mem_size = size;
dev->host_mem_descs = descs;
dev->host_mem_descs_dma = descs_dma;
+ dev->host_mem_descs_size = descs_size;
dev->host_mem_desc_bufs = bufs;
return 0;
@@ -2034,8 +2068,7 @@ out_free_bufs:
kfree(bufs);
out_free_descs:
- dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
- descs_dma);
+ dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
out:
dev->host_mem_descs = NULL;
return -ENOMEM;
@@ -2047,9 +2080,18 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
u64 chunk_size;
+ /*
+ * If there is an IOMMU that can merge pages, try a virtually
+ * non-contiguous allocation for a single segment first.
+ */
+ if (!(PAGE_SIZE & dma_get_merge_boundary(dev->dev))) {
+ if (!nvme_alloc_host_mem_single(dev, preferred))
+ return 0;
+ }
+
/* start big and work our way down */
for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
- if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+ if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) {
if (!min || dev->host_mem_size >= min)
return 0;
nvme_free_host_mem(dev);
@@ -2097,8 +2139,10 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
}
dev_info(dev->ctrl.device,
- "allocated %lld MiB host memory buffer.\n",
- dev->host_mem_size >> ilog2(SZ_1M));
+ "allocated %lld MiB host memory buffer (%u segment%s).\n",
+ dev->host_mem_size >> ilog2(SZ_1M),
+ dev->nr_host_mem_descs,
+ str_plural(dev->nr_host_mem_descs));
}
ret = nvme_set_host_mem(dev, enable_bits);
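
A sketch of the single-segment HMB allocation added above, assuming (as the comment in nvme_alloc_host_mem() states) that a merge-capable IOMMU lets dma_alloc_noncontiguous() describe the whole buffer with one DMA segment; the function name is illustrative:

    #include <linux/dma-mapping.h>

    static struct sg_table *example_hmb_alloc_single(struct device *dev, size_t size)
    {
        struct sg_table *sgt;

        /* Same check as above: only try this when pages can be merged. */
        if (PAGE_SIZE & dma_get_merge_boundary(dev))
            return NULL;

        sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
                                      GFP_KERNEL, 0);
        if (!sgt)
            return NULL;

        /* One HMB descriptor then covers the buffer: sgt->sgl->dma_address, size. */
        return sgt;
    }

The buffer is released with dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL), as nvme_free_host_mem() does above.
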
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 87c437fc070d..ad25ad1e4041 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -228,27 +228,61 @@ static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const rrega_strs[] = {
+ [0x00] = "register",
+ [0x01] = "unregister",
+ [0x02] = "replace",
+ };
const char *ret = trace_seq_buffer_ptr(p);
u8 rrega = cdw10[0] & 0x7;
u8 iekey = (cdw10[0] >> 3) & 0x1;
u8 ptpl = (cdw10[3] >> 6) & 0x3;
+ const char *rrega_str;
+
+ if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
+ rrega_str = rrega_strs[rrega];
+ else
+ rrega_str = "reserved";
- trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
- rrega, iekey, ptpl);
+ trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
+ rrega, rrega_str, iekey, ptpl);
trace_seq_putc(p, 0);
return ret;
}
+static const char * const rtype_strs[] = {
+ [0x00] = "reserved",
+ [0x01] = "write exclusive",
+ [0x02] = "exclusive access",
+ [0x03] = "write exclusive registrants only",
+ [0x04] = "exclusive access registrants only",
+ [0x05] = "write exclusive all registrants",
+ [0x06] = "exclusive access all registrants",
+};
+
static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const racqa_strs[] = {
+ [0x00] = "acquire",
+ [0x01] = "preempt",
+ [0x02] = "preempt and abort",
+ };
const char *ret = trace_seq_buffer_ptr(p);
u8 racqa = cdw10[0] & 0x7;
u8 iekey = (cdw10[0] >> 3) & 0x1;
u8 rtype = cdw10[1];
+ const char *racqa_str = "reserved";
+ const char *rtype_str = "reserved";
- trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
- racqa, iekey, rtype);
+ if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
+ racqa_str = racqa_strs[racqa];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
+
+ trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
+ racqa, racqa_str, iekey, rtype, rtype_str);
trace_seq_putc(p, 0);
return ret;
@@ -256,13 +290,25 @@ static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const rrela_strs[] = {
+ [0x00] = "release",
+ [0x01] = "clear",
+ };
const char *ret = trace_seq_buffer_ptr(p);
u8 rrela = cdw10[0] & 0x7;
u8 iekey = (cdw10[0] >> 3) & 0x1;
u8 rtype = cdw10[1];
+ const char *rrela_str = "reserved";
+ const char *rtype_str = "reserved";
+
+ if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
+ rrela_str = rrela_strs[rrela];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
- trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
- rrela, iekey, rtype);
+ trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
+ rrela, rrela_str, iekey, rtype, rtype_str);
trace_seq_putc(p, 0);
return ret;
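
A worked example of the decoded output, derived from the format strings and lookup tables above: a Reservation Acquire with cdw10[0] = 0x01 and cdw10[1] = 0x02 (racqa 1, iekey 0, rtype 2) now traces as

    /* racqa=1:preempt, iekey=0, rtype=2:exclusive access */

rather than the previous purely numeric "racqa=1, iekey=0, rtype=2".
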
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9a06f9d98cd6..382949e18c6a 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -111,7 +111,7 @@ void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
lim->features |= BLK_FEAT_ZONED;
lim->max_open_zones = zi->max_open_zones;
lim->max_active_zones = zi->max_active_zones;
- lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+ lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;
lim->chunk_sectors = ns->head->zsze =
nvme_lba_to_sect(ns->head, zi->zone_size);
}
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index c402c44350b2..f2b025bbe10c 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
- discovery.o io-cmd-file.o io-cmd-bdev.o
+ discovery.o io-cmd-file.o io-cmd-bdev.o pr.o
nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 081f0473cd9e..934b401fbc2f 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -71,6 +71,35 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
nvmet_req_complete(req, 0);
}
+static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
+{
+ struct nvme_supported_log *logs;
+ u16 status;
+
+ logs = kzalloc(sizeof(*logs), GFP_KERNEL);
+ if (!logs) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
+ logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
+
+ status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
+ kfree(logs);
+out:
+ nvmet_req_complete(req, status);
+}
+
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
@@ -130,6 +159,45 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
return NVME_SC_SUCCESS;
}
+static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
+{
+ struct nvme_rotational_media_log *log;
+ struct gendisk *disk;
+ u16 status;
+
+ req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
+ req->cmd->get_log_page.lsi));
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ if (req->transfer_len != sizeof(*log)) {
+ status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ log->endgid = req->cmd->get_log_page.lsi;
+ disk = req->ns->bdev->bd_disk;
+ if (disk && disk->ia_ranges)
+ log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
+ else
+ log->numa = cpu_to_le16(1);
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
struct nvme_smart_log *log;
@@ -176,6 +244,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
log->iocs[nvme_cmd_read] =
log->iocs[nvme_cmd_flush] =
log->iocs[nvme_cmd_dsm] =
+ log->iocs[nvme_cmd_resv_acquire] =
+ log->iocs[nvme_cmd_resv_register] =
+ log->iocs[nvme_cmd_resv_release] =
+ log->iocs[nvme_cmd_resv_report] =
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
log->iocs[nvme_cmd_write] =
log->iocs[nvme_cmd_write_zeroes] =
@@ -272,6 +344,49 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
return struct_size(desc, nsids, count);
}
+static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
+{
+ u64 host_reads, host_writes, data_units_read, data_units_written;
+ struct nvme_endurance_group_log *log;
+ u16 status;
+
+ /*
+ * The target driver emulates each endurance group as its own
+ * namespace, reusing the nsid as the endurance group identifier.
+ */
+ req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
+ req->cmd->get_log_page.lsi));
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ if (!req->ns->bdev)
+ goto copy;
+
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
+ data_units_read =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
+ data_units_written =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
+
+ put_unaligned_le64(host_reads, &log->hrc[0]);
+ put_unaligned_le64(data_units_read, &log->dur[0]);
+ put_unaligned_le64(host_writes, &log->hwc[0]);
+ put_unaligned_le64(data_units_written, &log->duw[0]);
+copy:
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
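
A worked example for the data-unit conversion in the endurance group log above, assuming the part_stat sector counts are in 512-byte units (the same convention the SMART log code in this file uses):

    /*
     * 1,234,567 sectors read:
     *   data_units_read = DIV_ROUND_UP(1234567, 1000) = 1235
     * so one reported data unit corresponds to 1000 * 512 = 512,000 bytes.
     */
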
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
struct nvme_ana_rsp_hdr hdr = { 0, };
@@ -317,12 +432,44 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
+{
+ struct nvme_supported_features_log *features;
+ u16 status;
+
+ features = kzalloc(sizeof(*features), GFP_KERNEL);
+ if (!features) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ features->fis[NVME_FEAT_NUM_QUEUES] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_KATO] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_ASYNC_EVENT] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_HOST_ID] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
+ features->fis[NVME_FEAT_WRITE_PROTECT] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
+ features->fis[NVME_FEAT_RESV_MASK] =
+ cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
+
+ status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
+ kfree(features);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
return;
switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_SUPPORTED:
+ return nvmet_execute_get_supported_log_pages(req);
case NVME_LOG_ERROR:
return nvmet_execute_get_log_page_error(req);
case NVME_LOG_SMART:
@@ -338,8 +485,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
return nvmet_execute_get_log_changed_ns(req);
case NVME_LOG_CMD_EFFECTS:
return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ENDURANCE_GROUP:
+ return nvmet_execute_get_log_page_endgrp(req);
case NVME_LOG_ANA:
return nvmet_execute_get_log_page_ana(req);
+ case NVME_LOG_FEATURES:
+ return nvmet_execute_get_log_page_features(req);
+ case NVME_LOG_RMI:
+ return nvmet_execute_get_log_page_rmi(req);
+ case NVME_LOG_RESERVATION:
+ return nvmet_execute_get_log_page_resv(req);
}
pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
@@ -433,7 +588,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
- NVME_CTRL_ONCS_WRITE_ZEROES);
+ NVME_CTRL_ONCS_WRITE_ZEROES |
+ NVME_CTRL_ONCS_RESERVATIONS);
/* XXX: don't report vwc if the underlying device is write through */
id->vwc = NVME_CTRL_VWC_PRESENT;
@@ -467,6 +623,13 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->msdbd = ctrl->ops->msdbd;
+ /*
+ * Endurance group identifier is 16 bits, so we can't let namespaces
+ * overflow that since we reuse the nsid
+ */
+ BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
+ id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
+
id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
id->anatt = 10; /* random value */
id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
@@ -551,6 +714,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+ if (req->ns->pr.enable)
+ id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
+ NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
+
+ /*
+ * Since we don't know any better, every namespace is its own endurance
+ * group.
+ */
+ id->endgid = cpu_to_le16(req->ns->nsid);
+
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
id->lbaf[0].ds = req->ns->blksize_shift;
@@ -576,7 +754,40 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_identify_nslist(struct nvmet_req *req)
+static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
+{
+ u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
+ static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+ __le16 *list;
+ u16 status;
+ int i = 1;
+
+ list = kzalloc(buf_size, GFP_KERNEL);
+ if (!list) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->nsid <= min_endgid)
+ continue;
+
+ list[i++] = cpu_to_le16(ns->nsid);
+ if (i == buf_size / sizeof(__le16))
+ break;
+ }
+
+ list[0] = cpu_to_le16(i - 1);
+ status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+ kfree(list);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
{
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -606,6 +817,8 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
+ if (match_css && ns->csi != req->cmd->identify.csi)
+ continue;
list[i++] = cpu_to_le32(ns->nsid);
if (i == buf_size / sizeof(__le32))
break;
@@ -685,6 +898,56 @@ static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}
+static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
+{
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ status = nvmet_copy_to_sgl(req, 0, ZERO_PAGE(0),
+ NVME_IDENTIFY_DATA_SIZE);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
+{
+ struct nvme_id_ns_cs_indep *id;
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto out;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ id->nstat = NVME_NSTAT_NRDY;
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+ id->nmic = NVME_NS_NMIC_SHARED;
+ if (req->ns->readonly)
+ id->nsattr |= NVME_NS_ATTR_RO;
+ if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
+ id->nsfeat |= NVME_NS_ROTATIONAL;
+ /*
+ * We need the flush command to flush the file's metadata,
+ * so report VWC as present if the backend is a file, even
+ * though buffered_io is disabled.
+ */
+ if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
+ id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_identify(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -698,7 +961,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
nvmet_execute_identify_ctrl(req);
return;
case NVME_ID_CNS_NS_ACTIVE_LIST:
- nvmet_execute_identify_nslist(req);
+ nvmet_execute_identify_nslist(req, false);
return;
case NVME_ID_CNS_NS_DESC_LIST:
nvmet_execute_identify_desclist(req);
@@ -706,8 +969,8 @@ static void nvmet_execute_identify(struct nvmet_req *req)
case NVME_ID_CNS_CS_NS:
switch (req->cmd->identify.csi) {
case NVME_CSI_NVM:
- /* Not supported */
- break;
+ nvme_execute_identify_ns_nvm(req);
+ return;
case NVME_CSI_ZNS:
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
nvmet_execute_identify_ns_zns(req);
@@ -729,6 +992,15 @@ static void nvmet_execute_identify(struct nvmet_req *req)
break;
}
break;
+ case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
+ nvmet_execute_identify_nslist(req, true);
+ return;
+ case NVME_ID_CNS_NS_CS_INDEP:
+ nvmet_execute_id_cs_indep(req);
+ return;
+ case NVME_ID_CNS_ENDGRP_LIST:
+ nvmet_execute_identify_endgrp_list(req);
+ return;
}
pr_debug("unhandled identify cns %d on qid %d\n",
@@ -861,6 +1133,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
break;
+ case NVME_FEAT_RESV_MASK:
+ status = nvmet_set_feat_resv_notif_mask(req, cdw11);
+ break;
default:
req->error_loc = offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
@@ -959,6 +1234,9 @@ void nvmet_execute_get_features(struct nvmet_req *req)
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_get_feat_write_protect(req);
break;
+ case NVME_FEAT_RESV_MASK:
+ status = nvmet_get_feat_resv_notif_mask(req);
+ break;
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 685e89b35d33..eeee9e9b854c 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -769,6 +769,32 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable);
+}
+
+static ssize_t nvmet_ns_resv_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ pr_err("the ns:%d is already enabled.\n", ns->nsid);
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+ ns->pr.enable = val;
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_ns_, resv_enable);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
@@ -777,6 +803,7 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
&nvmet_ns_attr_revalidate_size,
+ &nvmet_ns_attr_resv_enable,
#ifdef CONFIG_PCI_P2PDMA
&nvmet_ns_attr_p2pmem,
#endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ed2424f8a396..1f4e9989663b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -611,6 +611,12 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ret)
goto out_restore_subsys_maxnsid;
+ if (ns->pr.enable) {
+ ret = nvmet_pr_init_ns(ns);
+ if (ret)
+ goto out_remove_from_subsys;
+ }
+
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
@@ -620,6 +626,8 @@ out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+out_remove_from_subsys:
+ xa_erase(&subsys->namespaces, ns->nsid);
out_restore_subsys_maxnsid:
subsys->max_nsid = nvmet_max_nsid(subsys);
percpu_ref_exit(&ns->ref);
@@ -663,6 +671,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
wait_for_completion(&ns->disable_done);
percpu_ref_exit(&ns->ref);
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
+
mutex_lock(&subsys->lock);
subsys->nr_namespaces--;
@@ -754,6 +765,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
struct nvmet_ns *ns = req->ns;
+ struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref;
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
@@ -766,6 +778,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
req->ops->queue_response(req);
+
+ if (pc_ref)
+ nvmet_pr_put_ns_pc_ref(pc_ref);
if (ns)
nvmet_put_namespace(ns);
}
@@ -929,18 +944,39 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
}
+ if (req->ns->pr.enable) {
+ ret = nvmet_parse_pr_cmd(req);
+ if (!ret)
+ return ret;
+ }
+
switch (req->ns->csi) {
case NVME_CSI_NVM:
if (req->ns->file)
- return nvmet_file_parse_io_cmd(req);
- return nvmet_bdev_parse_io_cmd(req);
+ ret = nvmet_file_parse_io_cmd(req);
+ else
+ ret = nvmet_bdev_parse_io_cmd(req);
+ break;
case NVME_CSI_ZNS:
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
- return nvmet_bdev_zns_parse_io_cmd(req);
- return NVME_SC_INVALID_IO_CMD_SET;
+ ret = nvmet_bdev_zns_parse_io_cmd(req);
+ else
+ ret = NVME_SC_INVALID_IO_CMD_SET;
+ break;
default:
- return NVME_SC_INVALID_IO_CMD_SET;
+ ret = NVME_SC_INVALID_IO_CMD_SET;
}
+ if (ret)
+ return ret;
+
+ if (req->ns->pr.enable) {
+ ret = nvmet_pr_check_cmd_access(req);
+ if (ret)
+ return ret;
+
+ ret = nvmet_pr_get_ns_pc_ref(req);
+ }
+ return ret;
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -964,6 +1000,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->ns = NULL;
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
+ req->pc_ref = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -1015,6 +1052,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->pc_ref)
+ nvmet_pr_put_ns_pc_ref(req->pc_ref);
if (req->ns)
nvmet_put_namespace(req->ns);
}
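
For reference, the pairing of the new per-controller reference over a request's lifetime; this restates the hunks above rather than adding behaviour:

    /*
     * nvmet_parse_io_cmd()
     *     nvmet_pr_check_cmd_access()   reject on reservation conflict
     *     nvmet_pr_get_ns_pc_ref()      req->pc_ref = tryget_live(per-ctrl ref)
     *
     * __nvmet_req_complete() or nvmet_req_uninit()
     *     nvmet_pr_put_ns_pc_ref(req->pc_ref)
     *
     * Preempt-and-abort (in pr.c) kills and drains these refs per controller,
     * which is why every in-flight I/O on a PR-enabled namespace holds one.
     */
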
@@ -1383,7 +1422,8 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
+ uuid_t *hostid)
{
struct nvmet_subsys *subsys;
struct nvmet_ctrl *ctrl;
@@ -1462,6 +1502,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
+ uuid_copy(&ctrl->hostid, hostid);
+
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
@@ -1478,6 +1520,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
nvmet_start_keep_alive_timer(ctrl);
mutex_lock(&subsys->lock);
+ ret = nvmet_ctrl_init_pr(ctrl);
+ if (ret)
+ goto init_pr_fail;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
nvmet_setup_p2p_ns_map(ctrl, req);
nvmet_debugfs_ctrl_setup(ctrl);
@@ -1486,6 +1531,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+init_pr_fail:
+ mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+ ida_free(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_changed_ns_list:
@@ -1504,6 +1553,7 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_ctrl_destroy_pr(ctrl);
nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
@@ -1717,7 +1767,7 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
nvmet_wq = alloc_workqueue("nvmet-wq",
- WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0);
if (!nvmet_wq)
goto out_free_buffered_work_queue;
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index c4b2eddd5666..c49904ebb6c2 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -64,6 +64,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
case NVME_REG_CSTS:
val = ctrl->csts;
break;
+ case NVME_REG_CRTO:
+ val = NVME_CAP_TIMEOUT(ctrl->csts);
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
@@ -245,12 +248,10 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
- le32_to_cpu(c->kato), &ctrl);
+ le32_to_cpu(c->kato), &ctrl, &d->hostid);
if (status)
goto out;
- uuid_copy(&ctrl->hostid, &d->hostid);
-
dhchap_status = nvmet_setup_auth(ctrl);
if (dhchap_status) {
pr_err("Failed to setup authentication, dhchap status %u\n",
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 190f55e6d753..58328b35dc96 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -20,8 +20,9 @@
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
+#include <linux/kfifo.h>
-#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
+#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0)
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
@@ -30,6 +31,7 @@
#define NVMET_MN_MAX_SIZE 40
#define NVMET_SN_MAX_SIZE 20
#define NVMET_FR_MAX_SIZE 8
+#define NVMET_PR_LOG_QUEUE_SIZE 64
/*
* Supported optional AENs:
@@ -56,6 +58,38 @@
#define IPO_IATTR_CONNECT_SQE(x) \
(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
+struct nvmet_pr_registrant {
+ u64 rkey;
+ uuid_t hostid;
+ enum nvme_pr_type rtype;
+ struct list_head entry;
+ struct rcu_head rcu;
+};
+
+struct nvmet_pr {
+ bool enable;
+ unsigned long notify_mask;
+ atomic_t generation;
+ struct nvmet_pr_registrant __rcu *holder;
+ /*
+ * Reservation commands require mutual exclusion for their whole
+ * execution. For 'preempt and abort', waiting for the per-controller
+ * percpu_refs to drain is deferred to a work item, so the lock is
+ * released from a different context than the one that acquired it;
+ * a semaphore is therefore used instead of a mutex.
+ */
+ struct semaphore pr_sem;
+ struct list_head registrant_list;
+};
+
+struct nvmet_pr_per_ctrl_ref {
+ struct percpu_ref ref;
+ struct completion free_done;
+ struct completion confirm_done;
+ uuid_t hostid;
+};
+
struct nvmet_ns {
struct percpu_ref ref;
struct file *bdev_file;
@@ -85,6 +119,8 @@ struct nvmet_ns {
int pi_type;
int metadata_size;
u8 csi;
+ struct nvmet_pr pr;
+ struct xarray pr_per_ctrl_refs;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -191,6 +227,13 @@ static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}
+struct nvmet_pr_log_mgr {
+ struct mutex lock;
+ u64 lost_count;
+ u64 counter;
+ DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
+};
+
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
@@ -246,6 +289,7 @@ struct nvmet_ctrl {
u8 *dh_key;
size_t dh_keysize;
#endif
+ struct nvmet_pr_log_mgr pr_log_mgr;
};
struct nvmet_subsys {
@@ -396,6 +440,9 @@ struct nvmet_req {
struct work_struct zmgmt_work;
} z;
#endif /* CONFIG_BLK_DEV_ZONED */
+ struct {
+ struct work_struct abort_work;
+ } r;
};
int sg_cnt;
int metadata_sg_cnt;
@@ -412,6 +459,7 @@ struct nvmet_req {
struct device *p2p_client;
u16 error_loc;
u64 error_slba;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
};
#define NVMET_MAX_MPOOL_BVEC 16
@@ -498,7 +546,8 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
+ uuid_t *hostid);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
const char *hostnqn, u16 cntlid,
struct nvmet_req *req);
@@ -761,4 +810,18 @@ static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif
+int nvmet_pr_init_ns(struct nvmet_ns *ns);
+u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
+u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
+void nvmet_pr_exit_ns(struct nvmet_ns *ns);
+void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
+u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
+u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
+static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
+{
+ percpu_ref_put(&pc_ref->ref);
+}
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c
new file mode 100644
index 000000000000..25a02b50d9f3
--- /dev/null
+++ b/drivers/nvme/target/pr.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics Persistent Reservation.
+ * Copyright (c) 2024 Guixin Liu, Alibaba Group.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/unaligned.h>
+#include "nvmet.h"
+
+#define NVMET_PR_NOTIFI_MASK_ALL \
+ (1 << NVME_PR_NOTIFY_BIT_REG_PREEMPTED | \
+ 1 << NVME_PR_NOTIFY_BIT_RESV_RELEASED | \
+ 1 << NVME_PR_NOTIFY_BIT_RESV_PREEMPTED)
+
+static inline bool nvmet_pr_parse_ignore_key(u32 cdw10)
+{
+ /* Ignore existing key, bit 03. */
+ return (cdw10 >> 3) & 1;
+}
+
+static inline struct nvmet_ns *nvmet_pr_to_ns(struct nvmet_pr *pr)
+{
+ return container_of(pr, struct nvmet_ns, pr);
+}
+
+static struct nvmet_pr_registrant *
+nvmet_pr_find_registrant(struct nvmet_pr *pr, uuid_t *hostid)
+{
+ struct nvmet_pr_registrant *reg;
+
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, hostid))
+ return reg;
+ }
+ return NULL;
+}
+
+u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
+{
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+ u16 status;
+
+ if (mask & ~(NVMET_PR_NOTIFI_MASK_ALL)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ if (nsid != U32_MAX) {
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
+ if (!req->ns->pr.enable)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
+ WRITE_ONCE(req->ns->pr.notify_mask, mask);
+ goto success;
+ }
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->pr.enable)
+ WRITE_ONCE(ns->pr.notify_mask, mask);
+ }
+
+success:
+ nvmet_set_result(req, mask);
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req)
+{
+ u16 status;
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
+
+ if (!req->ns->pr.enable)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
+ nvmet_set_result(req, READ_ONCE(req->ns->pr.notify_mask));
+ return status;
+}
+
+void nvmet_execute_get_log_page_resv(struct nvmet_req *req)
+{
+ struct nvmet_pr_log_mgr *log_mgr = &req->sq->ctrl->pr_log_mgr;
+ struct nvme_pr_log next_log = {0};
+ struct nvme_pr_log log = {0};
+ u16 status = NVME_SC_SUCCESS;
+ u64 lost_count;
+ u64 cur_count;
+ u64 next_count;
+
+ mutex_lock(&log_mgr->lock);
+ if (!kfifo_get(&log_mgr->log_queue, &log))
+ goto out;
+
+ /*
+ * The kfifo only lets us peek at the next (oldest) entry, not the
+ * last one. Use the current count and the count from the next log
+ * to calculate the number of lost logs, handling counter overflow.
+ * If there is no subsequent log, the number of lost logs equals the
+ * lost_count kept in the nvmet_pr_log_mgr.
+ */
+ cur_count = le64_to_cpu(log.count);
+ if (kfifo_peek(&log_mgr->log_queue, &next_log)) {
+ next_count = le64_to_cpu(next_log.count);
+ if (next_count > cur_count)
+ lost_count = next_count - cur_count - 1;
+ else
+ lost_count = U64_MAX - cur_count + next_count - 1;
+ } else {
+ lost_count = log_mgr->lost_count;
+ }
+
+ log.count = cpu_to_le64((cur_count + lost_count) == 0 ?
+ 1 : (cur_count + lost_count));
+ log_mgr->lost_count -= lost_count;
+
+ log.nr_pages = kfifo_len(&log_mgr->log_queue);
+
+out:
+ status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log));
+ mutex_unlock(&log_mgr->lock);
+ nvmet_req_complete(req, status);
+}
+
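
A worked example of the lost-log calculation above, with the counter wrapped (the counter skips 0, see nvmet_pr_add_resv_log() below):

    /*
     * cur_count  = U64_MAX - 1   (log just dequeued)
     * next_count = 3             (next log still in the kfifo)
     *
     * lost_count = U64_MAX - cur_count + next_count - 1
     *            = 1 + 3 - 1 = 3
     *
     * i.e. the logs with counts U64_MAX, 1 and 2 were dropped in between.
     */
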
+static void nvmet_pr_add_resv_log(struct nvmet_ctrl *ctrl, u8 log_type,
+ u32 nsid)
+{
+ struct nvmet_pr_log_mgr *log_mgr = &ctrl->pr_log_mgr;
+ struct nvme_pr_log log = {0};
+
+ mutex_lock(&log_mgr->lock);
+ log_mgr->counter++;
+ if (log_mgr->counter == 0)
+ log_mgr->counter = 1;
+
+ log.count = cpu_to_le64(log_mgr->counter);
+ log.type = log_type;
+ log.nsid = cpu_to_le32(nsid);
+
+ if (!kfifo_put(&log_mgr->log_queue, log)) {
+ pr_info("a reservation log lost, cntlid:%d, log_type:%d, nsid:%d\n",
+ ctrl->cntlid, log_type, nsid);
+ log_mgr->lost_count++;
+ }
+
+ mutex_unlock(&log_mgr->lock);
+}
+
+static void nvmet_pr_resv_released(struct nvmet_pr *pr, uuid_t *hostid)
+{
+ struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ if (test_bit(NVME_PR_NOTIFY_BIT_RESV_RELEASED, &pr->notify_mask))
+ return;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (!uuid_equal(&ctrl->hostid, hostid) &&
+ nvmet_pr_find_registrant(pr, &ctrl->hostid)) {
+ nvmet_pr_add_resv_log(ctrl,
+ NVME_PR_LOG_RESERVATION_RELEASED, ns->nsid);
+ nvmet_add_async_event(ctrl, NVME_AER_CSS,
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
+ NVME_LOG_RESERVATION);
+ }
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+static void nvmet_pr_send_event_to_host(struct nvmet_pr *pr, uuid_t *hostid,
+ u8 log_type)
+{
+ struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (uuid_equal(hostid, &ctrl->hostid)) {
+ nvmet_pr_add_resv_log(ctrl, log_type, ns->nsid);
+ nvmet_add_async_event(ctrl, NVME_AER_CSS,
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
+ NVME_LOG_RESERVATION);
+ }
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+static void nvmet_pr_resv_preempted(struct nvmet_pr *pr, uuid_t *hostid)
+{
+ if (test_bit(NVME_PR_NOTIFY_BIT_RESV_PREEMPTED, &pr->notify_mask))
+ return;
+
+ nvmet_pr_send_event_to_host(pr, hostid,
+ NVME_PR_LOG_RESERVATOIN_PREEMPTED);
+}
+
+static void nvmet_pr_registration_preempted(struct nvmet_pr *pr,
+ uuid_t *hostid)
+{
+ if (test_bit(NVME_PR_NOTIFY_BIT_REG_PREEMPTED, &pr->notify_mask))
+ return;
+
+ nvmet_pr_send_event_to_host(pr, hostid,
+ NVME_PR_LOG_REGISTRATION_PREEMPTED);
+}
+
+static inline void nvmet_pr_set_new_holder(struct nvmet_pr *pr, u8 new_rtype,
+ struct nvmet_pr_registrant *reg)
+{
+ reg->rtype = new_rtype;
+ rcu_assign_pointer(pr->holder, reg);
+}
+
+static u16 nvmet_pr_register(struct nvmet_req *req,
+ struct nvmet_pr_register_data *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr_registrant *new, *reg;
+ struct nvmet_pr *pr = &req->ns->pr;
+ u16 status = NVME_SC_SUCCESS;
+ u64 nrkey = le64_to_cpu(d->nrkey);
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NVME_SC_INTERNAL;
+
+ down(&pr->pr_sem);
+ reg = nvmet_pr_find_registrant(pr, &ctrl->hostid);
+ if (reg) {
+ if (reg->rkey != nrkey)
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ kfree(new);
+ goto out;
+ }
+
+ memset(new, 0, sizeof(*new));
+ INIT_LIST_HEAD(&new->entry);
+ new->rkey = nrkey;
+ uuid_copy(&new->hostid, &ctrl->hostid);
+ list_add_tail_rcu(&new->entry, &pr->registrant_list);
+
+out:
+ up(&pr->pr_sem);
+ return status;
+}
+
+static void nvmet_pr_unregister_one(struct nvmet_pr *pr,
+ struct nvmet_pr_registrant *reg)
+{
+ struct nvmet_pr_registrant *first_reg;
+ struct nvmet_pr_registrant *holder;
+ u8 original_rtype;
+
+ list_del_rcu(&reg->entry);
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (reg != holder)
+ goto out;
+
+ original_rtype = holder->rtype;
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ first_reg = list_first_or_null_rcu(&pr->registrant_list,
+ struct nvmet_pr_registrant, entry);
+ if (first_reg)
+ first_reg->rtype = original_rtype;
+ rcu_assign_pointer(pr->holder, first_reg);
+ } else {
+ rcu_assign_pointer(pr->holder, NULL);
+
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_REG_ONLY ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ }
+out:
+ kfree_rcu(reg, rcu);
+}
+
+static u16 nvmet_pr_unregister(struct nvmet_req *req,
+ struct nvmet_pr_register_data *d,
+ bool ignore_key)
+{
+ u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *reg;
+
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid)) {
+ if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) {
+ status = NVME_SC_SUCCESS;
+ nvmet_pr_unregister_one(pr, reg);
+ }
+ break;
+ }
+ }
+ up(&pr->pr_sem);
+
+ return status;
+}
+
+static void nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant *reg,
+ void *attr)
+{
+ reg->rkey = *(u64 *)attr;
+}
+
+static u16 nvmet_pr_update_reg_attr(struct nvmet_pr *pr,
+ struct nvmet_pr_registrant *reg,
+ void (*change_attr)(struct nvmet_pr_registrant *reg,
+ void *attr),
+ void *attr)
+{
+ struct nvmet_pr_registrant *holder;
+ struct nvmet_pr_registrant *new;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (reg != holder) {
+ change_attr(reg, attr);
+ return NVME_SC_SUCCESS;
+ }
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ return NVME_SC_INTERNAL;
+
+ new->rkey = holder->rkey;
+ new->rtype = holder->rtype;
+ uuid_copy(&new->hostid, &holder->hostid);
+ INIT_LIST_HEAD(&new->entry);
+
+ change_attr(new, attr);
+ list_replace_rcu(&holder->entry, &new->entry);
+ rcu_assign_pointer(pr->holder, new);
+ kfree_rcu(holder, rcu);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pr_replace(struct nvmet_req *req,
+ struct nvmet_pr_register_data *d,
+ bool ignore_key)
+{
+ u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *reg;
+ u64 nrkey = le64_to_cpu(d->nrkey);
+
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid)) {
+ if (ignore_key || reg->rkey == le64_to_cpu(d->crkey))
+ status = nvmet_pr_update_reg_attr(pr, reg,
+ nvmet_pr_update_reg_rkey,
+ &nrkey);
+ break;
+ }
+ }
+ up(&pr->pr_sem);
+ return status;
+}
+
+static void nvmet_execute_pr_register(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
+ struct nvmet_pr_register_data *d;
+ u8 reg_act = cdw10 & 0x07; /* Reservation Register Action, bit 02:00 */
+ u16 status;
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto free_data;
+
+ switch (reg_act) {
+ case NVME_PR_REGISTER_ACT_REG:
+ status = nvmet_pr_register(req, d);
+ break;
+ case NVME_PR_REGISTER_ACT_UNREG:
+ status = nvmet_pr_unregister(req, d, ignore_key);
+ break;
+ case NVME_PR_REGISTER_ACT_REPLACE:
+ status = nvmet_pr_replace(req, d, ignore_key);
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ break;
+ }
+free_data:
+ kfree(d);
+out:
+ if (!status)
+ atomic_inc(&req->ns->pr.generation);
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_pr_acquire(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype)
+{
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (holder && reg != holder)
+ return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ if (holder && reg == holder) {
+ if (holder->rtype == rtype)
+ return NVME_SC_SUCCESS;
+ return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ }
+
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_confirm_ns_pc_ref(struct percpu_ref *ref)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref =
+ container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
+
+ complete(&pc_ref->confirm_done);
+}
+
+static void nvmet_pr_set_ctrl_to_abort(struct nvmet_req *req, uuid_t *hostid)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = req->ns;
+ unsigned long idx;
+
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ if (uuid_equal(&pc_ref->hostid, hostid)) {
+ percpu_ref_kill_and_confirm(&pc_ref->ref,
+ nvmet_pr_confirm_ns_pc_ref);
+ wait_for_completion(&pc_ref->confirm_done);
+ }
+ }
+}
+
+static u16 nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req *req, u64 prkey,
+ uuid_t *send_hostid,
+ bool abort)
+{
+ u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+ uuid_t hostid;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ if (reg->rkey == prkey) {
+ status = NVME_SC_SUCCESS;
+ uuid_copy(&hostid, &reg->hostid);
+ if (abort)
+ nvmet_pr_set_ctrl_to_abort(req, &hostid);
+ nvmet_pr_unregister_one(pr, reg);
+ if (!uuid_equal(&hostid, send_hostid))
+ nvmet_pr_registration_preempted(pr, &hostid);
+ }
+ }
+ return status;
+}
+
+static void nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req *req,
+ u64 prkey,
+ uuid_t *send_hostid,
+ bool abort)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+ uuid_t hostid;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ if (reg->rkey == prkey &&
+ !uuid_equal(&reg->hostid, send_hostid)) {
+ uuid_copy(&hostid, &reg->hostid);
+ if (abort)
+ nvmet_pr_set_ctrl_to_abort(req, &hostid);
+ nvmet_pr_unregister_one(pr, reg);
+ nvmet_pr_registration_preempted(pr, &hostid);
+ }
+ }
+}
+
+static void nvmet_pr_unreg_all_others(struct nvmet_req *req,
+ uuid_t *send_hostid,
+ bool abort)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+ uuid_t hostid;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ if (!uuid_equal(&reg->hostid, send_hostid)) {
+ uuid_copy(&hostid, &reg->hostid);
+ if (abort)
+ nvmet_pr_set_ctrl_to_abort(req, &hostid);
+ nvmet_pr_unregister_one(pr, reg);
+ nvmet_pr_registration_preempted(pr, &hostid);
+ }
+ }
+}
+
+static void nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant *reg,
+ void *attr)
+{
+ u8 new_rtype = *(u8 *)attr;
+
+ reg->rtype = new_rtype;
+}
+
+static u16 nvmet_pr_preempt(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype,
+ struct nvmet_pr_acquire_data *d,
+ bool abort)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ enum nvme_pr_type original_rtype;
+ u64 prkey = le64_to_cpu(d->prkey);
+ u16 status;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (!holder)
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+
+ original_rtype = holder->rtype;
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ if (!prkey) {
+ /*
+ * To prevent possible access from other hosts, and to
+ * avoid terminating the holder, set the new holder
+ * before unregistering.
+ */
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ nvmet_pr_unreg_all_others(req, &ctrl->hostid, abort);
+ return NVME_SC_SUCCESS;
+ }
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+ }
+
+ if (holder == reg) {
+ status = nvmet_pr_update_reg_attr(pr, holder,
+ nvmet_pr_update_holder_rtype, &rtype);
+ if (!status && original_rtype != rtype)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ return status;
+ }
+
+ if (prkey == holder->rkey) {
+ /*
+ * Same as before, set the new holder first.
+ */
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ nvmet_pr_unreg_all_others_by_prkey(req, prkey, &ctrl->hostid,
+ abort);
+ if (original_rtype != rtype)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ return NVME_SC_SUCCESS;
+ }
+
+ if (prkey)
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+}
+
+static void nvmet_pr_do_abort(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, r.abort_work);
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = req->ns;
+ unsigned long idx;
+
+ /*
+ * The target does not support command abort; just wait for each
+ * per-controller ref to drop to zero.
+ */
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ if (percpu_ref_is_dying(&pc_ref->ref)) {
+ wait_for_completion(&pc_ref->free_done);
+ reinit_completion(&pc_ref->confirm_done);
+ reinit_completion(&pc_ref->free_done);
+ percpu_ref_resurrect(&pc_ref->ref);
+ }
+ }
+
+ up(&ns->pr.pr_sem);
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+}
+
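
A condensed sketch of the per-controller ref lifecycle that preempt-and-abort drives, combining nvmet_pr_set_ctrl_to_abort() and nvmet_pr_do_abort() above into one sequence for clarity (illustrative only; in the driver the two halves run in different contexts under pr_sem):

    static void example_quiesce_ctrl_io(struct nvmet_pr_per_ctrl_ref *pc_ref)
    {
        /* Stop new I/O: mark the ref dying; confirm_done fires once switched. */
        percpu_ref_kill_and_confirm(&pc_ref->ref, nvmet_pr_confirm_ns_pc_ref);
        wait_for_completion(&pc_ref->confirm_done);

        /* In-flight I/O still holds references; free_done fires at zero. */
        wait_for_completion(&pc_ref->free_done);

        /* Re-arm the completions and bring the ref back for future I/O. */
        reinit_completion(&pc_ref->confirm_done);
        reinit_completion(&pc_ref->free_done);
        percpu_ref_resurrect(&pc_ref->ref);
    }
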
+static u16 __nvmet_execute_pr_acquire(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 acquire_act,
+ u8 rtype,
+ struct nvmet_pr_acquire_data *d)
+{
+ u16 status;
+
+ switch (acquire_act) {
+ case NVME_PR_ACQUIRE_ACT_ACQUIRE:
+ status = nvmet_pr_acquire(req, reg, rtype);
+ goto out;
+ case NVME_PR_ACQUIRE_ACT_PREEMPT:
+ status = nvmet_pr_preempt(req, reg, rtype, d, false);
+ goto inc_gen;
+ case NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT:
+ status = nvmet_pr_preempt(req, reg, rtype, d, true);
+ goto inc_gen;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ goto out;
+ }
+inc_gen:
+ if (!status)
+ atomic_inc(&req->ns->pr.generation);
+out:
+ return status;
+}
+
+static void nvmet_execute_pr_acquire(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
+ /* Reservation type, bit 15:08 */
+ u8 rtype = (u8)((cdw10 >> 8) & 0xff);
+ /* Reservation acquire action, bit 02:00 */
+ u8 acquire_act = cdw10 & 0x07;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr_acquire_data *d = NULL;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *reg;
+ u16 status = NVME_SC_SUCCESS;
+
+ if (ignore_key ||
+ rtype < NVME_PR_WRITE_EXCLUSIVE ||
+ rtype > NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto free_data;
+
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
+ reg->rkey == le64_to_cpu(d->crkey)) {
+ status = __nvmet_execute_pr_acquire(req, reg,
+ acquire_act, rtype, d);
+ break;
+ }
+ }
+
+ if (!status && acquire_act == NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT) {
+ kfree(d);
+ INIT_WORK(&req->r.abort_work, nvmet_pr_do_abort);
+ queue_work(nvmet_wq, &req->r.abort_work);
+ return;
+ }
+
+ up(&pr->pr_sem);
+
+free_data:
+ kfree(d);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_pr_release(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype)
+{
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ u8 original_rtype;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (!holder || reg != holder)
+ return NVME_SC_SUCCESS;
+
+ original_rtype = holder->rtype;
+ if (original_rtype != rtype)
+ return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+
+ rcu_assign_pointer(pr->holder, NULL);
+
+ if (original_rtype != NVME_PR_WRITE_EXCLUSIVE &&
+ original_rtype != NVME_PR_EXCLUSIVE_ACCESS)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_clear(struct nvmet_req *req)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr *pr = &req->ns->pr;
+
+ rcu_assign_pointer(pr->holder, NULL);
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ list_del_rcu(&reg->entry);
+ if (!uuid_equal(&req->sq->ctrl->hostid, &reg->hostid))
+ nvmet_pr_resv_preempted(pr, &reg->hostid);
+ kfree_rcu(reg, rcu);
+ }
+
+ atomic_inc(&pr->generation);
+}
+
+static u16 __nvmet_execute_pr_release(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 release_act, u8 rtype)
+{
+ switch (release_act) {
+ case NVME_PR_RELEASE_ACT_RELEASE:
+ return nvmet_pr_release(req, reg, rtype);
+ case NVME_PR_RELEASE_ACT_CLEAR:
+ nvmet_pr_clear(req);
+ return NVME_SC_SUCCESS;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ }
+}
+
+static void nvmet_execute_pr_release(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
+ u8 rtype = (u8)((cdw10 >> 8) & 0xff); /* Reservation type, bit 15:08 */
+ u8 release_act = cdw10 & 0x07; /* Reservation release action, bit 02:00 */
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_release_data *d;
+ struct nvmet_pr_registrant *reg;
+ u16 status;
+
+ if (ignore_key) {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (status)
+ goto free_data;
+
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ down(&pr->pr_sem);
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
+ reg->rkey == le64_to_cpu(d->crkey)) {
+ status = __nvmet_execute_pr_release(req, reg,
+ release_act, rtype);
+ break;
+ }
+ }
+ up(&pr->pr_sem);
+free_data:
+ kfree(d);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_pr_report(struct nvmet_req *req)
+{
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u32 num_bytes = 4 * (cdw10 + 1); /* cdw10 is a 0's based dword count */
+ u8 eds = cdw11 & 1; /* Extended data structure, bit 00 */
+ struct nvme_registered_ctrl_ext *ctrl_eds;
+ struct nvme_reservation_status_ext *data;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ struct nvmet_pr_registrant *reg;
+ u16 num_ctrls = 0;
+ u16 status;
+ u8 rtype;
+
+ /* The nvmet hostid (uuid_t) is 128 bits, so only the extended data structure can report it. */
+ if (!eds) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ status = NVME_SC_HOST_ID_INCONSIST | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ if (num_bytes < sizeof(struct nvme_reservation_status_ext)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ data = kmalloc(num_bytes, GFP_KERNEL);
+ if (!data) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+ memset(data, 0, num_bytes);
+ data->gen = cpu_to_le32(atomic_read(&pr->generation));
+ data->ptpls = 0;
+ ctrl_eds = data->regctl_eds;
+
+ rcu_read_lock();
+ holder = rcu_dereference(pr->holder);
+ rtype = holder ? holder->rtype : 0;
+ data->rtype = rtype;
+
+ list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
+ num_ctrls++;
+ /*
+ * Keep counting so the total number of registrants is returned.
+ */
+ if (((void *)ctrl_eds + sizeof(*ctrl_eds)) >
+ ((void *)data + num_bytes))
+ continue;
+ /*
+ * Dynamic controller, set cntlid to 0xffff.
+ */
+ ctrl_eds->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
+ if (rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS)
+ ctrl_eds->rcsts = 1;
+ if (reg == holder)
+ ctrl_eds->rcsts = 1;
+ uuid_copy((uuid_t *)&ctrl_eds->hostid, &reg->hostid);
+ ctrl_eds->rkey = cpu_to_le64(reg->rkey);
+ ctrl_eds++;
+ }
+ rcu_read_unlock();
+
+ put_unaligned_le16(num_ctrls, data->regctl);
+ status = nvmet_copy_to_sgl(req, 0, data, num_bytes);
+ kfree(data);
+out:
+ nvmet_req_complete(req, status);
+}
+
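+/*
+ * Hook up the execute handlers for the four reservation opcodes; a non-zero
+ * return lets the caller fall back to the regular command parsing path.
+ */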
+u16 nvmet_parse_pr_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_resv_register:
+ req->execute = nvmet_execute_pr_register;
+ break;
+ case nvme_cmd_resv_acquire:
+ req->execute = nvmet_execute_pr_acquire;
+ break;
+ case nvme_cmd_resv_release:
+ req->execute = nvmet_execute_pr_release;
+ break;
+ case nvme_cmd_resv_report:
+ req->execute = nvmet_execute_pr_report;
+ break;
+ default:
+ return 1;
+ }
+ return NVME_SC_SUCCESS;
+}
+
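+/*
+ * The two helpers below classify I/O queue commands into the write and read
+ * command groups used by the reservation access checks; admin queue commands
+ * (qid 0) belong to neither group.
+ */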
+static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req)
+{
+ u8 opcode = req->cmd->common.opcode;
+
+ if (req->sq->qid) {
+ switch (opcode) {
+ case nvme_cmd_flush:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ case nvme_cmd_dsm:
+ case nvme_cmd_zone_append:
+ case nvme_cmd_zone_mgmt_send:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req)
+{
+ u8 opcode = req->cmd->common.opcode;
+
+ if (req->sq->qid) {
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_zone_mgmt_recv:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
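+/*
+ * Per-command reservation access check: commands from the reservation
+ * holder's host are always allowed. Otherwise the write and/or read command
+ * groups are rejected depending on the reservation type, except that the
+ * registrants-only and all-registrants types still admit registered hosts.
+ */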
+u16 nvmet_pr_check_cmd_access(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr_registrant *holder;
+ struct nvmet_ns *ns = req->ns;
+ struct nvmet_pr *pr = &ns->pr;
+ u16 status = NVME_SC_SUCCESS;
+
+ rcu_read_lock();
+ holder = rcu_dereference(pr->holder);
+ if (!holder)
+ goto unlock;
+ if (uuid_equal(&ctrl->hostid, &holder->hostid))
+ goto unlock;
+
+ /*
+ * Commands in the Reservation command group are checked when they
+ * are executed, so let them pass here.
+ */
+ switch (holder->rtype) {
+ case NVME_PR_WRITE_EXCLUSIVE:
+ if (nvmet_is_req_write_cmd_group(req))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ case NVME_PR_EXCLUSIVE_ACCESS:
+ if (nvmet_is_req_read_cmd_group(req) ||
+ nvmet_is_req_write_cmd_group(req))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
+ case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
+ if ((nvmet_is_req_write_cmd_group(req)) &&
+ !nvmet_pr_find_registrant(pr, &ctrl->hostid))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+ case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+ if ((nvmet_is_req_read_cmd_group(req) ||
+ nvmet_is_req_write_cmd_group(req)) &&
+ !nvmet_pr_find_registrant(pr, &ctrl->hostid))
+ status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+ break;
+ default:
+ pr_warn("the reservation type is set wrong, type:%d\n",
+ holder->rtype);
+ break;
+ }
+
+unlock:
+ rcu_read_unlock();
+ if (status)
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return status;
+}
+
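+/*
+ * Take a reference on the caller's per-controller ref for this namespace so
+ * that reservation operations can wait for outstanding commands to drain.
+ */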
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+
+ pc_ref = xa_load(&req->ns->pr_per_ctrl_refs,
+ req->sq->ctrl->cntlid);
+ if (unlikely(!percpu_ref_tryget_live(&pc_ref->ref)))
+ return NVME_SC_INTERNAL;
+ req->pc_ref = pc_ref;
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref *ref)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref =
+ container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
+
+ complete(&pc_ref->free_done);
+}
+
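+/*
+ * Allocate a per-controller ref, initialise its percpu_ref and completions,
+ * and insert it into the namespace's pr_per_ctrl_refs xarray at idx (the
+ * controller ID).
+ */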
+static int nvmet_pr_alloc_and_insert_pc_ref(struct nvmet_ns *ns,
+ unsigned long idx,
+ uuid_t *hostid)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ int ret;
+
+ pc_ref = kmalloc(sizeof(*pc_ref), GFP_ATOMIC);
+ if (!pc_ref)
+ return -ENOMEM;
+
+ ret = percpu_ref_init(&pc_ref->ref, nvmet_pr_ctrl_ns_all_cmds_done,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+ if (ret)
+ goto free;
+
+ init_completion(&pc_ref->free_done);
+ init_completion(&pc_ref->confirm_done);
+ uuid_copy(&pc_ref->hostid, hostid);
+
+ ret = xa_insert(&ns->pr_per_ctrl_refs, idx, pc_ref, GFP_KERNEL);
+ if (ret)
+ goto exit;
+ return ret;
+exit:
+ percpu_ref_exit(&pc_ref->ref);
+free:
+ kfree(pc_ref);
+ return ret;
+}
+
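+/*
+ * Set up reservation state for a new controller: initialise its PR log FIFO
+ * and allocate a per-controller ref for every PR-enabled namespace in the
+ * subsystem.
+ */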
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_subsys *subsys = ctrl->subsys;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = NULL;
+ unsigned long idx;
+ int ret;
+
+ ctrl->pr_log_mgr.counter = 0;
+ ctrl->pr_log_mgr.lost_count = 0;
+ mutex_init(&ctrl->pr_log_mgr.lock);
+ INIT_KFIFO(ctrl->pr_log_mgr.log_queue);
+
+ /*
+ * The subsys lock is held here, so a namespace that is not yet in
+ * subsys->namespaces cannot be enabled and cannot have run
+ * nvmet_pr_init_ns(); see nvmet_ns_enable() for details. Checking
+ * ns->pr.enable is therefore sufficient.
+ */
+ xa_for_each(&subsys->namespaces, idx, ns) {
+ if (ns->pr.enable) {
+ ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+ &ctrl->hostid);
+ if (ret)
+ goto free_per_ctrl_refs;
+ }
+ }
+ return 0;
+
+free_per_ctrl_refs:
+ xa_for_each(&subsys->namespaces, idx, ns) {
+ if (ns->pr.enable) {
+ pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+ if (pc_ref)
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+ }
+ return ret;
+}
+
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns;
+ unsigned long idx;
+
+ kfifo_free(&ctrl->pr_log_mgr.log_queue);
+ mutex_destroy(&ctrl->pr_log_mgr.lock);
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ if (ns->pr.enable) {
+ pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+ if (pc_ref)
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+ }
+}
+
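+/*
+ * Set up reservation state for a namespace being enabled: initialise the
+ * reservation bookkeeping and allocate a per-controller ref for every
+ * controller already attached to the subsystem.
+ */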
+int nvmet_pr_init_ns(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ctrl *ctrl = NULL;
+ unsigned long idx;
+ int ret;
+
+ ns->pr.holder = NULL;
+ atomic_set(&ns->pr.generation, 0);
+ sema_init(&ns->pr.pr_sem, 1);
+ INIT_LIST_HEAD(&ns->pr.registrant_list);
+ ns->pr.notify_mask = 0;
+
+ xa_init(&ns->pr_per_ctrl_refs);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+ &ctrl->hostid);
+ if (ret)
+ goto free_per_ctrl_refs;
+ }
+ return 0;
+
+free_per_ctrl_refs:
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ xa_erase(&ns->pr_per_ctrl_refs, idx);
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+ return ret;
+}
+
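+/*
+ * Tear down reservation state for a namespace: free all registrants and the
+ * per-controller refs. No commands can still be in flight at this point.
+ */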
+void nvmet_pr_exit_ns(struct nvmet_ns *ns)
+{
+ struct nvmet_pr_registrant *reg, *tmp;
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_pr *pr = &ns->pr;
+ unsigned long idx;
+
+ list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+ list_del(&reg->entry);
+ kfree(reg);
+ }
+
+ xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+ /*
+ * No commands are outstanding on this namespace, so each pc_ref
+ * can be freed safely.
+ */
+ pc_ref = xa_erase(&ns->pr_per_ctrl_refs, idx);
+ percpu_ref_exit(&pc_ref->ref);
+ kfree(pc_ref);
+ }
+
+ xa_destroy(&ns->pr_per_ctrl_refs);
+}
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 9a3548179a8e..6dbc7036f2e4 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -180,6 +180,106 @@ static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvmet_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const rrega_strs[] = {
+ [0x00] = "register",
+ [0x01] = "unregister",
+ [0x02] = "replace",
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrega = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 ptpl = (cdw10[3] >> 6) & 0x3;
+ const char *rrega_str;
+
+ if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
+ rrega_str = rrega_strs[rrega];
+ else
+ rrega_str = "reserved";
+
+ trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
+ rrega, rrega_str, iekey, ptpl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
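+/* Reservation type names, indexed by RTYPE; shared by the acquire and release decoders. */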
+static const char * const rtype_strs[] = {
+ [0x00] = "reserved",
+ [0x01] = "write exclusive",
+ [0x02] = "exclusive access",
+ [0x03] = "write exclusive registrants only",
+ [0x04] = "exclusive access registrants only",
+ [0x05] = "write exclusive all registrants",
+ [0x06] = "exclusive access all registrants",
+};
+
+static const char *nvmet_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const racqa_strs[] = {
+ [0x00] = "acquire",
+ [0x01] = "preempt",
+ [0x02] = "preempt and abort",
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 racqa = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+ const char *racqa_str = "reserved";
+ const char *rtype_str = "reserved";
+
+ if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
+ racqa_str = racqa_strs[racqa];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
+
+ trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
+ racqa, racqa_str, iekey, rtype, rtype_str);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const rrela_strs[] = {
+ [0x00] = "release",
+ [0x01] = "clear",
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrela = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+ const char *rrela_str = "reserved";
+ const char *rtype_str = "reserved";
+
+ if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
+ rrela_str = rrela_strs[rrela];
+
+ if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+ rtype_str = rtype_strs[rtype];
+
+ trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
+ rrela, rrela_str, iekey, rtype, rtype_str);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u32 numd = get_unaligned_le32(cdw10);
+ u8 eds = cdw10[4] & 0x1;
+
+ trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
@@ -195,6 +295,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
return nvmet_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvmet_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_resv_register:
+ return nvmet_trace_resv_reg(p, cdw10);
+ case nvme_cmd_resv_acquire:
+ return nvmet_trace_resv_acq(p, cdw10);
+ case nvme_cmd_resv_release:
+ return nvmet_trace_resv_rel(p, cdw10);
+ case nvme_cmd_resv_report:
+ return nvmet_trace_resv_report(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index af9e13be7678..3aef35b05111 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -537,6 +537,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
+ u32 data_len = nvmet_rw_data_len(req);
struct bio *bio;
int sg_cnt;
@@ -544,6 +545,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return;
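+ /* Reject zone appends larger than the device's max zone append size. */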
+ if (data_len >
+ bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) {
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out;
+ }
+
if (!req->sg_cnt) {
nvmet_req_complete(req, 0);
return;
@@ -576,20 +584,17 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
bio->bi_opf |= REQ_FUA;
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
- struct page *p = sg_page(sg);
- unsigned int l = sg->length;
- unsigned int o = sg->offset;
- unsigned int ret;
+ unsigned int len = sg->length;
- ret = bio_add_zone_append_page(bio, p, l, o);
- if (ret != sg->length) {
+ if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
+ sg_page(sg), len, sg->offset) != len) {
status = NVME_SC_INTERNAL;
goto out_put_bio;
}
- total_len += sg->length;
+ total_len += len;
}
- if (total_len != nvmet_rw_data_len(req)) {
+ if (total_len != data_len) {
status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto out_put_bio;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 42a4a996defb..3ed642f4f00d 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2117,7 +2117,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
case DASD_CQR_IN_IO:
rc = device->discipline->term_IO(cqr);
if (rc) {
- /* unable to terminate requeust */
+ /* unable to terminate request */
dev_err(&device->cdev->dev,
"Flushing the DASD request queue failed\n");
/* stop flush processing */
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 6adaeb985dde..71d8fb86139d 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -855,7 +855,7 @@ dasd_delete_device(struct dasd_device *device)
dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
- /* Removve copy relation */
+ /* Remove copy relation */
dasd_devmap_delete_copy_relation_device(device);
/*
* Drop ref_count by 3, one for the devmap reference, one for
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 90b106408992..1ebe589b5185 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2405,7 +2405,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
}
if (count_area != NULL && count_area->kl == 0) {
- /* we found notthing violating our disk layout */
+ /* we found nothing violating our disk layout */
if (dasd_check_blocksize(count_area->dl) == 0)
block->bp_block = count_area->dl;
}
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 0faaa437d9be..48e12e81df00 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -350,6 +350,7 @@ dasd_proc_init(void)
remove_proc_entry("devices", dasd_proc_root_entry);
out_nodevices:
remove_proc_entry("dasd", NULL);
+ dasd_proc_root_entry = NULL;
out_nodasd:
return -ENOENT;
}
@@ -357,7 +358,11 @@ dasd_proc_init(void)
void
dasd_proc_exit(void)
{
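+ /* Nothing to clean up if dasd_proc_init() failed or was never called. */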
+ if (!dasd_proc_root_entry)
+ return;
+
remove_proc_entry("devices", dasd_proc_root_entry);
remove_proc_entry("statistics", dasd_proc_root_entry);
remove_proc_entry("dasd", NULL);
+ dasd_proc_root_entry = NULL;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ca4bc0ac76ad..8947dab132d7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1190,8 +1190,8 @@ static u8 sd_group_number(struct scsi_cmnd *cmd)
if (!sdkp->rscs)
return 0;
- return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
- 0x3fu);
+ return min3((u32)rq->bio->bi_write_hint,
+ (u32)sdkp->permanent_stream_count, 0x3fu);
}
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
@@ -1389,7 +1389,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
- sdp->use_10_for_rw || protect || rq->write_hint) {
+ sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) {
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
protect | fua);
} else {
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6ab27f4f4878..7a447ff600d2 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -633,8 +633,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
lim->max_open_zones = sdkp->zones_max_open;
lim->max_active_zones = 0;
lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);
- /* Enable block layer zone append emulation */
- lim->max_zone_append_sectors = 0;
return 0;