author     Jens Axboe <axboe@fb.com>  2014-12-11 13:58:39 -0700
committer  Jens Axboe <axboe@fb.com>  2014-12-11 13:58:39 -0700
commit     fe54303ee2be293c1c5c7a53a152453789cabc2f (patch)
tree       359ec0be601521afe7cde6dcbd47e6a66e1364a2  /drivers/block/nvme-core.c
parent     285dffc9101244ac65c29672a1fb3fe614b52238 (diff)
NVMe: fix retry/error logic in nvme_queue_rq()
The logic around retrying and erroring IO in nvme_queue_rq() is broken
in a few ways:

- If we fail allocating dma memory for a discard, we return retry. We
  have the 'iod' stored in ->special, but we free the 'iod'.

- For a normal request, if we fail dma mapping or setting up prps, we
  have the same iod situation. Additionally, we haven't set the callback
  for the request yet, so we also potentially leak IOMMU resources.

Get rid of the ->special 'iod' store. The retry is uncommon enough that
it's not worth optimizing for or holding on to resources to attempt to
speed it up. Additionally, it's usually best practice to free any
request-related resources when doing retries.

Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
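In blk-mq terms, the rule the patch settles on is: BLK_MQ_RQ_QUEUE_BUSY
means "requeue this request later", so anything allocated or DMA-mapped
for the current attempt must be released before returning it, while
BLK_MQ_RQ_QUEUE_ERROR fails the request outright. Below is a minimal
sketch of that shape, not the driver's actual code: my_queue_rq(),
my_iod_alloc(), my_iod_free(), my_map_segments(), my_dma_map(),
my_dma_unmap(), my_setup_prps() and my_submit() are hypothetical
stand-ins for the nvme-core.c helpers touched by the diff below, and
the queue_rq signature is simplified.

/*
 * Sketch of the retry/error pattern: free per-attempt resources before
 * returning BUSY, return ERROR only for requests that can never succeed.
 * All my_*() helpers are hypothetical stand-ins, not kernel APIs.
 */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
	struct my_iod *iod;

	iod = my_iod_alloc(req, GFP_ATOMIC);
	if (!iod)
		return BLK_MQ_RQ_QUEUE_BUSY;	/* transient; nothing held yet */

	if (!my_map_segments(req, iod)) {
		my_iod_free(iod);
		return BLK_MQ_RQ_QUEUE_ERROR;	/* hard failure; a retry cannot help */
	}

	if (!my_dma_map(req, iod)) {
		my_iod_free(iod);		/* drop the iod before asking for a retry */
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	if (my_setup_prps(req, iod) != blk_rq_bytes(req)) {
		my_dma_unmap(req, iod);		/* undo the mapping: no leaked IOMMU resources */
		my_iod_free(iod);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	blk_mq_start_request(req);
	my_submit(req, iod);			/* completion path frees the iod and unmaps */
	return BLK_MQ_RQ_QUEUE_OK;
}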
Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--  drivers/block/nvme-core.c  39
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 95f2310255ce..e92bdf4c68fc 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -621,24 +621,15 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_iod *iod;
int psegs = req->nr_phys_segments;
- int result = BLK_MQ_RQ_QUEUE_BUSY;
enum dma_data_direction dma_dir;
unsigned size = !(req->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(req) :
sizeof(struct nvme_dsm_range);
- /*
- * Requeued IO has already been prepped
- */
- iod = req->special;
- if (iod)
- goto submit_iod;
-
iod = nvme_alloc_iod(psegs, size, ns->dev, GFP_ATOMIC);
if (!iod)
- return result;
+ return BLK_MQ_RQ_QUEUE_BUSY;
iod->private = req;
- req->special = iod;
if (req->cmd_flags & REQ_DISCARD) {
void *range;
@@ -651,7 +642,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
GFP_ATOMIC,
&iod->first_dma);
if (!range)
- goto finish_cmd;
+ goto retry_cmd;
iod_list(iod)[0] = (__le64 *)range;
iod->npages = 0;
} else if (psegs) {
@@ -659,22 +650,22 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
sg_init_table(iod->sg, psegs);
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
- if (!iod->nents) {
- result = BLK_MQ_RQ_QUEUE_ERROR;
- goto finish_cmd;
- }
+ if (!iod->nents)
+ goto error_cmd;
if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
- goto finish_cmd;
+ goto retry_cmd;
- if (blk_rq_bytes(req) != nvme_setup_prps(nvmeq->dev, iod,
- blk_rq_bytes(req), GFP_ATOMIC))
- goto finish_cmd;
+ if (blk_rq_bytes(req) !=
+ nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
+ dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
+ iod->nents, dma_dir);
+ goto retry_cmd;
+ }
}
blk_mq_start_request(req);
- submit_iod:
nvme_set_info(cmd, iod, req_completion);
spin_lock_irq(&nvmeq->q_lock);
if (req->cmd_flags & REQ_DISCARD)
@@ -688,10 +679,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_unlock_irq(&nvmeq->q_lock);
return BLK_MQ_RQ_QUEUE_OK;
- finish_cmd:
- nvme_finish_cmd(nvmeq, req->tag, NULL);
+ error_cmd:
nvme_free_iod(nvmeq->dev, iod);
- return result;
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ retry_cmd:
+ nvme_free_iod(nvmeq->dev, iod);
+ return BLK_MQ_RQ_QUEUE_BUSY;
}
static int nvme_process_cq(struct nvme_queue *nvmeq)