path: root/drivers/nvme/host/pci.c
author     Jens Axboe <axboe@fb.com>  2015-11-03 20:37:26 -0700
committer  Jens Axboe <axboe@fb.com>  2015-11-07 10:40:47 -0700
commit     a0fa9647a54e81883abd57c5c865d1747f68a577 (patch)
tree       f017a7459fd6e0cc36b0e004c7d4a059b5a04c15 /drivers/nvme/host/pci.c
parent     05229beeddf7e75e2e616ddaad4b70e7fca9528d (diff)
download   lwn-a0fa9647a54e81883abd57c5c865d1747f68a577.tar.gz
           lwn-a0fa9647a54e81883abd57c5c865d1747f68a577.zip
NVMe: add blk polling support
Add nvme_poll(), which will check a specific completion queue for command
completions. Wire that up to the new block layer poll mechanism.

Signed-off-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
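For context, the poll mechanism added by the parent commit lets a caller reap completions by repeatedly invoking the driver's ->poll hook instead of sleeping until the completion interrupt fires. The sketch below illustrates only that calling pattern; poll_for_tag(), struct hw_ctx and the busy-wait policy are simplified assumptions for illustration, not the block layer's actual implementation.

/*
 * Illustrative sketch only: a simplified model of a block-layer style poll
 * loop driving a driver ->poll callback until a specific command (tag)
 * completes.  Names and types are placeholders, not real kernel APIs.
 */
#include <stdbool.h>

struct hw_ctx;                          /* stand-in for struct blk_mq_hw_ctx */

/* Driver hook: returns 1 if 'tag' was completed while polling, else 0. */
typedef int (*poll_fn)(struct hw_ctx *hctx, unsigned int tag);

static bool poll_for_tag(struct hw_ctx *hctx, poll_fn poll, unsigned int tag,
                         bool (*request_done)(unsigned int tag))
{
        /* Spin, reaping completions directly, instead of waiting for an IRQ. */
        while (!request_done(tag)) {
                if (poll(hctx, tag))
                        return true;    /* our command was among the reaped entries */
                /* A real caller would also honor signals, preemption and timeouts. */
        }
        return true;
}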
Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--   drivers/nvme/host/pci.c   32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e878590e71b6..4a715f49f5db 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -90,7 +90,7 @@ static struct class *nvme_class;
 
 static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
-static int nvme_process_cq(struct nvme_queue *nvmeq);
+static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_dead_ctrl(struct nvme_dev *dev);
 
 struct async_cmd_info {
@@ -935,7 +935,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
         return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int nvme_process_cq(struct nvme_queue *nvmeq)
+static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 {
         u16 head, phase;
 
@@ -953,6 +953,8 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
                         head = 0;
                         phase = !phase;
                 }
+                if (tag && *tag == cqe.command_id)
+                        *tag = -1;
                 ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
                 fn(nvmeq, ctx, &cqe);
         }
@@ -964,14 +966,18 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
          * a big problem.
          */
         if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
-                return 0;
+                return;
 
         writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
         nvmeq->cq_head = head;
         nvmeq->cq_phase = phase;
 
         nvmeq->cqe_seen = 1;
-        return 1;
+}
+
+static void nvme_process_cq(struct nvme_queue *nvmeq)
+{
+        __nvme_process_cq(nvmeq, NULL);
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -995,6 +1001,23 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
         return IRQ_WAKE_THREAD;
 }
 
+static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+        struct nvme_queue *nvmeq = hctx->driver_data;
+
+        if ((le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
+            nvmeq->cq_phase) {
+                spin_lock_irq(&nvmeq->q_lock);
+                __nvme_process_cq(nvmeq, &tag);
+                spin_unlock_irq(&nvmeq->q_lock);
+
+                if (tag == -1)
+                        return 1;
+        }
+
+        return 0;
+}
+
 /*
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -1654,6 +1677,7 @@ static struct blk_mq_ops nvme_mq_ops = {
         .init_hctx      = nvme_init_hctx,
         .init_request   = nvme_init_request,
         .timeout        = nvme_timeout,
+        .poll           = nvme_poll,
 };
 
 static void nvme_dev_remove_admin(struct nvme_dev *dev)
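A note on why nvme_poll() can peek at the completion queue before taking the lock: an NVMe completion entry is new only while its phase tag (bit 0 of the status field) matches the phase the host currently expects, and the expected phase flips each time the head index wraps around the ring. The standalone sketch below models just that convention with simplified placeholder types; it is not the driver's real data layout.

/*
 * Standalone model of NVMe completion-queue consumption via the phase tag.
 * Types and field names are simplified placeholders for illustration.
 */
#include <stdint.h>

struct cq_entry {
        uint16_t command_id;
        uint16_t status;        /* bit 0 is the phase tag written by the device */
};

struct cq_ring {
        struct cq_entry *entries;
        uint16_t depth;
        uint16_t head;          /* next entry the host will examine */
        uint8_t  phase;         /* phase value that marks a *new* entry */
};

/* Consume all new completions; returns how many were reaped. */
static unsigned int cq_reap(struct cq_ring *q, void (*complete)(uint16_t id))
{
        unsigned int n = 0;

        /* An entry is valid only while its phase bit matches q->phase. */
        while ((q->entries[q->head].status & 1) == q->phase) {
                complete(q->entries[q->head].command_id);
                if (++q->head == q->depth) {
                        q->head = 0;
                        q->phase = !q->phase;   /* expected phase flips on wrap */
                }
                n++;
        }
        return n;
}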