author     James Smart <jsmart2021@gmail.com>               2017-02-12 13:52:30 -0800
committer  Martin K. Petersen <martin.petersen@oracle.com>  2017-02-22 18:41:43 -0500
commit     895427bd012ce5814fc9888c7c0ee9de44761833
tree       307a6d5500f676e5df31b8120a3c5986d0636eba
parent     1d9d5a9879ad493ee7cf75987df1f365c61fefe5
scsi: lpfc: NVME Initiator: Base modifications
This patch adds base modifications for NVME initiator support.
The base modifications consist of:
- Formal split of SLI-3 rings from SLI-4 WQs (sometimes referred to as
rings as well), as the implementation now varies widely between the two.
- Addition of configuration modes:
SCSI initiator only; NVME initiator only; NVME target only; and
SCSI and NVME initiator.
The configuration mode drives the overall adapter configuration,
which offloads are enabled, and how resources are split.
NVME support is only available on SLI-4 devices with newer firmware.
- Implements the following based on configuration mode:
- Exchange resources are split by protocol; obviously, if only
one mode is enabled, no split occurs. The default split is 50/50,
and a module attribute allows tuning (see the sketch after this list).
- Pools and config parameters are separated per-protocol
- Each protocol has its own set of queues, but they share interrupt
vectors.
SCSI:
SLI-3 devices have few queues and the original style of queue
allocation remains.
SLI-4 devices piggy-back on an "io-channel" concept that
eventually needs to merge with scsi-mq/blk-mq support (this is
underway). For now, the paradigm continues as it existed
before: an io channel allocates N MSI-X vectors and N WQs (N=4 by
default) and either round-robins or uses CPU # modulo N for
scheduling. Several module parameters allow the configuration to
be tuned.
NVME (initiator):
Allocates an MSI-X vector per CPU (or however many
pci_alloc_irq_vectors provides).
Allocates a WQ per CPU, and maps the WQs to MSI-X vectors on a
WQ # modulo vector count basis (see the sketch after this list).
Module parameters exist to cap/control the config if desired.
- Each protocol has its own buffer and dma pools.
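The resource split and WQ-to-vector mapping described in the list above
boil down to simple arithmetic. Below is a minimal illustrative sketch;
the names used (CFG_MODE_*, adapter_res, max_xri, xri_split_pct,
num_irq_vectors) are hypothetical stand-ins, not the driver's real
structures or parameters:

	/*
	 * Illustrative sketch only -- all names here are hypothetical
	 * stand-ins, not the real lpfc structures or parameters.
	 */
	#define CFG_MODE_SCSI	0x1	/* SCSI (FCP) initiator enabled */
	#define CFG_MODE_NVME	0x2	/* NVME initiator enabled */

	struct adapter_res {
		unsigned int cfg_mode;		/* which protocols are enabled */
		unsigned int max_xri;		/* total exchange resources */
		unsigned int xri_split_pct;	/* percent to SCSI; default 50 */
		unsigned int num_irq_vectors;	/* from pci_alloc_irq_vectors() */
	};

	/*
	 * Split exchange (XRI) resources between SCSI and NVME. With only
	 * one protocol enabled there is no split; otherwise the module
	 * attribute percentage applies (50/50 by default).
	 */
	static void split_xri(const struct adapter_res *r,
			      unsigned int *scsi_xri, unsigned int *nvme_xri)
	{
		if (r->cfg_mode == CFG_MODE_SCSI) {		/* SCSI only */
			*scsi_xri = r->max_xri;
			*nvme_xri = 0;
		} else if (r->cfg_mode == CFG_MODE_NVME) {	/* NVME only */
			*scsi_xri = 0;
			*nvme_xri = r->max_xri;
		} else {					/* both enabled */
			*scsi_xri = (r->max_xri * r->xri_split_pct) / 100;
			*nvme_xri = r->max_xri - *scsi_xri;
		}
	}

	/* Map an NVME WQ to an interrupt vector: WQ # modulo vector count. */
	static unsigned int wq_to_vector(const struct adapter_res *r,
					 unsigned int wq_idx)
	{
		return wq_idx % r->num_irq_vectors;
	}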
I apologize for the size of the patch.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_debugfs.h')
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 72
1 file changed, 58 insertions(+), 14 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 9ae2c4b5fd12..98814d651239 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -44,8 +44,10 @@
 
 enum {
 	DUMP_FCP,
+	DUMP_NVME,
 	DUMP_MBX,
 	DUMP_ELS,
+	DUMP_NVMELS,
 };
 
 /*
@@ -364,11 +366,11 @@ lpfc_debug_dump_q(struct lpfc_queue *q)
 }
 
 /**
- * lpfc_debug_dump_wq - dump all entries from the fcp work queue
+ * lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue
  * @phba: Pointer to HBA context object.
- * @wqidx: Index to a FCP work queue.
+ * @wqidx: Index to a FCP or NVME work queue.
  *
- * This function dumps all entries from a FCP work queue specified
+ * This function dumps all entries from a FCP or NVME work queue specified
  * by the wqidx.
  **/
 static inline void
@@ -380,16 +382,22 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
 	if (qtype == DUMP_FCP) {
 		wq = phba->sli4_hba.fcp_wq[wqidx];
 		qtypestr = "FCP";
+	} else if (qtype == DUMP_NVME) {
+		wq = phba->sli4_hba.nvme_wq[wqidx];
+		qtypestr = "NVME";
 	} else if (qtype == DUMP_MBX) {
 		wq = phba->sli4_hba.mbx_wq;
 		qtypestr = "MBX";
 	} else if (qtype == DUMP_ELS) {
 		wq = phba->sli4_hba.els_wq;
 		qtypestr = "ELS";
+	} else if (qtype == DUMP_NVMELS) {
+		wq = phba->sli4_hba.nvmels_wq;
+		qtypestr = "NVMELS";
 	} else
 		return;
 
-	if (qtype == DUMP_FCP)
+	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
 		pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
 			qtypestr, wqidx, wq->queue_id);
 	else
@@ -400,12 +408,12 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
 }
 
 /**
- * lpfc_debug_dump_cq - dump all entries from a fcp work queue's
+ * lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's
  * cmpl queue
  * @phba: Pointer to HBA context object.
  * @wqidx: Index to a FCP work queue.
  *
- * This function dumps all entries from a FCP completion queue
+ * This function dumps all entries from a FCP or NVME completion queue
  * which is associated to the work queue specified by the @wqidx.
  **/
 static inline void
@@ -415,12 +423,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
 	char *qtypestr;
 	int eqidx;
 
-	/* fcp wq and cq are 1:1, thus same indexes */
+	/* fcp/nvme wq and cq are 1:1, thus same indexes */
 
 	if (qtype == DUMP_FCP) {
 		wq = phba->sli4_hba.fcp_wq[wqidx];
 		cq = phba->sli4_hba.fcp_cq[wqidx];
 		qtypestr = "FCP";
+	} else if (qtype == DUMP_NVME) {
+		wq = phba->sli4_hba.nvme_wq[wqidx];
+		cq = phba->sli4_hba.nvme_cq[wqidx];
+		qtypestr = "NVME";
 	} else if (qtype == DUMP_MBX) {
 		wq = phba->sli4_hba.mbx_wq;
 		cq = phba->sli4_hba.mbx_cq;
@@ -429,21 +441,25 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
 		wq = phba->sli4_hba.els_wq;
 		cq = phba->sli4_hba.els_cq;
 		qtypestr = "ELS";
+	} else if (qtype == DUMP_NVMELS) {
+		wq = phba->sli4_hba.nvmels_wq;
+		cq = phba->sli4_hba.nvmels_cq;
+		qtypestr = "NVMELS";
 	} else
 		return;
 
-	for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) {
+	for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
 		eq = phba->sli4_hba.hba_eq[eqidx];
 		if (cq->assoc_qid == eq->queue_id)
 			break;
 	}
-	if (eqidx == phba->cfg_fcp_io_channel) {
+	if (eqidx == phba->io_channel_irqs) {
 		pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
 		eqidx = 0;
 		eq = phba->sli4_hba.hba_eq[0];
 	}
 
-	if (qtype == DUMP_FCP)
+	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
 		pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
 			"->EQ[Idx:%d|Qid:%d]:\n",
 			qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -527,11 +543,25 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
 		return;
 	}
 
+	for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
+		if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+			break;
+	if (wq_idx < phba->cfg_nvme_io_channel) {
+		pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+		return;
+	}
+
 	if (phba->sli4_hba.els_wq->queue_id == qid) {
 		pr_err("ELS WQ[Qid:%d]\n", qid);
 		lpfc_debug_dump_q(phba->sli4_hba.els_wq);
 		return;
 	}
+
+	if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
+		pr_err("NVME LS WQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
+	}
 }
 
 /**
@@ -596,12 +626,28 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
 		return;
 	}
 
+	for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
+		if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+			break;
+
+	if (cq_idx < phba->cfg_nvme_io_channel) {
+		pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+		return;
+	}
+
 	if (phba->sli4_hba.els_cq->queue_id == qid) {
 		pr_err("ELS CQ[Qid:%d]\n", qid);
 		lpfc_debug_dump_q(phba->sli4_hba.els_cq);
 		return;
 	}
 
+	if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
+		pr_err("NVME LS CQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
+		return;
+	}
+
 	if (phba->sli4_hba.mbx_cq->queue_id == qid) {
 		pr_err("MBX CQ[Qid:%d]\n", qid);
 		lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
@@ -621,17 +667,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
 {
 	int eq_idx;
 
-	for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+	for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
 		if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
 			break;
-	}
 
-	if (eq_idx < phba->cfg_fcp_io_channel) {
+	if (eq_idx < phba->io_channel_irqs) {
 		printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
 		lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
 		return;
 	}
-
 }
 
 void lpfc_debug_dump_all_queues(struct lpfc_hba *);
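As a usage note, the new DUMP_NVME/DUMP_NVMELS queue types plug into the
existing dump helpers. The caller below is an illustrative sketch, not
code from this patch; only the helper signatures, the enum values, and
cfg_nvme_io_channel come from the diff above:

	/* Illustrative caller -- not part of the patch itself. */
	static void dump_nvme_queues(struct lpfc_hba *phba)
	{
		int idx;

		/* one WQ/CQ pair per NVME io channel */
		for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) {
			lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
			lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
		}

		/* single NVME LS WQ/CQ pair; the index argument is ignored */
		lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
		lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
	}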