author     Linus Torvalds <torvalds@linux-foundation.org>  2021-09-02 15:09:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-09-02 15:09:46 -0700
commit     a9c9a6f741cdaa2fa9ba24a790db8d07295761e3 (patch)
tree       222aaa35ed4e66c2027845213251e2a3f491b5ba /drivers/scsi/mpt3sas
parent     23852bec534a1633dc08f4df88b8493ae99953a9 (diff)
parent     9b5ac8ab4e8bf5636d1d425aee68ddf45af12057 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This series consists of the usual driver updates (ufs, qla2xxx,
target, smartpqi, lpfc, mpt3sas).
The core change causing the most churn was replacing the command
request field request with a macro, allowing us to offset map to it
and remove the redundant field; the same was also done for the tag
field.
The most impactful change is the final removal of scsi_ioctl, which
has been deprecated for over a decade"
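
In the mpt3sas portion of the diff below, that change shows up as scsi_cmd_to_rq(scmd) replacing scmd->request. A minimal sketch of the offset-mapping idea, under the assumption that the SCSI command lives in the blk-mq per-command payload placed directly behind its struct request; the types and the example_* helper here are simplified stand-ins, not the kernel's definitions:

	/*
	 * Illustrative sketch only. Assumes the command structure is the
	 * blk-mq per-command payload (PDU) laid out immediately after its
	 * struct request in one allocation, which is what lets a small
	 * helper replace the stored back-pointer.
	 */
	struct request { int tag; /* ... */ };
	struct scsi_cmnd { unsigned char cmnd[32]; /* ... */ };

	static inline struct request *example_scsi_cmd_to_rq(struct scsi_cmnd *scmd)
	{
		/* Step back over the request that precedes the payload. */
		return (struct request *)((char *)scmd - sizeof(struct request));
	}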
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (293 commits)
scsi: ufs: Fix ufshcd_request_sense_async() for Samsung KLUFG8RHDA-B2D1
scsi: ufs: ufs-exynos: Fix static checker warning
scsi: mpt3sas: Use the proper SCSI midlayer interfaces for PI
scsi: lpfc: Use the proper SCSI midlayer interfaces for PI
scsi: lpfc: Copyright updates for 14.0.0.1 patches
scsi: lpfc: Update lpfc version to 14.0.0.1
scsi: lpfc: Add bsg support for retrieving adapter cmf data
scsi: lpfc: Add cmf_info sysfs entry
scsi: lpfc: Add debugfs support for cm framework buffers
scsi: lpfc: Add support for maintaining the cm statistics buffer
scsi: lpfc: Add rx monitoring statistics
scsi: lpfc: Add support for the CM framework
scsi: lpfc: Add cmfsync WQE support
scsi: lpfc: Add support for cm enablement buffer
scsi: lpfc: Add cm statistics buffer support
scsi: lpfc: Add EDC ELS support
scsi: lpfc: Expand FPIN and RDF receive logging
scsi: lpfc: Add MIB feature enablement support
scsi: lpfc: Add SET_HOST_DATA mbox cmd to pass date/time info to firmware
scsi: fc: Add EDC ELS definition
...
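
The "Use the proper SCSI midlayer interfaces for PI" commits listed above replace driver-local DIF-type switches with the midlayer's per-command protection helpers; the mpt3sas version of that change appears in the _scsih_setup_eedp() hunk further down. A condensed sketch of the resulting pattern (the example_* wrapper is illustrative; the scsi_* helpers and MPI2_* flags are the ones used in the diff):

	/* Condensed from the _scsih_setup_eedp() rewrite below. */
	static void example_setup_eedp(struct scsi_cmnd *scmd,
				       Mpi25SCSIIORequest_t *mpi_request)
	{
		u32 eedp_flags;

		switch (scsi_get_prot_op(scmd)) {
		case SCSI_PROT_READ_STRIP:
			eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
			break;
		case SCSI_PROT_WRITE_INSERT:
			eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
			break;
		default:
			return;		/* no PI handling for this command */
		}

		if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
			eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

		if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
			eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
				      MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
			/* Reference tag now comes from the midlayer helper. */
			mpi_request->CDB.EEDP32.PrimaryReferenceTag =
				cpu_to_be32(scsi_prot_ref_tag(scmd));
		}

		/* Protection interval replaces the raw sector_size lookup. */
		mpi_request->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
		mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
	}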
Diffstat (limited to 'drivers/scsi/mpt3sas')
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c    | 317
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h    |  40
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c  |  37
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c     |  24
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c   | 160
5 files changed, 501 insertions, 77 deletions
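
Much of the growth in mpt3sas_base.c comes from wiring the driver into blk-mq's polled (io_uring) completion path. The central synchronization, used by mpt3sas_blk_mq_poll() and mpt3sas_base_pause_mq_polling() in the diff below, is a per-queue pause flag plus a busy counter, so that reset and flush paths can fence out pollers before completing I/O themselves. A stripped-down sketch of that handshake (example_* names and process_reply_queue() are illustrative stand-ins):

	#include <linux/atomic.h>
	#include <linux/delay.h>

	struct example_poll_queue {
		atomic_t busy;
		atomic_t pause;
	};

	/* Assumed completion routine standing in for _base_process_reply_queue(). */
	static int process_reply_queue(struct example_poll_queue *q);

	static int example_mq_poll(struct example_poll_queue *q)
	{
		int completed;

		/* Refuse to poll while paused, or if another poller holds the queue. */
		if (atomic_read(&q->pause) ||
		    !atomic_add_unless(&q->busy, 1, 1))
			return 0;

		completed = process_reply_queue(q);

		atomic_dec(&q->busy);
		return completed;
	}

	static void example_pause_polling(struct example_poll_queue *q)
	{
		atomic_set(&q->pause, 1);
		/* Wait out a poller that raced in before the flag was visible. */
		while (atomic_read(&q->busy))
			udelay(500);
	}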
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index cf4a3a2c22ad..6c82435bc9cc 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode, "\t\tdefault - default perf_mode is 'balanced'" ); +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1. &\n\t\t" + "when poll_queues are enabled then &\n\t\t" + "perf_mode is set to latency mode. &\n\t\t" + ); + enum mpt3sas_perf_mode { MPT_PERF_MODE_DEFAULT = -1, MPT_PERF_MODE_BALANCED = 0, @@ -709,6 +717,7 @@ _base_fault_reset_work(struct work_struct *work) * and this call is safe since dead ioc will never return any * command back from HW. */ + mpt3sas_base_pause_mq_polling(ioc); ioc->schedule_dead_ioc_flush_running_cmds(ioc); /* * Set remove_host flag early since kernel thread will @@ -744,6 +753,7 @@ _base_fault_reset_work(struct work_struct *work) spin_unlock_irqrestore( &ioc->ioc_reset_in_progress_lock, flags); mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); _base_clear_outstanding_commands(ioc); } @@ -1548,6 +1558,53 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) } /** + * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues + * when driver is flushing out the IOs. + * @ioc: per adapter object + * + * Pause polling on the mq poll (io uring) queues when driver is flushing + * out the IOs. Otherwise we may see the race condition of completing the same + * IO from two paths. + * + * Returns nothing. + */ +void +mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1); + + /* + * wait for current poll to complete. + */ + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) + udelay(500); + } +} + +/** + * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues. + * @ioc: per adapter object + * + * Returns nothing. + */ +void +mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0); +} + +/** * mpt3sas_base_mask_interrupts - disable interrupts * @ioc: per adapter object * @@ -1722,7 +1779,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex); } - if (!reply_q->irq_poll_scheduled) { + if (!reply_q->is_iouring_poll_q && + !reply_q->irq_poll_scheduled) { reply_q->irq_poll_scheduled = true; irq_poll_sched(&reply_q->irqpoll); } @@ -1779,6 +1837,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) } /** + * mpt3sas_blk_mq_poll - poll the blk mq poll queue + * @shost: Scsi_Host object + * @queue_num: hw ctx queue number + * + * Return number of entries that has been processed from poll queue. 
+ */ +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct MPT3SAS_ADAPTER *ioc = + (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct adapter_reply_queue *reply_q; + int num_entries = 0; + int qid = queue_num - ioc->iopoll_q_start_index; + + if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) || + !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1)) + return 0; + + reply_q = ioc->io_uring_poll_queues[qid].reply_q; + + num_entries = _base_process_reply_queue(reply_q); + atomic_dec(&ioc->io_uring_poll_queues[qid].busy); + + return num_entries; +} + +/** * _base_interrupt - MPT adapter (IOC) specific interrupt handler. * @irq: irq number (not used) * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure @@ -1851,6 +1936,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc) return; list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + if (reply_q->is_iouring_poll_q) + continue; irq_poll_init(&reply_q->irqpoll, ioc->hba_queue_depth/4, _base_irqpoll); reply_q->irq_poll_scheduled = false; @@ -1900,6 +1987,12 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) /* TMs are on msix_index == 0 */ if (reply_q->msix_index == 0) continue; + + if (reply_q->is_iouring_poll_q) { + _base_process_reply_queue(reply_q); + continue; + } + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); if (reply_q->irq_poll_scheduled) { /* Calling irq_poll_disable will wait for any pending @@ -2998,6 +3091,11 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { list_del(&reply_q->list); + if (reply_q->is_iouring_poll_q) { + kfree(reply_q); + continue; + } + if (ioc->smp_affinity_enable) irq_set_affinity_hint(pci_irq_vector(ioc->pdev, reply_q->msix_index), NULL); @@ -3019,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) { struct pci_dev *pdev = ioc->pdev; struct adapter_reply_queue *reply_q; - int r; + int r, qid; reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); if (!reply_q) { @@ -3031,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) reply_q->msix_index = index; atomic_set(&reply_q->busy, 0); + + if (index >= ioc->iopoll_q_start_index) { + qid = index - ioc->iopoll_q_start_index; + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", + ioc->driver_name, ioc->id, qid); + reply_q->is_iouring_poll_q = 1; + ioc->io_uring_poll_queues[qid].reply_q = reply_q; + goto out; + } + + if (ioc->msix_enable) snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", ioc->driver_name, ioc->id, index); @@ -3045,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) kfree(reply_q); return -EBUSY; } - +out: INIT_LIST_HEAD(&reply_q->list); list_add_tail(&reply_q->list, &ioc->reply_queue_list); return 0; @@ -3066,6 +3175,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) unsigned int cpu, nr_cpus, nr_msix, index = 0; struct adapter_reply_queue *reply_q; int local_numa_node; + int iopoll_q_count = ioc->reply_queue_count - + ioc->iopoll_q_start_index; if (!_base_is_controller_msix_enabled(ioc)) return; @@ -3099,7 +3210,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { const cpumask_t *mask; - if (reply_q->msix_index < ioc->high_iops_queues) + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) continue; mask = pci_irq_get_affinity(ioc->pdev, @@ -3121,13 
+3233,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) fall_back: cpu = cpumask_first(cpu_online_mask); - nr_msix -= ioc->high_iops_queues; + nr_msix -= (ioc->high_iops_queues - iopoll_q_count); index = 0; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { unsigned int i, group = nr_cpus / nr_msix; - if (reply_q->msix_index < ioc->high_iops_queues) + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) continue; if (cpu >= nr_cpus) @@ -3164,8 +3277,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, { u16 lnksta, speed; + /* + * Disable high iops queues if io uring poll queues are enabled. + */ if (perf_mode == MPT_PERF_MODE_IOPS || - perf_mode == MPT_PERF_MODE_LATENCY) { + perf_mode == MPT_PERF_MODE_LATENCY || + ioc->io_uring_poll_queues) { ioc->high_iops_queues = 0; return; } @@ -3202,6 +3319,7 @@ mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) return; pci_free_irq_vectors(ioc->pdev); ioc->msix_enable = 0; + kfree(ioc->io_uring_poll_queues); } /** @@ -3215,18 +3333,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) int i, irq_flags = PCI_IRQ_MSIX; struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; struct irq_affinity *descp = &desc; + /* + * Don't allocate msix vectors for poll_queues. + * msix_vectors is always within a range of FW supported reply queue. + */ + int nr_msix_vectors = ioc->iopoll_q_start_index; + if (ioc->smp_affinity_enable) - irq_flags |= PCI_IRQ_AFFINITY; + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; else descp = NULL; - ioc_info(ioc, " %d %d\n", ioc->high_iops_queues, - ioc->reply_queue_count); + ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues, + ioc->reply_queue_count, nr_msix_vectors); i = pci_alloc_irq_vectors_affinity(ioc->pdev, ioc->high_iops_queues, - ioc->reply_queue_count, irq_flags, descp); + nr_msix_vectors, irq_flags, descp); return i; } @@ -3242,6 +3366,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) int r; int i, local_max_msix_vectors; u8 try_msix = 0; + int iopoll_q_count = 0; ioc->msix_load_balance = false; @@ -3257,22 +3382,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); pr_info("\t no of cores: %d, max_msix_vectors: %d\n", ioc->cpu_count, max_msix_vectors); - if (ioc->is_aero_ioc) - _base_check_and_enable_high_iops_queues(ioc, - ioc->msix_vector_count); + ioc->reply_queue_count = - min_t(int, ioc->cpu_count + ioc->high_iops_queues, - ioc->msix_vector_count); + min_t(int, ioc->cpu_count, ioc->msix_vector_count); if (!ioc->rdpq_array_enable && max_msix_vectors == -1) local_max_msix_vectors = (reset_devices) ? 1 : 8; else local_max_msix_vectors = max_msix_vectors; - if (local_max_msix_vectors > 0) - ioc->reply_queue_count = min_t(int, local_max_msix_vectors, - ioc->reply_queue_count); - else if (local_max_msix_vectors == 0) + if (local_max_msix_vectors == 0) goto try_ioapic; /* @@ -3293,14 +3412,77 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) if (ioc->msix_load_balance) ioc->smp_affinity_enable = 0; + if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) + ioc->shost->host_tagset = 0; + + /* + * Enable io uring poll queues only if host_tagset is enabled. 
+ */ + if (ioc->shost->host_tagset) + iopoll_q_count = poll_queues; + + if (iopoll_q_count) { + ioc->io_uring_poll_queues = kcalloc(iopoll_q_count, + sizeof(struct io_uring_poll_queue), GFP_KERNEL); + if (!ioc->io_uring_poll_queues) + iopoll_q_count = 0; + } + + if (ioc->is_aero_ioc) + _base_check_and_enable_high_iops_queues(ioc, + ioc->msix_vector_count); + + /* + * Add high iops queues count to reply queue count if high iops queues + * are enabled. + */ + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + ioc->high_iops_queues, + ioc->msix_vector_count); + + /* + * Adjust the reply queue count incase reply queue count + * exceeds the user provided MSIx vectors count. + */ + if (local_max_msix_vectors > 0) + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + /* + * Add io uring poll queues count to reply queues count + * if io uring is enabled in driver. + */ + if (iopoll_q_count) { + if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS)) + iopoll_q_count = 0; + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + iopoll_q_count, + ioc->msix_vector_count); + } + + /* + * Starting index of io uring poll queues in reply queue list. + */ + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + r = _base_alloc_irq_vectors(ioc); if (r < 0) { ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); goto try_ioapic; } + /* + * Adjust the reply queue count if the allocated + * MSIx vectors is less then the requested number + * of MSIx vectors. + */ + if (r < ioc->iopoll_q_start_index) { + ioc->reply_queue_count = r + iopoll_q_count; + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + } + ioc->msix_enable = 1; - ioc->reply_queue_count = r; for (i = 0; i < ioc->reply_queue_count; i++) { r = _base_request_irq(ioc, i); if (r) { @@ -3320,6 +3502,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc->high_iops_queues = 0; ioc_info(ioc, "High IOPs queues : disabled\n"); ioc->reply_queue_count = 1; + ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); if (r < 0) { dfailprintk(ioc, @@ -3416,6 +3599,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) u64 pio_chip = 0; phys_addr_t chip_phys = 0; struct adapter_reply_queue *reply_q; + int iopoll_q_count = 0; dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -3489,6 +3673,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_fail; + iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + for (i = 0; i < iopoll_q_count; i++) { + atomic_set(&ioc->io_uring_poll_queues[i].busy, 0); + atomic_set(&ioc->io_uring_poll_queues[i].pause, 0); + } + if (!ioc->is_driver_loading) _base_init_irqpolls(ioc); /* Use the Combined reply queue feature only for SAS3 C0 & higher @@ -3530,11 +3720,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) * 4))); } - list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index >= ioc->iopoll_q_start_index) { + pr_info("%s: enabled: index: %d\n", + reply_q->name, reply_q->msix_index); + continue; + } + pr_info("%s: %s enabled: IRQ %d\n", reply_q->name, ioc->msix_enable ? 
"PCI-MSI-X" : "IO-APIC", pci_irq_vector(ioc->pdev, reply_q->msix_index)); + } ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n", &chip_phys, ioc->chip, memap_sz); @@ -3651,7 +3848,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, &ioc->total_io_cnt), ioc->reply_queue_count) : 0; if (scmd && ioc->shost->nr_hw_queues > 1) { - u32 tag = blk_mq_unique_tag(scmd->request); + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); return blk_mq_unique_tag_to_hwq(tag) + ioc->high_iops_queues; @@ -3735,7 +3932,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, u16 smid; u32 tag, unique_tag; - unique_tag = blk_mq_unique_tag(scmd->request); + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); tag = blk_mq_unique_tag_to_tag(unique_tag); /* @@ -5169,6 +5366,73 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc) } /** + * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices. + * - On failure set default QD values. + * @ioc : per adapter object + * + * Returns 0 for success, non-zero for failure. + * + */ +static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + int sz; + int rc = 0; + + ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH; + ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH; + if (!ioc->is_gen35_ioc) + goto out; + /* sas iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return rc; + } + rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_wideport_qd = + (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? + le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : + MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = + (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? + le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) : + MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? + sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH; + /* pcie iounit page 1 */ + rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, + &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ? + (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) : + MPT3SAS_NVME_QUEUE_DEPTH; +out: + dinitprintk(ioc, pr_err( + "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n", + ioc->max_wideport_qd, ioc->max_narrowport_qd, + ioc->max_sata_qd, ioc->max_nvme_qd)); + kfree(sas_iounit_pg1); + return rc; +} + +/** * _base_static_config_pages - static start of day config pages * @ioc: per adapter object */ @@ -5237,6 +5501,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) ioc_warn(ioc, "TimeSync Interval in Manuf page-11 is not enabled. 
Periodic Time-Sync will be disabled\n"); } + rc = _base_assign_fw_reported_qd(ioc); + if (rc) + return rc; rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); if (rc) return rc; @@ -8471,6 +8738,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, _base_pre_reset_handler(ioc); mpt3sas_wait_for_commands_to_complete(ioc); mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); r = mpt3sas_base_make_ioc_ready(ioc, type); if (r) goto out; @@ -8512,6 +8780,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); ioc->ioc_reset_count++; mutex_unlock(&ioc->reset_in_progress_mutex); + mpt3sas_base_resume_mq_polling(ioc); out_unlocked: if ((r == 0) && is_trigger) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 0c6c3df0038d..f87c0911f66a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -77,9 +77,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "37.101.00.00" -#define MPT3SAS_MAJOR_VERSION 37 -#define MPT3SAS_MINOR_VERSION 101 +#define MPT3SAS_DRIVER_VERSION "39.100.00.00" +#define MPT3SAS_MAJOR_VERSION 39 +#define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -354,6 +354,7 @@ struct mpt3sas_nvme_cmd { #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) +#define MPT3_MIN_IRQS 1 /* OEM Identifiers */ #define MFG10_OEM_ID_INVALID (0x00000000) @@ -575,6 +576,7 @@ struct _sas_device { u8 is_chassis_slot_valid; u8 connector_name[5]; struct kref refcount; + u8 port_type; struct hba_port *port; struct sas_rphy *rphy; }; @@ -936,6 +938,8 @@ struct _event_ack_list { * @os_irq: irq number * @irqpoll: irq_poll object * @irq_poll_scheduled: Tells whether irq poll is scheduled or not + * @is_iouring_poll_q: Tells whether reply queues is assigned + * to io uring poll queues or not * @list: this list */ struct adapter_reply_queue { @@ -949,9 +953,22 @@ struct adapter_reply_queue { struct irq_poll irqpoll; bool irq_poll_scheduled; bool irq_line_enable; + bool is_iouring_poll_q; struct list_head list; }; +/** + * struct io_uring_poll_queue - the io uring poll queue structure + * @busy: Tells whether io uring poll queue is busy or not + * @pause: Tells whether IOs are paused on io uring poll queue or not + * @reply_q: reply queue mapped for io uring poll queue + */ +struct io_uring_poll_queue { + atomic_t busy; + atomic_t pause; + struct adapter_reply_queue *reply_q; +}; + typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); /* SAS3.0 support */ @@ -1176,6 +1193,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands * @thresh_hold: Max number of reply descriptors processed * before updating Host Index + * @iopoll_q_start_index: starting index of io uring poll queues + * in reply queue list * @drv_internal_flags: Bit map internal to driver * @drv_support_bitmap: driver's supported feature bit map * @use_32bit_dma: Flag to use 32 bit consistent dma mask @@ -1372,11 +1391,13 @@ struct MPT3SAS_ADAPTER { bool msix_load_balance; u16 thresh_hold; u8 
high_iops_queues; + u8 iopoll_q_start_index; u32 drv_internal_flags; u32 drv_support_bitmap; u32 dma_mask; bool enable_sdev_max_qd; bool use_32bit_dma; + struct io_uring_poll_queue *io_uring_poll_queues; /* internal commands, callback index */ u8 scsi_io_cb_idx; @@ -1423,6 +1444,10 @@ struct MPT3SAS_ADAPTER { u8 tm_custom_handling; u8 nvme_abort_timeout; u16 max_shutdown_latency; + u16 max_wideport_qd; + u16 max_narrowport_qd; + u16 max_nvme_qd; + u8 max_sata_qd; /* static config pages */ struct mpt3sas_facts facts; @@ -1730,10 +1755,12 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \ status, mpi_request, sz); } while (0) int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); -int -mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); +int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc); /* scsih shared API */ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, @@ -1829,6 +1856,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, u32 form, u32 handle); +int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz); int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz); diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 83a5c2172ad4..0563078227de 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -1169,6 +1169,43 @@ out: } /** + * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT; + mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); +out: + return r; +} + +/** * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index b66140e4c370..770b241d7bb2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3820,9 +3820,10 @@ enable_sdev_max_qd_store(struct device *cdev, } } else if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) - qdepth = MPT3SAS_NVME_QUEUE_DEPTH; + qdepth = ioc->max_nvme_qd; else - qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; mpt3sas_scsih_change_queue_depth(sdev, qdepth); } @@ -3919,6 +3920,24 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(sas_device_handle); /** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** * sas_ncq_prio_enable_show - send prioritized io commands to device * @dev: pointer to embedded device * @attr: ? @@ -3960,6 +3979,7 @@ static DEVICE_ATTR_RW(sas_ncq_prio_enable); struct device_attribute *mpt3sas_dev_attrs[] = { &dev_attr_sas_address, &dev_attr_sas_device_handle, + &dev_attr_sas_ncq_prio_supported, &dev_attr_sas_ncq_prio_enable, NULL, }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8e64a6f14542..2f82b1e629af 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1803,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) * limit max device queue for SATA to 32 if enable_sdev_max_qd * is disabled. 
*/ - if (ioc->enable_sdev_max_qd) + if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc) goto not_sata; sas_device_priv_data = sdev->hostdata; @@ -2657,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev) return 1; } - qdepth = MPT3SAS_NVME_QUEUE_DEPTH; + qdepth = ioc->max_nvme_qd; ds = "NVMe"; sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", @@ -2709,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev) sas_device->volume_handle = volume_handle; sas_device->volume_wwid = volume_wwid; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { - qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + qdepth = (sas_device->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; ssp_target = 1; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SEP) { @@ -2721,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev) } else ds = "SSP"; } else { - qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + qdepth = ioc->max_sata_qd; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) ds = "STP"; else if (sas_device->device_info & @@ -3304,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd) sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), - (scmd->request->timeout / HZ) * 1000); + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; @@ -5047,48 +5048,31 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, Mpi25SCSIIORequest_t *mpi_request) { u16 eedp_flags; - unsigned char prot_op = scsi_get_prot_op(scmd); - unsigned char prot_type = scsi_get_prot_type(scmd); Mpi25SCSIIORequest_t *mpi_request_3v = (Mpi25SCSIIORequest_t *)mpi_request; - if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) - return; - - if (prot_op == SCSI_PROT_READ_STRIP) + switch (scsi_get_prot_op(scmd)) { + case SCSI_PROT_READ_STRIP: eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; - else if (prot_op == SCSI_PROT_WRITE_INSERT) + break; + case SCSI_PROT_WRITE_INSERT: eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; - else + break; + default: return; + } - switch (prot_type) { - case SCSI_PROT_DIF_TYPE1: - case SCSI_PROT_DIF_TYPE2: + if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; - /* - * enable ref/guard checking - * auto increment ref tag - */ + if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG; mpi_request->CDB.EEDP32.PrimaryReferenceTag = - cpu_to_be32(t10_pi_ref_tag(scmd->request)); - break; - - case SCSI_PROT_DIF_TYPE3: - - /* - * enable guard checking - */ - eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; - - break; + cpu_to_be32(scsi_prot_ref_tag(scmd)); } - mpi_request_3v->EEDPBlockSize = - cpu_to_le16(scmd->device->sector_size); + mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd)); if (ioc->is_gen35_ioc) eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; @@ -5141,7 +5125,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; - struct request *rq = scmd->request; + struct request *rq = scsi_cmd_to_rq(scmd); int class; Mpi25SCSIIORequest_t *mpi_request; struct _pcie_device *pcie_device = NULL; 
@@ -7371,6 +7355,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, /* get device name */ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + ioc_info(ioc, + "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", + handle, sas_device->sas_address, sas_device->port_type); if (ioc->wait_for_discovery_to_complete) _scsih_sas_device_init_add(ioc, sas_device); @@ -9604,6 +9592,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) } /** + * _scsih_update_device_qdepth - Update QD during Reset. + * @ioc: per adapter object + * + */ +static void +_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct scsi_device *sdev; + u16 qdepth; + + ioc_info(ioc, "Update devices with firmware reported queue depth\n"); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) { + sas_target_priv_data = sas_device_priv_data->sas_target; + sas_device = sas_device_priv_data->sas_target->sas_dev; + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) + qdepth = ioc->max_nvme_qd; + else if (sas_device && + sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = (sas_device->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; + else if (sas_device && + sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + qdepth = ioc->max_sata_qd; + else + continue; + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + } + } +} + +/** * _scsih_mark_responding_sas_device - mark a sas_devices as responding * @ioc: per adapter object * @sas_device_pg0: SAS Device page 0 @@ -10654,6 +10678,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) _scsih_remove_unresponding_devices(ioc); _scsih_del_dirty_vphy(ioc); _scsih_del_dirty_port_entries(ioc); + if (ioc->is_gen35_ioc) + _scsih_update_device_qdepth(ioc); _scsih_scan_for_devices_after_reset(ioc); /* * If diag reset has occurred during the driver load @@ -11178,8 +11204,10 @@ static void scsih_remove(struct pci_dev *pdev) ioc->remove_host = 1; - if (!pci_device_is_present(pdev)) + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); _scsih_flush_running_cmds(ioc); + } _scsih_fw_event_cleanup_queue(ioc); @@ -11274,8 +11302,10 @@ scsih_shutdown(struct pci_dev *pdev) ioc->remove_host = 1; - if (!pci_device_is_present(pdev)) + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); _scsih_flush_running_cmds(ioc); + } _scsih_fw_event_cleanup_queue(ioc); @@ -11785,12 +11815,41 @@ static int scsih_map_queues(struct Scsi_Host *shost) { struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct blk_mq_queue_map *map; + int i, qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; - if (ioc->shost->nr_hw_queues == 1) + if (shost->nr_hw_queues == 1) return 0; - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - ioc->pdev, ioc->high_iops_queues); + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + 
map->nr_queues = iopoll_q_count; + + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + + qoff += map->nr_queues; + } + return 0; } /* shost template for SAS 2.0 HBA devices */ @@ -11861,6 +11920,7 @@ static struct scsi_host_template mpt3sas_driver_template = { .track_queue_depth = 1, .cmd_size = sizeof(struct scsiio_tracker), .map_queues = scsih_map_queues, + .mq_poll = mpt3sas_blk_mq_poll, }; /* raid transport support for SAS 3.0 HBA devices */ @@ -11957,6 +12017,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost = NULL; int rv; u16 hba_mpi_version; + int iopoll_q_count = 0; /* Determine in which MPI version class this pci device belongs */ hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); @@ -12204,6 +12265,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_thread_fail; } + shost->host_tagset = 0; + + if (ioc->is_gen35_ioc && host_tagset_enable) + shost->host_tagset = 1; + ioc->is_driver_loading = 1; if ((mpt3sas_base_attach(ioc))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", @@ -12226,16 +12292,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } else ioc->hide_drives = 0; - shost->host_tagset = 0; shost->nr_hw_queues = 1; - if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 && - host_tagset_enable && ioc->smp_affinity_enable) { - - shost->host_tagset = 1; + if (shost->host_tagset) { shost->nr_hw_queues = ioc->reply_queue_count - ioc->high_iops_queues; + iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + + shost->nr_maps = iopoll_q_count ? 3 : 1; + dev_info(&ioc->pdev->dev, "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", shost->can_queue, shost->nr_hw_queues); @@ -12359,6 +12426,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) /* Permanent error, prepare for device removal */ ioc->pci_error_recovery = 1; mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_pause_mq_polling(ioc); _scsih_flush_running_cmds(ioc); return PCI_ERS_RESULT_DISCONNECT; } |
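
The new poll_queues module parameter and ->mq_poll() hook are only exercised when userspace actually issues polled I/O. A hedged userspace sketch using liburing; the device path and sizes are placeholders, error handling is trimmed, and it assumes the driver was loaded with host_tagset_enable=1 and a non-zero poll_queues as described in the MODULE_PARM_DESC above:

	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <liburing.h>
	#include <stdlib.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		void *buf;
		int fd;

		/* IOPOLL rings complete I/O by polling the driver's ->mq_poll() hook. */
		io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);

		/* Polled I/O requires O_DIRECT; /dev/sdX is a placeholder device. */
		fd = open("/dev/sdX", O_RDONLY | O_DIRECT);
		posix_memalign(&buf, 4096, 4096);

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fd, buf, 4096, 0);
		io_uring_submit(&ring);

		/* Busy-polls for the completion rather than waiting for an interrupt. */
		io_uring_wait_cqe(&ring, &cqe);
		io_uring_cqe_seen(&ring, cqe);

		io_uring_queue_exit(&ring);
		free(buf);
		return 0;
	}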