author:    James Smart <jsmart2021@gmail.com>  2021-04-11 18:31:15 -0700
committer: Martin K. Petersen <martin.petersen@oracle.com>  2021-04-13 01:39:13 -0400
commit:    a789241e49b6adce84cdba7a24c92ecc845aface
tree:      903015224bbe38a054c241922f588657659de507
parent:    f866eb06c087125619457b53e9211a9e758f64f7
scsi: lpfc: Fix NMI crash during rmmod due to circular hbalock dependency
Remove the hbalock dependency for lpfc_abts_els_sgl_list and
lpfc_abts_nvmet_ctx_list. The lists are adequately synchronized by the
sgl_list_lock and abts_nvmet_buf_list_lock, respectively.

Link: https://lore.kernel.org/r/20210412013127.2387-5-jsmart2021@gmail.com
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
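Why nesting hbalock over the dedicated list locks is dangerous: any path that
acquires the two locks in the opposite order, or takes hbalock while already
holding a list lock, closes a circular wait and the machine hangs — here hard
enough to trip the NMI watchdog during rmmod. A minimal user-space analogy
(not lpfc code): the two mutexes stand in for hbalock and
abts_nvmet_buf_list_lock, and the two threads stand in for the abort and
cleanup paths. Run it a few times; when each thread grabs its first lock
before the other's second, the program deadlocks.

/* deadlock_demo.c - hedged analogy for the circular lock dependency.
 * Build: gcc -pthread deadlock_demo.c -o deadlock_demo
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hba_lock  = PTHREAD_MUTEX_INITIALIZER; /* coarse lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* fine lock */

/* Path 1: nests list_lock inside hba_lock (the old lpfc pattern). */
static void *abort_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&hba_lock);
        usleep(1000);                     /* widen the race window */
        pthread_mutex_lock(&list_lock);
        puts("abort_path: got both locks");
        pthread_mutex_unlock(&list_lock);
        pthread_mutex_unlock(&hba_lock);
        return NULL;
}

/* Path 2: takes the same locks in the opposite order. Together with
 * abort_path this closes the circle, and the program can hang here. */
static void *cleanup_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&list_lock);
        usleep(1000);
        pthread_mutex_lock(&hba_lock);
        puts("cleanup_path: got both locks");
        pthread_mutex_unlock(&hba_lock);
        pthread_mutex_unlock(&list_lock);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, abort_path, NULL);
        pthread_create(&t2, NULL, cleanup_path, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        puts("no deadlock this run - the ordering bug is timing-dependent");
        return 0;
}

The fix removes the nesting entirely, so there is only one lock per list and
no ordering to get wrong.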
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c')
 drivers/scsi/lpfc/lpfc_nvmet.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)
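Before reading the hunks, the shape of the change, condensed from the diff
below (the identifiers are the driver's own; this is a sketch of the locking
pattern, not compilable in isolation):

/* Before: the coarse hbalock nested over the dedicated list lock. */
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
/* ... walk phba->sli4_hba.lpfc_abts_nvmet_ctx_list ... */
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);

/* After: the list lock alone guards the list, and it takes over the
 * irq-disable duty that the outer hbalock used to provide. */
spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
/* ... walk phba->sli4_hba.lpfc_abts_nvmet_ctx_list ... */
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);

Note the one counter-move in the first hunk: __lpfc_clear_active_sglq() still
requires hbalock, so the patch takes it narrowly around that single call
instead of holding it across the whole list walk.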
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c84da8e6b65d..f2d9a3580887 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1440,7 +1440,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
list_del_init(&ctx_buf->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock(&phba->hbalock);
__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
+ spin_unlock(&phba->hbalock);
+
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
@@ -1787,8 +1790,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
}
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
@@ -1806,10 +1808,10 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
ctxp->flag &= ~LPFC_NVME_XBUSY;
spin_unlock(&ctxp->ctxlock);
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
if (ndlp &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -1830,9 +1832,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
lpfc_worker_wake_up(phba);
return;
}
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
if (ctxp) {
/*
@@ -1876,8 +1876,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
sid = sli4_sid_from_fc_hdr(fc_hdr);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
@@ -1886,9 +1885,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
xri = ctxp->ctxbuf->sglq->sli4_xritag;
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
ctxp->flag |= LPFC_NVME_ABTS_RCV;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -1907,9 +1905,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
}
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
/* check the wait list */
if (phba->sli4_hba.nvmet_io_wait_cnt) {
struct rqb_dmabuf *nvmebuf;