author     James Smart <james.smart@emulex.com>       2010-03-15 11:25:20 -0400
committer  James Bottomley <James.Bottomley@suse.de>  2010-04-11 09:23:50 -0500
commit     7a4702774381103e936cae09ec12301090c6c212 (patch)
tree       537fcd43fb911d9841d2d3ba3790b135bc6aa907 /drivers/scsi/lpfc/lpfc_bsg.c
parent     cb5172eafd9ffdab6bb7b1eec628ea706d5817c8 (diff)
[SCSI] lpfc 8.3.11: Driver management improvements via BSG
- Add BSG support for PCI loopback testing.
- Add BSG support for extended mailbox commands.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
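For context, a rough sketch of how a management application might drive the new extended-mailbox path from user space is shown below. It uses the standard FC bsg vendor-command plumbing (an sg_io_v4 request sent with SG_IO to the host's fc_host bsg node). The LPFC_BSG_VENDOR_MBOX value, the struct dfc_mbox_req layout, and the one-page payload sizes are assumptions inferred from the diff below, not copied from the real lpfc_bsg.h, so treat this as an illustration rather than the driver's documented ABI.

/*
 * Hypothetical user-space sketch of issuing an extended mailbox command
 * through the FC bsg vendor interface added/extended by this patch.
 * LPFC_BSG_VENDOR_MBOX, struct dfc_mbox_req and the page-sized payloads
 * are assumptions taken from the diff, not from lpfc_bsg.h.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>          /* struct sg_io_v4, SG_IO protocol codes */
#include <scsi/scsi_bsg_fc.h>   /* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define LPFC_BSG_VENDOR_MBOX    7       /* assumed vendor command code */

struct dfc_mbox_req {                   /* assumed layout (see lpfc_bsg.h) */
        uint32_t command;               /* LPFC_BSG_VENDOR_MBOX */
        uint32_t inExtWLen;             /* extended words sent to the HBA */
        uint32_t outWxtWLen;            /* extended words expected back */
        uint32_t mbOffset;              /* mailbox offset, from the app */
};

/*
 * req_buf:   one page holding the MAILBOX_t followed by inExtWLen words
 * reply_buf: one page receiving the response mailbox plus outWxtWLen words
 *            (the patch requires the reply payload to be PAGE_SIZE)
 */
static int lpfc_issue_ext_mbox(int bsg_fd, void *req_buf, void *reply_buf,
                               uint32_t in_wlen, uint32_t out_wlen)
{
        uint32_t request[(sizeof(struct fc_bsg_request) +
                          sizeof(struct dfc_mbox_req)) / sizeof(uint32_t) + 1];
        struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)request;
        struct dfc_mbox_req *mbox_req =
                (struct dfc_mbox_req *)bsg_req->rqst_data.h_vendor.vendor_cmd;
        struct fc_bsg_reply bsg_reply;
        struct sg_io_v4 io;

        memset(request, 0, sizeof(request));
        bsg_req->msgcode = FC_BSG_HST_VENDOR;
        mbox_req->command = LPFC_BSG_VENDOR_MBOX;
        mbox_req->inExtWLen = in_wlen;          /* counts are in words */
        mbox_req->outWxtWLen = out_wlen;
        mbox_req->mbOffset = 0;

        memset(&io, 0, sizeof(io));
        io.guard = 'Q';
        io.protocol = BSG_PROTOCOL_SCSI;
        io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
        io.request = (uintptr_t)request;
        io.request_len = sizeof(request);
        io.dout_xferp = (uintptr_t)req_buf;     /* mailbox + inExtWLen words */
        io.dout_xfer_len = 4096;
        io.din_xferp = (uintptr_t)reply_buf;    /* mailbox + outWxtWLen words */
        io.din_xfer_len = 4096;
        io.response = (uintptr_t)&bsg_reply;
        io.max_response_len = sizeof(bsg_reply);

        return ioctl(bsg_fd, SG_IO, &io);       /* fd: the fc_host bsg node */
}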
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c  257
1 file changed, 202 insertions(+), 55 deletions(-)
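The first hunk below reworks the mailbox completion handler so that the reply payload is the raw response MAILBOX_t immediately followed by outWxtWLen 32-bit words of extended data. A minimal hedged helper for splitting that buffer on the application side might look like the sketch below; MBOX_WSIZE is a hypothetical stand-in for sizeof(MAILBOX_t) in words, not a constant from the driver headers.

/*
 * Minimal sketch of splitting the reply payload laid out by the reworked
 * lpfc_bsg_wake_mbox_wait(): response mailbox first, then outWxtWLen words
 * of extended data.  MBOX_WSIZE is an assumed stand-in for the real
 * sizeof(MAILBOX_t) / sizeof(uint32_t).
 */
#include <stdint.h>
#include <string.h>

#define MBOX_WSIZE 64   /* assumed mailbox size in words */

static void split_mbox_reply(const uint8_t *reply,      /* PAGE_SIZE buffer */
                             uint32_t out_ext_wlen,     /* words, from app */
                             uint32_t *mbox_out,        /* MBOX_WSIZE words */
                             uint32_t *ext_out)         /* out_ext_wlen words */
{
        /* response mailbox is copied first by the completion handler ... */
        memcpy(mbox_out, reply, MBOX_WSIZE * sizeof(uint32_t));
        /* ... any extended data follows immediately, count is in words */
        if (out_ext_wlen)
                memcpy(ext_out, reply + MBOX_WSIZE * sizeof(uint32_t),
                       out_ext_wlen * sizeof(uint32_t));
}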
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d62b3e467926..92ad202a9380 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -79,6 +79,12 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
+ struct lpfc_dmabuf *rxbmp; /* for BIU diags */
+ struct lpfc_dmabufext *dmp; /* for BIU diags */
+ uint8_t *ext; /* extended mailbox data */
+ uint32_t mbOffset; /* from app */
+ uint32_t inExtWLen; /* from app */
+ uint32_t outWxtWLen; /* from app */
/* job waiting for this mbox command to finish */
struct fc_bsg_job *set_job;
@@ -2377,35 +2383,68 @@ void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
struct fc_bsg_job *job;
uint32_t size;
unsigned long flags;
+ uint8_t *to;
+ uint8_t *from;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
+ /* job already timed out? */
if (!dd_data) {
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
- pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
- mb = dd_data->context_un.mbox.mb;
+ /* build the outgoing buffer to do an sg copy
+ * the format is the response mailbox followed by any extended
+ * mailbox data
+ */
+ from = (uint8_t *)&pmboxq->u.mb;
+ to = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(to, from, sizeof(MAILBOX_t));
+ /* copy the extended data if any, count is in words */
+ if (dd_data->context_un.mbox.outWxtWLen) {
+ from = (uint8_t *)dd_data->context_un.mbox.ext;
+ to += sizeof(MAILBOX_t);
+ memcpy(to, from,
+ dd_data->context_un.mbox.outWxtWLen * sizeof(uint32_t));
+ }
+
+ from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
- memcpy(mb, pmb, sizeof(*pmb));
- size = job->request_payload.payload_len;
+ size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
- mb, size);
+ from, size);
job->reply->result = 0;
+
dd_data->context_un.mbox.set_job = NULL;
job->dd_data = NULL;
job->job_done(job);
+ /* need to hold the lock until we call job done to hold off
+ * the timeout handler returning to the midlayer while
+ * we are still processing the job
+ */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(mb);
+ kfree(dd_data->context_un.mbox.ext);
+ if (dd_data->context_un.mbox.dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dd_data->context_un.mbox.dmp->size,
+ dd_data->context_un.mbox.dmp->dma.virt,
+ dd_data->context_un.mbox.dmp->dma.phys);
+ kfree(dd_data->context_un.mbox.dmp);
+ }
+ if (dd_data->context_un.mbox.rxbmp) {
+ lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
+ dd_data->context_un.mbox.rxbmp->phys);
+ kfree(dd_data->context_un.mbox.rxbmp);
+ }
kfree(dd_data);
return;
}
@@ -2468,6 +2507,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_WRITE_EVENT_LOG:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
+ case MBX_RUN_BIU_DIAG64:
break;
case MBX_SET_VARIABLE:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -2482,7 +2522,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
phba->fc_topology = TOPOLOGY_PT_PT;
}
break;
- case MBX_RUN_BIU_DIAG64:
case MBX_READ_EVENT_LOG:
case MBX_READ_SPARM64:
case MBX_READ_LA:
@@ -2518,97 +2557,199 @@ static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_vport *vport)
{
- LPFC_MBOXQ_t *pmboxq;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
- struct bsg_job_data *dd_data;
+ LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+ /* a 4k buffer to hold the mb and extended data from/to the bsg */
+ MAILBOX_t *mb = NULL;
+ struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
uint32_t size;
+ struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
+ struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
+ struct ulp_bde64 *rxbpl = NULL;
+ struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ uint8_t *ext = NULL;
int rc = 0;
+ uint8_t *from;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* check if requested extended data lengths are valid */
+ if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
+ (mbox_req->outWxtWLen > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2727 Failed allocation of dd_data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!mb) {
- kfree(dd_data);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
- kfree(dd_data);
- kfree(mb);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
size = job->request_payload.payload_len;
- job->reply->reply_payload_rcv_len =
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mb, size);
rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- return rc; /* must be negative */
- }
+ if (rc != 0)
+ goto job_done; /* must be negative */
- memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
memcpy(pmb, mb, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
pmboxq->vport = vport;
+ /* extended mailbox commands will need an extended buffer */
+ if (mbox_req->inExtWLen || mbox_req->outWxtWLen) {
+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
+ if (!ext) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* any data for the device? */
+ if (mbox_req->inExtWLen) {
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)ext, from,
+ mbox_req->inExtWLen * sizeof(uint32_t));
+ }
+
+ pmboxq->context2 = ext;
+ pmboxq->in_ext_byte_len =
+ mbox_req->inExtWLen *
+ sizeof(uint32_t);
+ pmboxq->out_ext_byte_len =
+ mbox_req->outWxtWLen *
+ sizeof(uint32_t);
+ pmboxq->mbox_offset_word =
+ mbox_req->mbOffset;
+ pmboxq->context2 = ext;
+ pmboxq->in_ext_byte_len =
+ mbox_req->inExtWLen * sizeof(uint32_t);
+ pmboxq->out_ext_byte_len =
+ mbox_req->outWxtWLen * sizeof(uint32_t);
+ pmboxq->mbox_offset_word = mbox_req->mbOffset;
+ }
+
+ /* biu diag will need a kernel buffer to transfer the data
+ * allocate our own buffer and setup the mailbox command to
+ * use ours
+ */
+ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, PAGE_SIZE, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ dmp->size = PAGE_SIZE;
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys);
+
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+ dd_data->context_un.mbox.rxbmp = rxbmp;
+ dd_data->context_un.mbox.dmp = dmp;
+ } else {
+ dd_data->context_un.mbox.rxbmp = NULL;
+ dd_data->context_un.mbox.dmp = NULL;
+ }
+
+ /* setup wake call as IOCB callback */
+ pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+
+ /* setup context field to pass wait_queue pointer to wake function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.set_job = job;
+ dd_data->context_un.mbox.ext = ext;
+ dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+ dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+ dd_data->context_un.mbox.outWxtWLen = mbox_req->outWxtWLen;
+ job->dd_data = dd_data;
+
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- }
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ goto job_done;
}
+ /* job finished, copy the data */
memcpy(mb, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
mb, size);
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
/* not waiting mbox already done */
- return 0;
+ rc = 0;
+ goto job_done;
}
- /* setup wake call as IOCB callback */
- pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
- /* setup context field to pass wait_queue pointer to wake function */
- pmboxq->context1 = dd_data;
- dd_data->type = TYPE_MBOX;
- dd_data->context_un.mbox.pmboxq = pmboxq;
- dd_data->context_un.mbox.mb = mb;
- dd_data->context_un.mbox.set_job = job;
- job->dd_data = dd_data;
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
- if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- kfree(dd_data);
- kfree(mb);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+ return 1; /* job started */
+
+job_done:
+ /* common exit for error or job completed inline */
+ kfree(mb);
+ if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- return -EIO;
+ kfree(ext);
+ if (dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dmp->size, dmp->dma.virt,
+ dmp->dma.phys);
+ kfree(dmp);
+ }
+ if (rxbmp) {
+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+ kfree(rxbmp);
}
+ kfree(dd_data);
- return 1;
+ return rc;
}
/**
@@ -2638,6 +2779,11 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
goto job_error;
}
+ if (job->reply_payload.payload_len != PAGE_SIZE) {
+ rc = -EINVAL;
+ goto job_error;
+ }
+
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
rc = -EAGAIN;
goto job_error;
@@ -3094,6 +3240,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
job->dd_data = NULL;
job->reply->reply_payload_rcv_len = 0;
job->reply->result = -EAGAIN;
+ /* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
break;