author    | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2007-05-26 00:26:59 +0900
committer | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2007-05-29 16:54:22 -0500
commit    | 63015bc9333907725f90a1691d0ade44e51cdcbf (patch)
tree      | 0e5eb8d8e348dcd72e1bf60ab2c9786ef30b0ccc /drivers/scsi/ipr.c
parent    | c13e5566471d90ff2858f5cacaf27021d158e037 (diff)
download  | lwn-63015bc9333907725f90a1691d0ade44e51cdcbf.tar.gz lwn-63015bc9333907725f90a1691d0ade44e51cdcbf.zip
[SCSI] ipr: convert to use the data buffer accessors
- remove the unnecessary map_single path.
- convert to use the new accessors for the sg lists and the
parameters.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r-- | drivers/scsi/ipr.c | 144
1 file changed, 40 insertions, 104 deletions
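For context before reading the diff: the conversion follows the standard SCSI data buffer accessor pattern. scsi_dma_map() maps the command's scatterlist, scsi_for_each_sg() walks the mapped segments, and scsi_dma_unmap() tears the mapping down on completion, so the driver no longer needs a separate pci_map_single() path for requests without a scatterlist. Below is a minimal sketch of that pattern, not the driver's actual code: struct my_sg_desc, my_build_sgl() and my_complete() are hypothetical stand-ins for ipr's IOADL handling, and only the scsi_* calls are the real mid-layer API (the ->scsi_done() completion callback matches the 2.6-era interface visible in the diff).

/*
 * Sketch only: my_sg_desc, my_build_sgl() and my_complete() are made-up
 * driver-side names; the scsi_* accessors are what this patch adopts.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <scsi/scsi_cmnd.h>

struct my_sg_desc {			/* hypothetical hardware SG element */
	__be32 len;
	__be32 addr;
};

static int my_build_sgl(struct scsi_cmnd *scmd, struct my_sg_desc *desc)
{
	struct scatterlist *sg;
	int i, nseg;

	if (!scsi_bufflen(scmd))	/* no data phase, nothing to map */
		return 0;

	nseg = scsi_dma_map(scmd);	/* maps scsi_sglist(scmd) for DMA */
	if (nseg < 0)
		return -EIO;		/* mapping failed */

	scsi_for_each_sg(scmd, sg, nseg, i) {
		desc[i].len  = cpu_to_be32(sg_dma_len(sg));
		desc[i].addr = cpu_to_be32(sg_dma_address(sg));
	}
	return nseg;
}

/* One unmap call on completion replaces the old use_sg/map_single split. */
static void my_complete(struct scsi_cmnd *scmd, u32 residual)
{
	scsi_set_resid(scmd, residual);
	scsi_dma_unmap(scmd);
	scmd->scsi_done(scmd);		/* 2.6-era completion callback */
}

In ipr_build_ioadl() below, the same pattern appears with the driver's real ipr_ioadl_desc descriptors, plus ipr's existing optimization of placing the IOADL inline in the IOARCB when it fits.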
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 4baa79e68679..6411c458d336 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 }
 
 /**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg:	ioa config struct
- * @ipr_cmd:	ipr command struct
- *
- * Return value:
- *	nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
-			     struct ipr_cmnd *ipr_cmd)
-{
-	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
-	if (ipr_cmd->dma_use_sg) {
-		if (scsi_cmd->use_sg > 0) {
-			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
-				     scsi_cmd->use_sg,
-				     scsi_cmd->sc_data_direction);
-		} else {
-			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
-					 scsi_cmd->request_bufflen,
-					 scsi_cmd->sc_data_direction);
-		}
-	}
-}
-
-/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:     interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
 	scsi_cmd->result |= (DID_ERROR << 16);
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 }
@@ -4285,93 +4259,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
 			   struct ipr_cmnd *ipr_cmd)
 {
-	int i;
-	struct scatterlist *sglist;
+	int i, nseg;
+	struct scatterlist *sg;
 	u32 length;
 	u32 ioadl_flags = 0;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 
-	length = scsi_cmd->request_bufflen;
-
-	if (length == 0)
+	length = scsi_bufflen(scsi_cmd);
+	if (!length)
 		return 0;
 
-	if (scsi_cmd->use_sg) {
-		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
-						 scsi_cmd->request_buffer,
-						 scsi_cmd->use_sg,
-						 scsi_cmd->sc_data_direction);
-
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		}
-
-		sglist = scsi_cmd->request_buffer;
+	nseg = scsi_dma_map(scsi_cmd);
+	if (nseg < 0) {
+		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		return -1;
+	}
 
-		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-		}
+	ipr_cmd->dma_use_sg = nseg;
 
-		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
-			ioadl[i].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
-			ioadl[i].address =
-				cpu_to_be32(sg_dma_address(&sglist[i]));
-		}
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->write_data_transfer_length = cpu_to_be32(length);
+		ioarcb->write_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+		ioarcb->read_data_transfer_length = cpu_to_be32(length);
+		ioarcb->read_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	}
 
-		if (likely(ipr_cmd->dma_use_sg)) {
-			ioadl[i-1].flags_and_data_len |=
-				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
-	} else {
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		}
+	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+		ioadl = ioarcb->add_data.u.ioadl;
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+				    offsetof(struct ipr_ioarcb, add_data));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
 
-		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
-						     scsi_cmd->request_buffer, length,
-						     scsi_cmd->sc_data_direction);
-
-		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-			ipr_cmd->dma_use_sg = 1;
-			ioadl[0].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
-			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+		ioadl[i].flags_and_data_len =
+			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
 	}
 
-	return -1;
+	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	return 0;
 }
 
 /**
@@ -4434,7 +4370,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 		res->needs_sync_complete = 1;
 		res->in_erp = 0;
 	}
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4812,7 +4748,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	}
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4833,10 +4769,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
-	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+		scsi_dma_unmap(ipr_cmd->scsi_cmd);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
 	} else