author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 10:09:16 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 10:09:16 -0700
commit     92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree       8d83c0dc3c6b935d8367e331872f242b742f0a8a /arch
parent     f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent     644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
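The theme running through these arch patches is mechanical: every open-coded
scatterlist walk (sg++ or sg[i]) is replaced with the sg helpers, sg_next()
and for_each_sg(), which know how to follow chained scatterlists. A minimal
sketch of the conversion; example_map_sg() is a hypothetical function, not
code from any file in this series, and the address arithmetic simply mirrors
what the powerpc dma_direct_map_sg() hunk below does:

	#include <linux/scatterlist.h>

	/* Hypothetical example of the conversion applied throughout this
	 * merge.  The old pattern assumed one flat array of entries:
	 *
	 *	for (i = 0; i < nents; i++, sg++)
	 *		...
	 *
	 * The new pattern lets the helper step across chain entries:
	 */
	static int example_map_sg(struct scatterlist *sglist, int nents)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sglist, sg, nents, i) {
			sg->dma_address = page_to_phys(sg->page) + sg->offset;
			sg->dma_length  = sg->length;
		}
		return nents;
	}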
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c            14
-rw-r--r--  arch/ia64/hp/sim/simscsi.c                  1
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                 11
-rw-r--r--  arch/powerpc/kernel/dma_64.c                5
-rw-r--r--  arch/powerpc/kernel/ibmebus.c              11
-rw-r--r--  arch/powerpc/kernel/iommu.c                23
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c     7
-rw-r--r--  arch/sparc/kernel/ioport.c                 25
-rw-r--r--  arch/sparc/mm/io-unit.c                    12
-rw-r--r--  arch/sparc/mm/iommu.c                      10
-rw-r--r--  arch/sparc/mm/sun4c.c                      10
-rw-r--r--  arch/sparc64/kernel/iommu.c                39
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c            32
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c           24
-rw-r--r--  arch/x86/kernel/pci-gart_64.c              65
-rw-r--r--  arch/x86/kernel/pci-nommu_64.c              5
16 files changed, 167 insertions, 127 deletions
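Why the pointer arithmetic has to go: with chaining, a scatterlist may be
split across several arrays, and the last slot of each array is a chain
entry pointing at the next one, so a plain sg++ can land on the link rather
than on a real segment. A simplified, self-contained sketch of the idea,
using a toy type rather than the kernel's actual scatterlist layout:

	/* Toy model of a chained scatterlist walk.  In the real kernel the
	 * chain marker is encoded in the entry itself and sg_next() hides
	 * the hop; here an explicit pointer stands in for that encoding. */
	struct toy_sg {
		struct toy_sg *chain;	/* set only on a chain entry */
		unsigned int length;
	};

	static struct toy_sg *toy_sg_next(struct toy_sg *sg)
	{
		sg++;			/* step within the current array  */
		if (sg->chain)		/* chain entry: hop to next array */
			sg = sg->chain;
		return sg;
	}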
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e980e7aa2306..4338f4123f31 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
startsg->dma_address, startsg->dma_length,
sba_sg_address(startsg));
- startsg++;
+ startsg = sg_next(startsg);
}
}
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
while (the_nents-- > 0) {
if (sba_sg_address(the_sg) == 0x0UL)
sba_dump_sg(NULL, startsg, nents);
- the_sg++;
+ the_sg = sg_next(the_sg);
}
}
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
u32 pide = startsg->dma_address & ~PIDE_FLAG;
dma_offset = (unsigned long) pide & ~iovp_mask;
startsg->dma_address = 0;
- dma_sg++;
+ dma_sg = sg_next(dma_sg);
dma_sg->dma_address = pide | ioc->ibase;
pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
pdirp++;
} while (cnt > 0);
}
- startsg++;
+ startsg = sg_next(startsg);
}
/* force pdir update */
wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
while (--nents > 0) {
unsigned long vaddr; /* tmp */
- startsg++;
+ startsg = sg_next(startsg);
/* PARANOID */
startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
#ifdef ALLOW_IOV_BYPASS_SG
ASSERT(to_pci_dev(dev)->dma_mask);
if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
- for (sg = sglist ; filled < nents ; filled++, sg++){
+ for_each_sg(sglist, sg, nents, filled) {
sg->dma_length = sg->length;
sg->dma_address = virt_to_phys(sba_sg_address(sg));
}
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
while (nents && sglist->dma_length) {
sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
- sglist++;
+ sglist = sg_next(sglist);
nents--;
}
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index d62fa76e5a7d..a3a558a06757 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -360,6 +360,7 @@ static struct scsi_host_template driver_template = {
.max_sectors = 1024,
.cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN,
.use_clustering = DISABLE_CLUSTERING,
+ .use_sg_chaining = ENABLE_SG_CHAINING,
};
static int __init
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d79ddacfba2d..ecd8a52b9b9e 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
*
* Unmap a set of streaming mode DMA translations.
*/
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, int direction)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+ struct scatterlist *sg;
BUG_ON(dev->bus != &pci_bus_type);
- for (i = 0; i < nhwentries; i++, sg++) {
+ for_each_sg(sgl, sg, nhwentries, i) {
provider->dma_unmap(pdev, sg->dma_address, direction);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
*
* Maps each entry of @sg for DMA.
*/
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
int direction)
{
unsigned long phys_addr;
- struct scatterlist *saved_sg = sg;
+ struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
/*
* Setup a DMA address for each entry in the scatterlist.
*/
- for (i = 0; i < nhwentries; i++, sg++) {
+ for_each_sg(sgl, sg, nhwentries, i) {
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
sg->dma_address = provider->dma_map(pdev,
phys_addr, sg->length,
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 7b0e754383cf..9001104b56b0 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
{
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
- for (i = 0; i < nents; i++, sg++) {
+ for_each_sg(sgl, sg, nents, i) {
sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
dma_direct_offset;
sg->dma_length = sg->length;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 53bf64623bd8..2e16ca5778a3 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev,
}
static int ibmebus_map_sg(struct device *dev,
- struct scatterlist *sg,
+ struct scatterlist *sgl,
int nents, enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
- for (i = 0; i < nents; i++) {
- sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
- + sg[i].offset;
- sg[i].dma_length = sg[i].length;
+ for_each_sg(sgl, sg, nents, i) {
+ sg->dma_address = (dma_addr_t)page_address(sg->page)
+ + sg->offset;
+ sg->dma_length = sg->length;
}
return nents;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index e4ec6eee81a8..306a6f75b6c5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
- int outcount, incount;
+ int outcount, incount, i;
unsigned long handle;
BUG_ON(direction == DMA_NONE);
@@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_lock_irqsave(&(tbl->it_lock), flags);
- for (s = outs; nelems; nelems--, s++) {
+ for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen;
slen = s->length;
@@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (novmerge || (dma_addr != dma_next)) {
/* Can't merge: create a new segment */
segstart = s;
- outcount++; outs++;
+ outcount++;
+ outs = sg_next(outs);
DBG(" can't merge, new segment.\n");
} else {
outs->dma_length += s->length;
@@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
* next entry of the sglist if we didn't fill the list completely
*/
if (outcount < incount) {
- outs++;
+ outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
@@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
return outcount;
failure:
- for (s = &sglist[0]; s <= outs; s++) {
+ for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages;
@@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
+ if (s == outs)
+ break;
}
spin_unlock_irqrestore(&(tbl->it_lock), flags);
return 0;
@@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
+ struct scatterlist *sg;
unsigned long flags;
BUG_ON(direction == DMA_NONE);
@@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_lock_irqsave(&(tbl->it_lock), flags);
+ sg = sglist;
while (nelems--) {
unsigned int npages;
- dma_addr_t dma_handle = sglist->dma_address;
+ dma_addr_t dma_handle = sg->dma_address;
- if (sglist->dma_length == 0)
+ if (sg->dma_length == 0)
break;
- npages = iommu_num_pages(dma_handle,sglist->dma_length);
+ npages = iommu_num_pages(dma_handle, sg->dma_length);
__iommu_free(tbl, dma_handle, npages);
- sglist++;
+ sg = sg_next(sg);
}
/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 190ff4b59a55..07e64b48e7fc 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
}
}
-static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
return -EPERM;
#else
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+ struct scatterlist *sg;
int i;
- for (i = 0; i < nents; i++, sg++) {
+ for_each_sg(sgl, sg, nents, i) {
int result = ps3_dma_map(dev->d_region,
page_to_phys(sg->page) + sg->offset, sg->length,
&sg->dma_address, 0);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 62182d2d7b0d..9c3ed88853f3 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/pci.h> /* struct pci_dev */
#include <linux/proc_fs.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
@@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev,
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
sg->dvma_address =
virt_to_phys(page_address(sg->page)) + sg->offset;
sg->dvma_length = sg->length;
- sg++;
}
return nents;
}
@@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg->page),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
@@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg->page),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg->page),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 7c89893b1fe8..375b4db63704 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -11,8 +11,8 @@
#include <linux/mm.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
+#include <linux/scatterlist.h>
-#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
@@ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
- sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
- sg[sz].dvma_length = sg[sz].length;
+ sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_length = sg->length;
+ sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
@@ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
- len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
- vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
+ sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 52e907af9d29..283656d9f6ea 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -12,8 +12,8 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
+#include <linux/scatterlist.h>
-#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
@@ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
- sg++;
+ sg = sg_next(sg);
}
}
@@ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
- sg++;
+ sg = sg_next(sg);
}
}
@@ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
- sg++;
+ sg = sg_next(sg);
}
}
@@ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
sg->dvma_address = 0x21212121;
- sg++;
+ sg = sg_next(sg);
}
}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 005a3e72d4f2..ee6708fc4492 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -17,8 +17,8 @@
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
+#include <linux/scatterlist.h>
-#include <asm/scatterlist.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
{
while (sz != 0) {
--sz;
- sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
- sg[sz].dvma_length = sg[sz].length;
+ sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_length = sg->length;
+ sg = sg_next(sg);
}
}
@@ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
{
while (sz != 0) {
--sz;
- sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
+ sun4c_unlockarea((char *)sg->dvma_address, sg->length);
+ sg = sg_next(sg);
}
}
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9c..db3ffcf7a120 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
+#include <linux/scatterlist.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
unsigned long iopte_protection)
{
struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
+ struct scatterlist *sg_end = sg_last(sg, nelems);
int i;
for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
- sg++;
+ sg = sg_next(sg);
}
pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
}
pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
+ sg = sg_next(sg);
/* Skip over any tail mappings we've fully mapped,
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
- while (sg < sg_end &&
+ while (sg != sg_end &&
(pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
((pteval ^
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
- sg++;
+ sg = sg_next(sg);
}
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
- dma_sg++;
+ dma_sg = sg_next(dma_sg);
}
}
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
- sgtmp++;
+ sgtmp = sg_next(sgtmp);
used--;
}
used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, ctx, i, npages;
+ struct scatterlist *sg, *sgprv;
u32 bus_addr;
if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = sglist->dma_address & IO_PAGE_MASK;
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
bus_addr) >> IO_PAGE_SHIFT;
base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
+ struct scatterlist *sg, *sgprv;
u32 bus_addr;
iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
/* Step 2: Kick data out of streaming buffers. */
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
- for(i = 1; i < nelems; i++)
- if (!sglist[i].dma_length)
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 95de1444ee67..cacacfae5451 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
+#include <linux/scatterlist.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
int nused, int nelems, unsigned long prot)
{
struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
+ struct scatterlist *sg_end = sg_last(sg, nelems);
unsigned long flags;
int i;
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
- sg++;
+ sg = sg_next(sg);
}
pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
}
pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
+ sg = sg_next(sg);
/* Skip over any tail mappings we've fully mapped,
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
- while (sg < sg_end &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+ while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
((pteval ^
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
- sg++;
+ if (sg == sg_end)
+ break;
+ sg = sg_next(sg);
}
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
- dma_sg++;
+ dma_sg = sg_next(dma_sg);
}
if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
- sgtmp++;
+ sgtmp = sg_next(sgtmp);
used--;
}
used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct iommu *iommu;
unsigned long flags, i, npages;
+ struct scatterlist *sg, *sgprv;
long entry;
u32 devhandle, bus_addr;
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
bus_addr) >> IO_PAGE_SHIFT;
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 71da01e73f03..a50b787b3bfa 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -35,6 +35,7 @@
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/scatterlist.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/tce.h>
@@ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nelems, int direction)
{
struct iommu_table *tbl = find_iommu_table(dev);
+ struct scatterlist *s;
+ int i;
if (!translate_phb(to_pci_dev(dev)))
return;
- while (nelems--) {
+ for_each_sg(sglist, s, nelems, i) {
unsigned int npages;
- dma_addr_t dma = sglist->dma_address;
- unsigned int dmalen = sglist->dma_length;
+ dma_addr_t dma = s->dma_address;
+ unsigned int dmalen = s->dma_length;
if (dmalen == 0)
break;
npages = num_dma_pages(dma, dmalen);
iommu_free(tbl, dma, npages);
- sglist++;
}
}
static int calgary_nontranslate_map_sg(struct device* dev,
struct scatterlist *sg, int nelems, int direction)
{
+ struct scatterlist *s;
int i;
- for (i = 0; i < nelems; i++ ) {
- struct scatterlist *s = &sg[i];
+ for_each_sg(sg, s, nelems, i) {
BUG_ON(!s->page);
s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
s->dma_length = s->length;
@@ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
int nelems, int direction)
{
struct iommu_table *tbl = find_iommu_table(dev);
+ struct scatterlist *s;
unsigned long vaddr;
unsigned int npages;
unsigned long entry;
@@ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
if (!translate_phb(to_pci_dev(dev)))
return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
- for (i = 0; i < nelems; i++ ) {
- struct scatterlist *s = &sg[i];
+ for_each_sg(sg, s, nelems, i) {
BUG_ON(!s->page);
vaddr = (unsigned long)page_address(s->page) + s->offset;
@@ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
return nelems;
error:
calgary_unmap_sg(dev, sg, nelems, direction);
- for (i = 0; i < nelems; i++) {
- sg[i].dma_address = bad_dma_address;
- sg[i].dma_length = 0;
+ for_each_sg(sg, s, nelems, i) {
+ sg->dma_address = bad_dma_address;
+ sg->dma_length = 0;
}
return 0;
}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4918c575d582..cfcc84e6c350 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
+#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
@@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
*/
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
+ struct scatterlist *s;
int i;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
+ for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length)
break;
gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
@@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
int nents, int dir)
{
+ struct scatterlist *s;
int i;
#ifdef CONFIG_IOMMU_DEBUG
printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
+ for_each_sg(sg, s, nents, i) {
unsigned long addr = page_to_phys(s->page) + s->offset;
if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir);
@@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
}
/* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static int __dma_map_cont(struct scatterlist *start, int nelems,
struct scatterlist *sout, unsigned long pages)
{
unsigned long iommu_start = alloc_iommu(pages);
unsigned long iommu_page = iommu_start;
+ struct scatterlist *s;
int i;
if (iommu_start == -1)
return -1;
-
- for (i = start; i < stopat; i++) {
- struct scatterlist *s = &sg[i];
+
+ for_each_sg(start, s, nelems, i) {
unsigned long pages, addr;
unsigned long phys_addr = s->dma_address;
- BUG_ON(i > start && s->offset);
- if (i == start) {
+ BUG_ON(s != start && s->offset);
+ if (s == start) {
*sout = *s;
sout->dma_address = iommu_bus_base;
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
@@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
return 0;
}
-static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static inline int dma_map_cont(struct scatterlist *start, int nelems,
struct scatterlist *sout,
unsigned long pages, int need)
{
- if (!need) {
- BUG_ON(stopat - start != 1);
- *sout = sg[start];
- sout->dma_length = sg[start].length;
+ if (!need) {
+ BUG_ON(nelems != 1);
+ *sout = *start;
+ sout->dma_length = start->length;
return 0;
- }
- return __dma_map_cont(sg, start, stopat, sout, pages);
+ }
+ return __dma_map_cont(start, nelems, sout, pages);
}
/*
@@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
int start;
unsigned long pages = 0;
int need = 0, nextneed;
+ struct scatterlist *s, *ps, *start_sg, *sgmap;
if (nents == 0)
return 0;
@@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
out = 0;
start = 0;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
+ start_sg = sgmap = sg;
+ ps = NULL; /* shut up gcc */
+ for_each_sg(sg, s, nents, i) {
dma_addr_t addr = page_to_phys(s->page) + s->offset;
s->dma_address = addr;
BUG_ON(s->length == 0);
@@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
/* Handle the previous not yet processed entries */
if (i > start) {
- struct scatterlist *ps = &sg[i-1];
/* Can only merge when the last chunk ends on a page
boundary and the new one doesn't have an offset. */
if (!iommu_merge || !nextneed || !need || s->offset ||
- (ps->offset + ps->length) % PAGE_SIZE) {
- if (dma_map_cont(sg, start, i, sg+out, pages,
- need) < 0)
+ (ps->offset + ps->length) % PAGE_SIZE) {
+ if (dma_map_cont(start_sg, i - start, sgmap,
+ pages, need) < 0)
goto error;
out++;
+ sgmap = sg_next(sgmap);
pages = 0;
- start = i;
+ start = i;
+ start_sg = s;
}
}
need = nextneed;
pages += to_pages(s->offset, s->length);
+ ps = s;
}
- if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
+ if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
goto error;
out++;
flush_gart();
- if (out < nents)
- sg[out].dma_length = 0;
+ if (out < nents) {
+ sgmap = sg_next(sgmap);
+ sgmap->dma_length = 0;
+ }
return out;
error:
@@ -437,8 +444,8 @@ error:
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
iommu_full(dev, pages << PAGE_SHIFT, dir);
- for (i = 0; i < nents; i++)
- sg[i].dma_address = bad_dma_address;
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = bad_dma_address;
return 0;
}
diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
index 2a34c6c025a9..e85d4360360c 100644
--- a/arch/x86/kernel/pci-nommu_64.c
+++ b/arch/x86/kernel/pci-nommu_64.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
#include <asm/iommu.h>
#include <asm/processor.h>
@@ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction)
{
+ struct scatterlist *s;
int i;
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
+ for_each_sg(sg, s, nents, i) {
BUG_ON(!s->page);
s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))