| author | Anton Blanchard <anton@samba.org> | 2012-06-03 19:43:44 +0000 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-07-03 14:14:47 +1000 |
| commit | 67ca141567519a6b0ec81850a7b6569b6d8c2b52 | |
| tree | 204f16ef14591930529216357f39a4ed88ced14e /arch/powerpc/kernel/iommu.c | |
| parent | 0e4bc95d87394364f408627067238453830bdbf3 | |
powerpc/iommu: Reduce spinlock coverage in iommu_free
This patch moves the ppc_md.tce_free call outside of the it_lock spinlock in
iommu_free, so the lock protects only the bitmap update and the comparatively
slow TCE invalidation no longer serializes other CPUs.
Some performance numbers were obtained with a Chelsio T3 adapter on
two POWER7 boxes, running a 100-session TCP round-robin test.
Performance improved by 25% with this patch applied.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
| -rw-r--r-- | arch/powerpc/kernel/iommu.c | 51 |
|---|---|---|

1 file changed, 40 insertions(+), 11 deletions(-)
```diff
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9c8967fa1e63..d855cfc0732d 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -190,10 +190,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	 * not altered.
 	 */
 	if (unlikely(build_fail)) {
-		spin_lock_irqsave(&(tbl->it_lock), flags);
 		__iommu_free(tbl, ret, npages);
-		spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
 		return DMA_ERROR_CODE;
 	}
 
@@ -207,8 +204,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	return ret;
 }
 
-static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
-			 unsigned int npages)
+static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
+			     unsigned int npages)
 {
 	unsigned long entry, free_entry;
 
@@ -228,21 +225,53 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 			printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
 			WARN_ON(1);
 		}
-		return;
+
+		return false;
 	}
+
+	return true;
+}
+
+static void __iommu_free_locked(struct iommu_table *tbl, dma_addr_t dma_addr,
+				unsigned int npages)
+{
+	unsigned long entry, free_entry;
+
+	BUG_ON(!spin_is_locked(&tbl->it_lock));
+
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	free_entry = entry - tbl->it_offset;
+
+	if (!iommu_free_check(tbl, dma_addr, npages))
+		return;
 
 	ppc_md.tce_free(tbl, entry, npages);
 	bitmap_clear(tbl->it_map, free_entry, npages);
 }
 
-static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
-		unsigned int npages)
+static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+			 unsigned int npages)
 {
+	unsigned long entry, free_entry;
 	unsigned long flags;
 
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	free_entry = entry - tbl->it_offset;
+
+	if (!iommu_free_check(tbl, dma_addr, npages))
+		return;
+
+	ppc_md.tce_free(tbl, entry, npages);
+
 	spin_lock_irqsave(&(tbl->it_lock), flags);
-	__iommu_free(tbl, dma_addr, npages);
+	bitmap_clear(tbl->it_map, free_entry, npages);
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
+}
+
+static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+		       unsigned int npages)
+{
+	__iommu_free(tbl, dma_addr, npages);
 
 	/* Make sure TLB cache is flushed if the HW needs it. We do
 	 * not do an mb() here on purpose, it is not needed on any of
@@ -390,7 +419,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			vaddr = s->dma_address & IOMMU_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE);
-			__iommu_free(tbl, vaddr, npages);
+			__iommu_free_locked(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
 		}
@@ -425,7 +454,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 			break;
 		npages = iommu_num_pages(dma_handle, sg->dma_length,
 					 IOMMU_PAGE_SIZE);
-		__iommu_free(tbl, dma_handle, npages);
+		__iommu_free_locked(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
```
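The change is a classic lock-narrowing exercise: the expensive TCE invalidation is hoisted out of the critical section, and it_lock ends up protecting only the allocation bitmap. Below is a minimal userspace sketch of the before/after shapes, with a pthread mutex standing in for it_lock and a hypothetical slow_hw_invalidate() standing in for ppc_md.tce_free; none of these names are kernel code.

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel objects in the patch. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* plays it_lock */
static unsigned long bitmap[4];                                /* plays it_map  */

/* Stub for the expensive hardware/hypervisor call (ppc_md.tce_free). */
static void slow_hw_invalidate(unsigned long entry, unsigned int npages)
{
	(void)entry;
	(void)npages;
	/* imagine an MMIO sequence or hypervisor call here */
}

/* Clear npages bits starting at 'start' (plays bitmap_clear). */
static void bitmap_clear_range(unsigned long *map, unsigned long start,
			       unsigned int npages)
{
	for (unsigned long i = start; i < start + npages; i++)
		map[i / (8 * sizeof(*map))] &= ~(1UL << (i % (8 * sizeof(*map))));
}

/* Before the patch: the slow invalidate runs with the lock held,
 * so every concurrent free on another CPU waits for it. */
static void free_entry_coarse(unsigned long entry, unsigned int npages)
{
	pthread_mutex_lock(&table_lock);
	slow_hw_invalidate(entry, npages);
	bitmap_clear_range(bitmap, entry, npages);
	pthread_mutex_unlock(&table_lock);
}

/* After the patch: only the bitmap update is serialized; the slow
 * invalidates proceed in parallel on different CPUs. */
static void free_entry_narrow(unsigned long entry, unsigned int npages)
{
	slow_hw_invalidate(entry, npages);
	pthread_mutex_lock(&table_lock);
	bitmap_clear_range(bitmap, entry, npages);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	bitmap[0] = ~0UL;
	free_entry_coarse(1, 2);
	free_entry_narrow(4, 3);
	printf("bitmap[0] = %#lx\n", bitmap[0]); /* bits 1-2 and 4-6 cleared */
	return 0;
}
```

Note also the __iommu_free_locked() variant kept for the scatterlist cleanup paths in iommu_map_sg() and iommu_unmap_sg(), which already run under it_lock: its BUG_ON(!spin_is_locked(&tbl->it_lock)) documents that contract and makes a mislocked caller fail loudly rather than silently corrupt it_map.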