author     Becky Bruce <becky.bruce@freescale.com>   2008-09-12 10:34:46 +0000
committer  Kumar Gala <galak@kernel.crashing.org>    2008-09-24 16:26:45 -0500
commit     4fc665b88a79a45bae8bbf3a05563c27c7337c3d (patch)
tree       ca668c2fab7c3a4d62b92174f4a5fcae2625cdd1 /arch/powerpc/kernel/dma.c
parent     8fae0353247530d2124b2419052fa6120462fa99 (diff)
powerpc: Merge 32 and 64-bit dma code
We essentially adopt the 64-bit DMA code, with some changes to support
32-bit systems, including HIGHMEM. DMA functions on 32-bit are now
invoked via accessor functions which call the correct op for a device based
on archdata dma_ops. If there is no archdata dma_ops, this defaults
to dma_direct_ops.

In addition, the dma_map/unmap_page functions are added to dma_ops
because we can't just fall back on map/unmap_single when HIGHMEM is
enabled. In the case of dma_direct_*, we stop using map/unmap_single
and just use the page version - this saves a lot of ugly
ifdeffing. We leave map/unmap_single in the dma_ops definition,
though, because they are needed by the iommu code, which does not
implement map/unmap_page. Ideally, going forward, we will completely
eliminate map/unmap_single and just have map/unmap_page, if it's
workable for 64-bit.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
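
The accessor dispatch described in the first paragraph lives in the powerpc
dma-mapping header rather than in this file. A minimal sketch of the pattern,
for illustration only (get_dma_ops and the exact fallback logic here are
assumptions based on the headers of this era, not the verbatim merged code):

/* Sketch of the accessor layer in asm/dma-mapping.h (illustrative). */
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
        /* Use the per-device ops set in archdata if the platform provided
         * them; otherwise fall back to the direct-mapping implementation. */
        if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
        return &dma_direct_ops;
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        /* Every driver-facing call simply forwards to the selected ops. */
        return ops->map_page(dev, page, offset, size, dir, NULL);
}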
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r--  arch/powerpc/kernel/dma.c  69
1 file changed, 46 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 124f86743bde..41fdd48bf433 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -16,21 +16,30 @@
  * This implementation supports a per-device offset that can be applied if
  * the address at which memory is visible to devices is not 0. Platform code
  * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is zero.
+ * default the offset is PCI_DRAM_OFFSET.
  */
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-        return (unsigned long)dev->archdata.dma_data;
+        if (dev)
+                return (unsigned long)dev->archdata.dma_data;
+
+        return PCI_DRAM_OFFSET;
 }
 
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-                                       dma_addr_t *dma_handle, gfp_t flag)
+void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+                                dma_addr_t *dma_handle, gfp_t flag)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+        return __dma_alloc_coherent(size, dma_handle, flag);
+#else
         struct page *page;
         void *ret;
         int node = dev_to_node(dev);
 
+        /* ignore region specifiers */
+        flag &= ~(__GFP_HIGHMEM);
+
         page = alloc_pages_node(node, flag, get_order(size));
         if (page == NULL)
                 return NULL;
@@ -39,27 +48,17 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
         *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
 
         return ret;
+#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
-                                     void *vaddr, dma_addr_t dma_handle)
+void dma_direct_free_coherent(struct device *dev, size_t size,
+                              void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+        __dma_free_coherent(size, vaddr);
+#else
         free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
-                                        size_t size,
-                                        enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
-{
-        return virt_to_abs(ptr) + get_dma_direct_offset(dev);
-}
-
-static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                    size_t size,
-                                    enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
-{
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -85,20 +84,44 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
+#ifdef CONFIG_PPC64
         /* Could be improved to check for memory though it better be
          * done via some global so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= DMA_32BIT_MASK;
+#else
+        return 1;
+#endif
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+                                             struct page *page,
+                                             unsigned long offset,
+                                             size_t size,
+                                             enum dma_data_direction dir,
+                                             struct dma_attrs *attrs)
+{
+        BUG_ON(dir == DMA_NONE);
+        __dma_sync_page(page, offset, size, dir);
+        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+                                         dma_addr_t dma_address,
+                                         size_t size,
+                                         enum dma_data_direction direction,
+                                         struct dma_attrs *attrs)
+{
 }
 
 struct dma_mapping_ops dma_direct_ops = {
         .alloc_coherent = dma_direct_alloc_coherent,
         .free_coherent  = dma_direct_free_coherent,
-        .map_single     = dma_direct_map_single,
-        .unmap_single   = dma_direct_unmap_single,
         .map_sg         = dma_direct_map_sg,
         .unmap_sg       = dma_direct_unmap_sg,
         .dma_supported  = dma_direct_dma_supported,
+        .map_page       = dma_direct_map_page,
+        .unmap_page     = dma_direct_unmap_page,
 };
 EXPORT_SYMBOL(dma_direct_ops);
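
As a usage note on the page-based hooks added above: the generic single-buffer
mapping can be layered on top of map_page, which is why dma_direct_ops no
longer needs map/unmap_single while the iommu ops can keep theirs. A rough
sketch of that layering, under the assumption that it sits in the dma-mapping
header (illustrative, not the verbatim merged code):

/* Sketch: dma_map_single() in terms of the ops table (illustrative). */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                                        size_t size,
                                        enum dma_data_direction dir)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        /* Ops that still provide map_single (e.g. the iommu code, which
         * has no map_page) are called directly. */
        if (ops->map_single)
                return ops->map_single(dev, cpu_addr, size, dir, NULL);

        /* Otherwise convert the lowmem buffer to (page, offset) and use
         * map_page; HIGHMEM pages never need a kernel virtual address. */
        return ops->map_page(dev, virt_to_page(cpu_addr),
                             (unsigned long)cpu_addr % PAGE_SIZE,
                             size, dir, NULL);
}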