author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-12-16 12:17:26 -0800
committer  Ingo Molnar <mingo@elte.hu>              2008-12-16 21:31:38 +0100
commit     8c5df16bec8a60bb8589fc232b9e26cac0ed4b2c
tree       b85d46d552ba86da332ebad328761998171332c5
parent     a79b7a2a758c39315344f0d86b5adb21d90d786e
swiotlb: allow architectures to override swiotlb pool allocation
Impact: generalize swiotlb allocation code
Architectures may need to allocate memory specially for use with
the swiotlb.  Create weak functions swiotlb_alloc_boot() and
swiotlb_alloc(), defaulting to the current behaviour.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
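
[Editorial note, not part of the commit: as an illustrative sketch, an architecture that needs special swiotlb memory could shadow the new __weak defaults by providing strong definitions with the same prototypes; the linker then picks the architecture's versions over the ones in lib/swiotlb.c.  my_arch_fixup_swiotlb() below is a hypothetical placeholder for whatever arch-specific post-processing would be required (e.g. making the buffer physically reachable by devices); it is not an existing kernel function.]

#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/swiotlb.h>

/* Hypothetical arch-specific fixup; placeholder name, not a real kernel API. */
static void my_arch_fixup_swiotlb(void *buf, size_t size, unsigned long nslabs)
{
	/* e.g. exchange the pages for memory the device can actually DMA to */
}

/* Strong definition: overrides the __weak boot-time default in lib/swiotlb.c. */
void *swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	void *buf = alloc_bootmem_low_pages(size);

	if (buf)
		my_arch_fixup_swiotlb(buf, size, nslabs);
	return buf;
}

/* Strong definition for the late (post-bootmem) allocation path. */
void *swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	void *buf = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);

	if (buf)
		my_arch_fixup_swiotlb(buf, PAGE_SIZE << order, nslabs);
	return buf;
}

[With this scheme the generic code in lib/swiotlb.c stays unchanged; only architectures that define these symbols see different behaviour.]
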
-rw-r--r--  include/linux/swiotlb.h |  3
-rw-r--r--  lib/swiotlb.c           | 16
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b18ec5533e8c..b8c5fc766a56 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -10,6 +10,9 @@ struct scatterlist;
 
 extern void swiotlb_init(void);
 
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
 extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 				    dma_addr_t *dma_handle, gfp_t flags);
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5f6c629a924d..abecb2857556 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/swiotlb.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
@@ -126,6 +127,16 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
+void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+	return alloc_bootmem_low_pages(size);
+}
+
+void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +156,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + bytes;
@@ -202,8 +213,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-							order);
+		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
 		if (io_tlb_start)
 			break;
 		order--;