author     Christoph Lameter <clameter@sgi.com>  2007-02-10 01:43:10 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 10:51:18 -0800
commit     4b51d66989218aad731a721b5b28c79bf5388c09 (patch)
tree       8ff7acbd219f699c20c2f1fd201ffb3db5a64062 /include
parent     66701b1499a3ff11882c8c4aef36e8eac86e17b1 (diff)
download   lwn-4b51d66989218aad731a721b5b28c79bf5388c09.tar.gz
           lwn-4b51d66989218aad731a721b5b28c79bf5388c09.zip
[PATCH] optional ZONE_DMA: optional ZONE_DMA in the VM
Make ZONE_DMA optional in core code.

- ifdef all code for ZONE_DMA and related definitions following the
  example for ZONE_DMA32 and ZONE_HIGHMEM.
- Without ZONE_DMA, ZONE_HIGHMEM and ZONE_DMA32 we get to a ZONES_SHIFT
  of 0.
- Modify the VM statistics to work correctly without a DMA zone.
- Modify slab to not create DMA slabs if there is no ZONE_DMA.

[akpm@osdl.org: cleanup]
[jdike@addtoit.com: build fix]
[apw@shadowen.org: Simplify calculation of the number of bits we need for ZONES_SHIFT]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
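[Editor's note: to see why ZONES_SHIFT can now drop to 0, note that a
zone index needs ceil(log2(zone count)) bits, and with ZONE_DMA,
ZONE_DMA32 and ZONE_HIGHMEM all compiled out only ZONE_NORMAL remains.
A minimal standalone sketch of that relationship (plain userspace C,
not kernel code):

#include <stdio.h>

/* Smallest shift that can encode zone_count distinct zone indices. */
static int zones_shift(int zone_count)
{
        int shift = 0;

        while ((1 << shift) < zone_count)
                shift++;
        return shift;
}

int main(void)
{
        int count;

        for (count = 1; count <= 4; count++)
                printf("%d zone(s) -> ZONES_SHIFT %d\n",
                       count, zones_shift(count));
        return 0;
}

Its output (0, 1, 2, 2) matches the #if ladder the patch introduces in
mmzone.h below.]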
Diffstat (limited to 'include')
-rw-r--r--  include/linux/gfp.h        2
-rw-r--r--  include/linux/mmzone.h    26
-rw-r--r--  include/linux/slab_def.h  30
-rw-r--r--  include/linux/vmstat.h    17
4 files changed, 60 insertions, 15 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 063799ea6be0..2a7d15bcde46 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -85,8 +85,10 @@ struct vm_area_struct;
static inline enum zone_type gfp_zone(gfp_t flags)
{
+#ifdef CONFIG_ZONE_DMA
if (flags & __GFP_DMA)
return ZONE_DMA;
+#endif
#ifdef CONFIG_ZONE_DMA32
if (flags & __GFP_DMA32)
return ZONE_DMA32;
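[Editor's note: the effect of this hunk is that a __GFP_DMA request
simply falls through to the next configured zone when no DMA zone is
compiled in. A hypothetical userspace mock of that fallthrough (the
flag value and the two-zone list are simplified stand-ins, not the
kernel's):

#include <stdio.h>

#define __GFP_DMA 0x01u

enum zone_type {
#ifdef CONFIG_ZONE_DMA
        ZONE_DMA,
#endif
        ZONE_NORMAL,
};

static enum zone_type gfp_zone(unsigned int flags)
{
#ifdef CONFIG_ZONE_DMA
        if (flags & __GFP_DMA)
                return ZONE_DMA;
#else
        (void)flags;    /* no DMA zone: the request falls through */
#endif
        return ZONE_NORMAL;
}

int main(void)
{
        printf("__GFP_DMA maps to zone index %d\n", gfp_zone(__GFP_DMA));
        return 0;
}

Built with or without -DCONFIG_ZONE_DMA, the caller always gets a valid
index into whatever zone list was compiled in.]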
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 398f2ec55f54..ee9e3143df4f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -96,6 +96,7 @@ struct per_cpu_pageset {
#endif
enum zone_type {
+#ifdef CONFIG_ZONE_DMA
/*
* ZONE_DMA is used when there are devices that are not able
* to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -116,6 +117,7 @@ enum zone_type {
* <16M.
*/
ZONE_DMA,
+#endif
#ifdef CONFIG_ZONE_DMA32
/*
* x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -152,11 +154,27 @@ enum zone_type {
* match the requested limits. See gfp_zone() in include/linux/gfp.h
*/
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones. Note that the use of defined(X) outside
+ * #if and family is not necessarily defined so ensure we cannot use
+ * it later. Use __ZONE_COUNT to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT ( \
+ defined(CONFIG_ZONE_DMA) \
+ + defined(CONFIG_ZONE_DMA32) \
+ + 1 \
+ + defined(CONFIG_HIGHMEM) \
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
+#undef __ZONE_COUNT
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -523,7 +541,11 @@ static inline int is_dma32(struct zone *zone)
static inline int is_dma(struct zone *zone)
{
+#ifdef CONFIG_ZONE_DMA
return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+ return 0;
+#endif
}
/* These two functions are used to setup the per zone pages min values */
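[Editor's note: the __ZONE_COUNT trick above can be compile-tested in
isolation. A sketch assuming it mirrors the hunk exactly; build with
any subset of -DCONFIG_ZONE_DMA, -DCONFIG_ZONE_DMA32 and
-DCONFIG_HIGHMEM. defined() evaluates to 1 or 0 inside #if, so summing
it counts the configured zones; the macro is #undef'd at once because,
as the patch's own comment warns, defined() inside a macro expansion is
not portable beyond this controlled use.

#include <stdio.h>

#define __ZONE_COUNT ( \
        defined(CONFIG_ZONE_DMA) \
        + defined(CONFIG_ZONE_DMA32) \
        + 1 /* ZONE_NORMAL is always present */ \
        + defined(CONFIG_HIGHMEM) \
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error too many zones configured
#endif
#undef __ZONE_COUNT

int main(void)
{
        printf("ZONES_SHIFT = %d\n", ZONES_SHIFT);
        return 0;
}]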
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 4b463e66ddea..5e4364644ed1 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -19,7 +19,9 @@
struct cache_sizes {
size_t cs_size;
struct kmem_cache *cs_cachep;
+#ifdef CONFIG_ZONE_DMA
struct kmem_cache *cs_dmacachep;
+#endif
};
extern struct cache_sizes malloc_sizes[];
@@ -39,9 +41,12 @@ static inline void *kmalloc(size_t size, gfp_t flags)
__you_cannot_kmalloc_that_much();
}
found:
- return kmem_cache_alloc((flags & GFP_DMA) ?
- malloc_sizes[i].cs_dmacachep :
- malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+ if (flags & GFP_DMA)
+ return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
+ flags);
+#endif
+ return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
}
return __kmalloc(size, flags);
}
@@ -62,9 +67,12 @@ static inline void *kzalloc(size_t size, gfp_t flags)
__you_cannot_kzalloc_that_much();
}
found:
- return kmem_cache_zalloc((flags & GFP_DMA) ?
- malloc_sizes[i].cs_dmacachep :
- malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+ if (flags & GFP_DMA)
+ return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
+ flags);
+#endif
+ return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
}
return __kzalloc(size, flags);
}
@@ -88,9 +96,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
__you_cannot_kmalloc_that_much();
}
found:
- return kmem_cache_alloc_node((flags & GFP_DMA) ?
- malloc_sizes[i].cs_dmacachep :
- malloc_sizes[i].cs_cachep, flags, node);
+#ifdef CONFIG_ZONE_DMA
+ if (flags & GFP_DMA)
+ return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
+ flags, node);
+#endif
+ return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
+ flags, node);
}
return __kmalloc_node(size, flags, node);
}
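[Editor's note: the three slab_def.h hunks follow one pattern: with
CONFIG_ZONE_DMA off, cs_dmacachep no longer exists as a member, so the
GFP_DMA branch must be compiled out rather than tested at run time. A
hypothetical mock of that selection (pick_cache, the stub types and the
GFP_DMA value are illustrative stand-ins only):

#include <stdio.h>

#define GFP_DMA 0x01u

struct kmem_cache { const char *name; };

struct cache_sizes {
        struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
        struct kmem_cache *cs_dmacachep;
#endif
};

static struct kmem_cache normal_cache = { "size-32" };
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache dma_cache = { "size-32(DMA)" };
#endif

static struct kmem_cache *pick_cache(struct cache_sizes *cs,
                                     unsigned int flags)
{
#ifdef CONFIG_ZONE_DMA
        if (flags & GFP_DMA)
                return cs->cs_dmacachep;
#else
        (void)flags;    /* no DMA slabs exist in this configuration */
#endif
        return cs->cs_cachep;
}

int main(void)
{
        struct cache_sizes cs = {
                &normal_cache,
#ifdef CONFIG_ZONE_DMA
                &dma_cache,
#endif
        };

        printf("GFP_DMA allocation uses %s\n",
               pick_cache(&cs, GFP_DMA)->name);
        return 0;
}]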
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 77caf911969c..7ba91f2839fa 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -19,6 +19,12 @@
* generated will simply be the increment of a global address.
*/
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
@@ -31,7 +37,7 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
@@ -96,7 +102,8 @@ static inline void vm_events_fold_cpu(int cpu)
#endif /* CONFIG_VM_EVENT_COUNTERS */
#define __count_zone_vm_events(item, zone, delta) \
- __count_vm_events(item##_DMA + zone_idx(zone), delta)
+ __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+ zone_idx(zone), delta)
/*
* Zone based page accounting with per cpu differentials.
@@ -143,14 +150,16 @@ static inline unsigned long node_page_state(int node,
struct zone *zones = NODE_DATA(node)->node_zones;
return
+#ifdef CONFIG_ZONE_DMA
+ zone_page_state(&zones[ZONE_DMA], item) +
+#endif
#ifdef CONFIG_ZONE_DMA32
zone_page_state(&zones[ZONE_DMA32], item) +
#endif
- zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
- zone_page_state(&zones[ZONE_DMA], item);
+ zone_page_state(&zones[ZONE_NORMAL], item);
}
extern void zone_statistics(struct zonelist *, struct zone *);
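[Editor's note: the __count_zone_vm_events change works because the
distance from a zone's index to its PGALLOC event counter is the same
however many optional zones are configured, so anchoring on ZONE_NORMAL
avoids any reference to a possibly-absent ZONE_DMA. A standalone sketch
of that invariant (simplified enums, outside the kernel):

#include <stdio.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) xx##_NORMAL

enum zone_type { DMA_ZONE(ZONE) ZONE_NORMAL, MAX_NR_ZONES };
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                     FOR_ALL_ZONES(PGALLOC) };

int main(void)
{
        int z;

        for (z = 0; z < MAX_NR_ZONES; z++)
                printf("zone index %d -> event counter %d\n",
                       z, PGALLOC_NORMAL - ZONE_NORMAL + z);
        return 0;
}

Compiled with or without -DCONFIG_ZONE_DMA, the offset
PGALLOC_NORMAL - ZONE_NORMAL stays constant (4 here), which is exactly
what the rewritten macro relies on.]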