author	Nick Piggin <npiggin@suse.de>	2006-03-22 00:08:07 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 07:53:58 -0800
commit	545b1ea9bfa5a8ca9af33d63144bd4f2faaea8dd
tree	deef747e0f08089a0cd14e09551efaddfad813f9 /mm/page_alloc.c
parent	9d41415221214ca4820b9464dfa548e2f20e7dd5
[PATCH] mm: cleanup bootmem
The bootmem code added to page_alloc.c duplicated some page-freeing code that it doesn't really need, because this path is not performance critical.

While we're here, make prefetching work properly by actually prefetching the page we're about to use before prefetching ahead to the next one (i.e. get the most important transaction started first). Also, prefetch just a single page ahead rather than leaving a gap of 16.

Jack Steiner reported no problems with SGI's ia64 simulator.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
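To make the prefetch ordering concrete, here is a minimal userspace sketch of the pattern the patch adopts. It is illustrative only: clear_words and NWORDS are hypothetical names, and GCC's __builtin_prefetch stands in for the kernel's prefetchw():

#include <stdio.h>

#define NWORDS 64	/* stands in for BITS_PER_LONG in the patch */

/* Hypothetical sketch of the ordering the patch adopts: start the fetch
 * for the element we are about to touch first, then prefetch only one
 * element ahead rather than leaving a gap of 16. */
static void clear_words(unsigned long *w)
{
	int i;

	__builtin_prefetch(&w[0], 1);	/* most important transaction first */
	for (i = 0; i < NWORDS; i++) {
		if (i + 1 < NWORDS)
			__builtin_prefetch(&w[i + 1], 1);	/* one ahead */
		w[i] = 0;	/* use the element already in flight */
	}
}

int main(void)
{
	unsigned long buf[NWORDS];

	clear_words(buf);
	printf("buf[0] = %lu\n", buf[0]);
	return 0;
}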
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc65e87368b3..7aa0181287e1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -55,7 +55,6 @@ unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 int percpu_pagelist_fraction;
 
-static void fastcall free_hot_cold_page(struct page *page, int cold);
 static void __free_pages_ok(struct page *page, unsigned int order);
 
 /*
@@ -448,28 +447,23 @@ void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
 	if (order == 0) {
 		__ClearPageReserved(page);
 		set_page_count(page, 0);
-
-		free_hot_cold_page(page, 0);
+		set_page_refs(page, 0);
+		__free_page(page);
 	} else {
-		LIST_HEAD(list);
 		int loop;
 
+		prefetchw(page);
 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
 			struct page *p = &page[loop];
 
-			if (loop + 16 < BITS_PER_LONG)
-				prefetchw(p + 16);
+			if (loop + 1 < BITS_PER_LONG)
+				prefetchw(p + 1);
 			__ClearPageReserved(p);
 			set_page_count(p, 0);
 		}
 
-		arch_free_page(page, order);
-
-		mod_page_state(pgfree, 1 << order);
-
-		list_add(&page->lru, &list);
-		kernel_map_pages(page, 1 << order, 0);
-		free_pages_bulk(page_zone(page), 1, &list, order);
+		set_page_refs(page, order);
+		__free_pages(page, order);
 	}
 }
 
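For reference, applying both hunks yields the following __free_pages_bootmem(). This is reconstructed purely from the context and '+' lines above, assuming nothing outside the shown hunks changed:

void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refs(page, 0);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refs(page, order);
		__free_pages(page, order);
	}
}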